blob: 94323e39eb85532f97c8fc5ad97737dd018e798a [file] [log] [blame]
Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if ONOS can handle
3 a minority of its nodes restarting
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
Jon Hallb3ed8ed2015-10-28 16:43:55 -070012CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
Jon Hall5cf14d52015-07-16 12:15:19 -070014CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
class HAkillNodes:
    """HA test: kill a minority of the ONOS cluster nodes and verify that
    the remaining cluster keeps functioning and recovers."""

    def __init__( self ):
        """Initialize the test class with an empty default value."""
        self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hallf3d16e72015-12-16 17:45:08 -080052 import time
Jon Hall3b489db2015-10-05 14:38:37 -070053 import pexpect
Jon Hall5cf14d52015-07-16 12:15:19 -070054 main.log.info( "ONOS HA test: Restart minority of ONOS nodes - " +
55 "initialization" )
56 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070057 main.caseExplanation = "Setup the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070058 "installing ONOS, starting Mininet and ONOS" +\
59 "cli sessions."
60 # TODO: save all the timers and output them for plotting
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
Jon Halle1a3b752015-07-22 13:02:46 -070069 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070070 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070071 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070074 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
81
82 # FIXME: just get controller port from params?
83 # TODO: do we really need all these?
84 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
85 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
86 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
87 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
88 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
89 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
90 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
91
Jon Halle1a3b752015-07-22 13:02:46 -070092 try:
93 fileName = "Counters"
94 # TODO: Maybe make a library folder somewhere?
95 path = main.params[ 'imports' ][ 'path' ]
96 main.Counters = imp.load_source( fileName,
97 path + fileName + ".py" )
98 except Exception as e:
99 main.log.exception( e )
100 main.cleanup()
101 main.exit()
102
103 main.CLIs = []
104 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700105 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700106 for i in range( 1, main.numCtrls + 1 ):
107 try:
108 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
109 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
110 ipList.append( main.nodes[ -1 ].ip_address )
111 except AttributeError:
112 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700113
114 main.step( "Create cell file" )
115 cellAppString = main.params[ 'ENV' ][ 'appString' ]
116 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
117 main.Mininet1.ip_address,
118 cellAppString, ipList )
119 main.step( "Applying cell variable to environment" )
120 cellResult = main.ONOSbench.setCell( cellName )
121 verifyResult = main.ONOSbench.verifyCell()
122
123 # FIXME:this is short term fix
124 main.log.info( "Removing raft logs" )
125 main.ONOSbench.onosRemoveRaftLogs()
126
127 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700128 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700129 main.ONOSbench.onosUninstall( node.ip_address )
130
131 # Make sure ONOS is DEAD
132 main.log.info( "Killing any ONOS processes" )
133 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700134 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700135 killed = main.ONOSbench.onosKill( node.ip_address )
136 killResults = killResults and killed
137
138 cleanInstallResult = main.TRUE
139 gitPullResult = main.TRUE
140
141 main.step( "Starting Mininet" )
142 # scp topo file to mininet
143 # TODO: move to params?
144 topoName = "obelisk.py"
145 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700146 main.ONOSbench.scp( main.Mininet1,
147 filePath + topoName,
148 main.Mininet1.home,
149 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700150 mnResult = main.Mininet1.startNet( )
151 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
152 onpass="Mininet Started",
153 onfail="Error starting Mininet" )
154
155 main.step( "Git checkout and pull " + gitBranch )
156 if PULLCODE:
157 main.ONOSbench.gitCheckout( gitBranch )
158 gitPullResult = main.ONOSbench.gitPull()
159 # values of 1 or 3 are good
160 utilities.assert_lesser( expect=0, actual=gitPullResult,
161 onpass="Git pull successful",
162 onfail="Git pull failed" )
163 main.ONOSbench.getVersion( report=True )
164
165 main.step( "Using mvn clean install" )
166 cleanInstallResult = main.TRUE
167 if PULLCODE and gitPullResult == main.TRUE:
168 cleanInstallResult = main.ONOSbench.cleanInstall()
169 else:
170 main.log.warn( "Did not pull new code so skipping mvn " +
171 "clean install" )
172 utilities.assert_equals( expect=main.TRUE,
173 actual=cleanInstallResult,
174 onpass="MCI successful",
175 onfail="MCI failed" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700176
177 main.step( "Make sure ONOS service doesn't automatically respawn" )
178 handle = main.ONOSbench.handle
179 handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
180 handle.expect( "\$" ) # $ from the command
181 handle.expect( "\$" ) # $ from the prompt
182
Jon Hall5cf14d52015-07-16 12:15:19 -0700183 # GRAPHS
184 # NOTE: important params here:
185 # job = name of Jenkins job
186 # Plot Name = Plot-HA, only can be used if multiple plots
187 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700188 job = "HAkillNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700189 plotName = "Plot-HA"
190 graphs = '<ac:structured-macro ac:name="html">\n'
191 graphs += '<ac:plain-text-body><![CDATA[\n'
192 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
193 '/plot/' + plotName + '/getPlot?index=0' +\
194 '&width=500&height=300"' +\
195 'noborder="0" width="500" height="300" scrolling="yes" ' +\
196 'seamless="seamless"></iframe>\n'
197 graphs += ']]></ac:plain-text-body>\n'
198 graphs += '</ac:structured-macro>\n'
199 main.log.wiki(graphs)
200
201 main.step( "Creating ONOS package" )
Jon Hall3b489db2015-10-05 14:38:37 -0700202 # copy gen-partions file to ONOS
203 # NOTE: this assumes TestON and ONOS are on the same machine
204 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
205 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
206 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
207 main.ONOSbench.ip_address,
208 srcFile,
209 dstDir,
210 pwd=main.ONOSbench.pwd,
211 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700212 packageResult = main.ONOSbench.onosPackage()
213 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
214 onpass="ONOS package successful",
215 onfail="ONOS package failed" )
216
217 main.step( "Installing ONOS package" )
218 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700219 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700220 tmpResult = main.ONOSbench.onosInstall( options="-f",
221 node=node.ip_address )
222 onosInstallResult = onosInstallResult and tmpResult
223 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
224 onpass="ONOS install successful",
225 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700226 # clean up gen-partitions file
227 try:
228 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
229 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
230 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
231 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
232 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
233 str( main.ONOSbench.handle.before ) )
234 except ( pexpect.TIMEOUT, pexpect.EOF ):
235 main.log.exception( "ONOSbench: pexpect exception found:" +
236 main.ONOSbench.handle.before )
237 main.cleanup()
238 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700239
240 main.step( "Checking if ONOS is up yet" )
241 for i in range( 2 ):
242 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700243 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700244 started = main.ONOSbench.isup( node.ip_address )
245 if not started:
246 main.log.error( node.name + " didn't start!" )
247 main.ONOSbench.onosStop( node.ip_address )
248 main.ONOSbench.onosStart( node.ip_address )
249 onosIsupResult = onosIsupResult and started
250 if onosIsupResult == main.TRUE:
251 break
252 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
253 onpass="ONOS startup successful",
254 onfail="ONOS startup failed" )
255
256 main.log.step( "Starting ONOS CLI sessions" )
257 cliResults = main.TRUE
258 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700259 for i in range( main.numCtrls ):
260 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700261 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700262 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700263 threads.append( t )
264 t.start()
265
266 for t in threads:
267 t.join()
268 cliResults = cliResults and t.result
269 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
270 onpass="ONOS cli startup successful",
271 onfail="ONOS cli startup failed" )
272
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700273 # Create a list of active nodes for use when some nodes are stopped
274 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
275
Jon Hall5cf14d52015-07-16 12:15:19 -0700276 if main.params[ 'tcpdump' ].lower() == "true":
277 main.step( "Start Packet Capture MN" )
278 main.Mininet2.startTcpdump(
279 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
280 + "-MN.pcap",
281 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
282 port=main.params[ 'MNtcpdump' ][ 'port' ] )
283
284 main.step( "App Ids check" )
Jon Hallf3d16e72015-12-16 17:45:08 -0800285 time.sleep(60)
Jon Hall5cf14d52015-07-16 12:15:19 -0700286 appCheck = main.TRUE
287 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700288 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700289 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700290 name="appToIDCheck-" + str( i ),
291 args=[] )
292 threads.append( t )
293 t.start()
294
295 for t in threads:
296 t.join()
297 appCheck = appCheck and t.result
298 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700299 node = main.activeNodes[0]
300 main.log.warn( main.CLIs[node].apps() )
301 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700302 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
303 onpass="App Ids seem to be correct",
304 onfail="Something is wrong with app Ids" )
305
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700306 main.step( "Clean up ONOS service changes" )
307 handle.sendline( "git checkout -- tools/package/init/onos.conf" )
308 handle.expect( "\$" )
309
Jon Hall5cf14d52015-07-16 12:15:19 -0700310 if cliResults == main.FALSE:
311 main.log.error( "Failed to start ONOS, stopping test" )
312 main.cleanup()
313 main.exit()
314
315 def CASE2( self, main ):
316 """
317 Assign devices to controllers
318 """
319 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700320 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700321 assert main, "main not defined"
322 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700323 assert main.CLIs, "main.CLIs not defined"
324 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700325 assert ONOS1Port, "ONOS1Port not defined"
326 assert ONOS2Port, "ONOS2Port not defined"
327 assert ONOS3Port, "ONOS3Port not defined"
328 assert ONOS4Port, "ONOS4Port not defined"
329 assert ONOS5Port, "ONOS5Port not defined"
330 assert ONOS6Port, "ONOS6Port not defined"
331 assert ONOS7Port, "ONOS7Port not defined"
332
333 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700334 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700335 "and check that an ONOS node becomes the " +\
336 "master of the device."
337 main.step( "Assign switches to controllers" )
338
339 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700340 for i in range( main.numCtrls ):
341 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700342 swList = []
343 for i in range( 1, 29 ):
344 swList.append( "s" + str( i ) )
345 main.Mininet1.assignSwController( sw=swList, ip=ipList )
346
347 mastershipCheck = main.TRUE
348 for i in range( 1, 29 ):
349 response = main.Mininet1.getSwController( "s" + str( i ) )
350 try:
351 main.log.info( str( response ) )
352 except Exception:
353 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700354 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700355 if re.search( "tcp:" + node.ip_address, response ):
356 mastershipCheck = mastershipCheck and main.TRUE
357 else:
358 main.log.error( "Error, node " + node.ip_address + " is " +
359 "not in the list of controllers s" +
360 str( i ) + " is connecting to." )
361 mastershipCheck = main.FALSE
362 utilities.assert_equals(
363 expect=main.TRUE,
364 actual=mastershipCheck,
365 onpass="Switch mastership assigned correctly",
366 onfail="Switches not assigned correctly to controllers" )
367
368 def CASE21( self, main ):
369 """
370 Assign mastership to controllers
371 """
Jon Hall5cf14d52015-07-16 12:15:19 -0700372 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700373 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700374 assert main, "main not defined"
375 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700376 assert main.CLIs, "main.CLIs not defined"
377 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700378 assert ONOS1Port, "ONOS1Port not defined"
379 assert ONOS2Port, "ONOS2Port not defined"
380 assert ONOS3Port, "ONOS3Port not defined"
381 assert ONOS4Port, "ONOS4Port not defined"
382 assert ONOS5Port, "ONOS5Port not defined"
383 assert ONOS6Port, "ONOS6Port not defined"
384 assert ONOS7Port, "ONOS7Port not defined"
385
386 main.case( "Assigning Controller roles for switches" )
Jon Hall783bbf92015-07-23 14:33:19 -0700387 main.caseExplanation = "Check that ONOS is connected to each " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700388 "device. Then manually assign" +\
389 " mastership to specific ONOS nodes using" +\
390 " 'device-role'"
391 main.step( "Assign mastership of switches to specific controllers" )
392 # Manually assign mastership to the controller we want
393 roleCall = main.TRUE
394
395 ipList = [ ]
396 deviceList = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700397 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700398 try:
399 # Assign mastership to specific controllers. This assignment was
400 # determined for a 7 node cluser, but will work with any sized
401 # cluster
402 for i in range( 1, 29 ): # switches 1 through 28
403 # set up correct variables:
404 if i == 1:
405 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700406 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700407 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700408 elif i == 2:
Jon Halle1a3b752015-07-22 13:02:46 -0700409 c = 1 % main.numCtrls
410 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700411 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700412 elif i == 3:
Jon Halle1a3b752015-07-22 13:02:46 -0700413 c = 1 % main.numCtrls
414 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700415 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700416 elif i == 4:
Jon Halle1a3b752015-07-22 13:02:46 -0700417 c = 3 % main.numCtrls
418 ip = main.nodes[ c ].ip_address # ONOS4
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700419 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700420 elif i == 5:
Jon Halle1a3b752015-07-22 13:02:46 -0700421 c = 2 % main.numCtrls
422 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700423 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700424 elif i == 6:
Jon Halle1a3b752015-07-22 13:02:46 -0700425 c = 2 % main.numCtrls
426 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700427 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700428 elif i == 7:
Jon Halle1a3b752015-07-22 13:02:46 -0700429 c = 5 % main.numCtrls
430 ip = main.nodes[ c ].ip_address # ONOS6
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700431 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700432 elif i >= 8 and i <= 17:
Jon Halle1a3b752015-07-22 13:02:46 -0700433 c = 4 % main.numCtrls
434 ip = main.nodes[ c ].ip_address # ONOS5
Jon Hall5cf14d52015-07-16 12:15:19 -0700435 dpid = '3' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700436 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700437 elif i >= 18 and i <= 27:
Jon Halle1a3b752015-07-22 13:02:46 -0700438 c = 6 % main.numCtrls
439 ip = main.nodes[ c ].ip_address # ONOS7
Jon Hall5cf14d52015-07-16 12:15:19 -0700440 dpid = '6' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700441 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700442 elif i == 28:
443 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700444 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700445 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700446 else:
447 main.log.error( "You didn't write an else statement for " +
448 "switch s" + str( i ) )
449 roleCall = main.FALSE
450 # Assign switch
451 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
452 # TODO: make this controller dynamic
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700453 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
Jon Hall5cf14d52015-07-16 12:15:19 -0700454 ipList.append( ip )
455 deviceList.append( deviceId )
456 except ( AttributeError, AssertionError ):
457 main.log.exception( "Something is wrong with ONOS device view" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700458 main.log.info( onosCli.devices() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700459 utilities.assert_equals(
460 expect=main.TRUE,
461 actual=roleCall,
462 onpass="Re-assigned switch mastership to designated controller",
463 onfail="Something wrong with deviceRole calls" )
464
465 main.step( "Check mastership was correctly assigned" )
466 roleCheck = main.TRUE
467 # NOTE: This is due to the fact that device mastership change is not
468 # atomic and is actually a multi step process
469 time.sleep( 5 )
470 for i in range( len( ipList ) ):
471 ip = ipList[i]
472 deviceId = deviceList[i]
473 # Check assignment
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700474 master = onosCli.getRole( deviceId ).get( 'master' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700475 if ip in master:
476 roleCheck = roleCheck and main.TRUE
477 else:
478 roleCheck = roleCheck and main.FALSE
479 main.log.error( "Error, controller " + ip + " is not" +
480 " master " + "of device " +
481 str( deviceId ) + ". Master is " +
482 repr( master ) + "." )
483 utilities.assert_equals(
484 expect=main.TRUE,
485 actual=roleCheck,
486 onpass="Switches were successfully reassigned to designated " +
487 "controller",
488 onfail="Switches were not successfully reassigned" )
489
490 def CASE3( self, main ):
491 """
492 Assign intents
493 """
494 import time
495 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700496 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700497 assert main, "main not defined"
498 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700499 assert main.CLIs, "main.CLIs not defined"
500 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700501 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700502 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700503 "assign predetermined host-to-host intents." +\
504 " After installation, check that the intent" +\
505 " is distributed to all nodes and the state" +\
506 " is INSTALLED"
507
508 # install onos-app-fwd
509 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700510 onosCli = main.CLIs[ main.activeNodes[0] ]
511 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700512 utilities.assert_equals( expect=main.TRUE, actual=installResults,
513 onpass="Install fwd successful",
514 onfail="Install fwd failed" )
515
516 main.step( "Check app ids" )
517 appCheck = main.TRUE
518 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700519 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700520 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700521 name="appToIDCheck-" + str( i ),
522 args=[] )
523 threads.append( t )
524 t.start()
525
526 for t in threads:
527 t.join()
528 appCheck = appCheck and t.result
529 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700530 main.log.warn( onosCli.apps() )
531 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700532 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
533 onpass="App Ids seem to be correct",
534 onfail="Something is wrong with app Ids" )
535
536 main.step( "Discovering Hosts( Via pingall for now )" )
537 # FIXME: Once we have a host discovery mechanism, use that instead
538 # REACTIVE FWD test
539 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700540 passMsg = "Reactive Pingall test passed"
541 time1 = time.time()
542 pingResult = main.Mininet1.pingall()
543 time2 = time.time()
544 if not pingResult:
545 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700546 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700547 passMsg += " on the second try"
548 utilities.assert_equals(
549 expect=main.TRUE,
550 actual=pingResult,
551 onpass= passMsg,
552 onfail="Reactive Pingall failed, " +
553 "one or more ping pairs failed" )
554 main.log.info( "Time for pingall: %2f seconds" %
555 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700556 # timeout for fwd flows
557 time.sleep( 11 )
558 # uninstall onos-app-fwd
559 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700560 node = main.activeNodes[0]
561 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700562 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
563 onpass="Uninstall fwd successful",
564 onfail="Uninstall fwd failed" )
565
566 main.step( "Check app ids" )
567 threads = []
568 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700569 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700570 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700571 name="appToIDCheck-" + str( i ),
572 args=[] )
573 threads.append( t )
574 t.start()
575
576 for t in threads:
577 t.join()
578 appCheck2 = appCheck2 and t.result
579 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700580 node = main.activeNodes[0]
581 main.log.warn( main.CLIs[node].apps() )
582 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700583 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
584 onpass="App Ids seem to be correct",
585 onfail="Something is wrong with app Ids" )
586
587 main.step( "Add host intents via cli" )
588 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700589 # TODO: move the host numbers to params
590 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700591 intentAddResult = True
592 hostResult = main.TRUE
593 for i in range( 8, 18 ):
594 main.log.info( "Adding host intent between h" + str( i ) +
595 " and h" + str( i + 10 ) )
596 host1 = "00:00:00:00:00:" + \
597 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
598 host2 = "00:00:00:00:00:" + \
599 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
600 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700601 host1Dict = onosCli.getHost( host1 )
602 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700603 host1Id = None
604 host2Id = None
605 if host1Dict and host2Dict:
606 host1Id = host1Dict.get( 'id', None )
607 host2Id = host2Dict.get( 'id', None )
608 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700609 nodeNum = ( i % len( main.activeNodes ) )
610 node = main.activeNodes[nodeNum]
611 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700612 if tmpId:
613 main.log.info( "Added intent with id: " + tmpId )
614 intentIds.append( tmpId )
615 else:
616 main.log.error( "addHostIntent returned: " +
617 repr( tmpId ) )
618 else:
619 main.log.error( "Error, getHost() failed for h" + str( i ) +
620 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700621 node = main.activeNodes[0]
622 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700623 main.log.warn( "Hosts output: " )
624 try:
625 main.log.warn( json.dumps( json.loads( hosts ),
626 sort_keys=True,
627 indent=4,
628 separators=( ',', ': ' ) ) )
629 except ( ValueError, TypeError ):
630 main.log.warn( repr( hosts ) )
631 hostResult = main.FALSE
632 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
633 onpass="Found a host id for each host",
634 onfail="Error looking up host ids" )
635
636 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700637 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700638 main.log.info( "Submitted intents: " + str( intentIds ) )
639 main.log.info( "Intents in ONOS: " + str( onosIds ) )
640 for intent in intentIds:
641 if intent in onosIds:
642 pass # intent submitted is in onos
643 else:
644 intentAddResult = False
645 if intentAddResult:
646 intentStop = time.time()
647 else:
648 intentStop = None
649 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700650 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700651 intentStates = []
652 installedCheck = True
653 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
654 count = 0
655 try:
656 for intent in json.loads( intents ):
657 state = intent.get( 'state', None )
658 if "INSTALLED" not in state:
659 installedCheck = False
660 intentId = intent.get( 'id', None )
661 intentStates.append( ( intentId, state ) )
662 except ( ValueError, TypeError ):
663 main.log.exception( "Error parsing intents" )
664 # add submitted intents not in the store
665 tmplist = [ i for i, s in intentStates ]
666 missingIntents = False
667 for i in intentIds:
668 if i not in tmplist:
669 intentStates.append( ( i, " - " ) )
670 missingIntents = True
671 intentStates.sort()
672 for i, s in intentStates:
673 count += 1
674 main.log.info( "%-6s%-15s%-15s" %
675 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700676 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700677 try:
678 missing = False
679 if leaders:
680 parsedLeaders = json.loads( leaders )
681 main.log.warn( json.dumps( parsedLeaders,
682 sort_keys=True,
683 indent=4,
684 separators=( ',', ': ' ) ) )
685 # check for all intent partitions
686 topics = []
687 for i in range( 14 ):
688 topics.append( "intent-partition-" + str( i ) )
689 main.log.debug( topics )
690 ONOStopics = [ j['topic'] for j in parsedLeaders ]
691 for topic in topics:
692 if topic not in ONOStopics:
693 main.log.error( "Error: " + topic +
694 " not in leaders" )
695 missing = True
696 else:
697 main.log.error( "leaders() returned None" )
698 except ( ValueError, TypeError ):
699 main.log.exception( "Error parsing leaders" )
700 main.log.error( repr( leaders ) )
701 # Check all nodes
702 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700703 for i in main.activeNodes:
704 response = main.CLIs[i].leaders( jsonFormat=False)
705 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700706 str( response ) )
707
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700708 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700709 try:
710 if partitions :
711 parsedPartitions = json.loads( partitions )
712 main.log.warn( json.dumps( parsedPartitions,
713 sort_keys=True,
714 indent=4,
715 separators=( ',', ': ' ) ) )
716 # TODO check for a leader in all paritions
717 # TODO check for consistency among nodes
718 else:
719 main.log.error( "partitions() returned None" )
720 except ( ValueError, TypeError ):
721 main.log.exception( "Error parsing partitions" )
722 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700723 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700724 try:
725 if pendingMap :
726 parsedPending = json.loads( pendingMap )
727 main.log.warn( json.dumps( parsedPending,
728 sort_keys=True,
729 indent=4,
730 separators=( ',', ': ' ) ) )
731 # TODO check something here?
732 else:
733 main.log.error( "pendingMap() returned None" )
734 except ( ValueError, TypeError ):
735 main.log.exception( "Error parsing pending map" )
736 main.log.error( repr( pendingMap ) )
737
738 intentAddResult = bool( intentAddResult and not missingIntents and
739 installedCheck )
740 if not intentAddResult:
741 main.log.error( "Error in pushing host intents to ONOS" )
742
743 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700744 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700745 correct = True
746 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700747 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700748 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700749 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700750 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700751 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700752 str( sorted( onosIds ) ) )
753 if sorted( ids ) != sorted( intentIds ):
754 main.log.warn( "Set of intent IDs doesn't match" )
755 correct = False
756 break
757 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700758 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700759 for intent in intents:
760 if intent[ 'state' ] != "INSTALLED":
761 main.log.warn( "Intent " + intent[ 'id' ] +
762 " is " + intent[ 'state' ] )
763 correct = False
764 break
765 if correct:
766 break
767 else:
768 time.sleep(1)
769 if not intentStop:
770 intentStop = time.time()
771 global gossipTime
772 gossipTime = intentStop - intentStart
773 main.log.info( "It took about " + str( gossipTime ) +
774 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700775 gossipPeriod = int( main.params['timers']['gossip'] )
776 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700777 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700778 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700779 onpass="ECM anti-entropy for intents worked within " +
780 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700781 onfail="Intent ECM anti-entropy took too long. " +
782 "Expected time:{}, Actual time:{}".format( maxGossipTime,
783 gossipTime ) )
784 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700785 intentAddResult = True
786
787 if not intentAddResult or "key" in pendingMap:
788 import time
789 installedCheck = True
790 main.log.info( "Sleeping 60 seconds to see if intents are found" )
791 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700792 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700793 main.log.info( "Submitted intents: " + str( intentIds ) )
794 main.log.info( "Intents in ONOS: " + str( onosIds ) )
795 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700796 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700797 intentStates = []
798 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
799 count = 0
800 try:
801 for intent in json.loads( intents ):
802 # Iter through intents of a node
803 state = intent.get( 'state', None )
804 if "INSTALLED" not in state:
805 installedCheck = False
806 intentId = intent.get( 'id', None )
807 intentStates.append( ( intentId, state ) )
808 except ( ValueError, TypeError ):
809 main.log.exception( "Error parsing intents" )
810 # add submitted intents not in the store
811 tmplist = [ i for i, s in intentStates ]
812 for i in intentIds:
813 if i not in tmplist:
814 intentStates.append( ( i, " - " ) )
815 intentStates.sort()
816 for i, s in intentStates:
817 count += 1
818 main.log.info( "%-6s%-15s%-15s" %
819 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700820 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700821 try:
822 missing = False
823 if leaders:
824 parsedLeaders = json.loads( leaders )
825 main.log.warn( json.dumps( parsedLeaders,
826 sort_keys=True,
827 indent=4,
828 separators=( ',', ': ' ) ) )
829 # check for all intent partitions
830 # check for election
831 topics = []
832 for i in range( 14 ):
833 topics.append( "intent-partition-" + str( i ) )
834 # FIXME: this should only be after we start the app
835 topics.append( "org.onosproject.election" )
836 main.log.debug( topics )
837 ONOStopics = [ j['topic'] for j in parsedLeaders ]
838 for topic in topics:
839 if topic not in ONOStopics:
840 main.log.error( "Error: " + topic +
841 " not in leaders" )
842 missing = True
843 else:
844 main.log.error( "leaders() returned None" )
845 except ( ValueError, TypeError ):
846 main.log.exception( "Error parsing leaders" )
847 main.log.error( repr( leaders ) )
848 # Check all nodes
849 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700850 for i in main.activeNodes:
851 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700852 response = node.leaders( jsonFormat=False)
853 main.log.warn( str( node.name ) + " leaders output: \n" +
854 str( response ) )
855
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700856 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700857 try:
858 if partitions :
859 parsedPartitions = json.loads( partitions )
860 main.log.warn( json.dumps( parsedPartitions,
861 sort_keys=True,
862 indent=4,
863 separators=( ',', ': ' ) ) )
864 # TODO check for a leader in all paritions
865 # TODO check for consistency among nodes
866 else:
867 main.log.error( "partitions() returned None" )
868 except ( ValueError, TypeError ):
869 main.log.exception( "Error parsing partitions" )
870 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700871 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700872 try:
873 if pendingMap :
874 parsedPending = json.loads( pendingMap )
875 main.log.warn( json.dumps( parsedPending,
876 sort_keys=True,
877 indent=4,
878 separators=( ',', ': ' ) ) )
879 # TODO check something here?
880 else:
881 main.log.error( "pendingMap() returned None" )
882 except ( ValueError, TypeError ):
883 main.log.exception( "Error parsing pending map" )
884 main.log.error( repr( pendingMap ) )
885
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Verifies dataplane connectivity for the host intents added in CASE3 by
        pinging h8..h17 to their paired hosts h18..h27, then inspects ONOS
        state related to those intents:
          1. Ping across each host pair and assert all pings pass.
          2. Poll (up to ~40s) until every intent reports INSTALLED.
          3. Check that every intent-partition topic has a leader.
          4. Dump partitions and the intent pending map for debugging.
          5. If intents were not all INSTALLED, wait 60 seconds, re-dump all
             of the above state plus flow rules, and ping again.

        `main` is the TestON harness object carrying the CLI drivers
        ( main.CLIs ), Mininet drivers, logging, and step/assert helpers.
        """
        import json
        import time
        # Sanity-check that the harness was fully initialized by CASE1
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"
        main.step( "Ping across added host intents" )
        # Use the first active node's CLI for all single-node queries below
        onosCli = main.CLIs[ main.activeNodes[0] ]
        PingResult = main.TRUE
        # Host pairs are hN <-> h(N+10), i.e. h8-h18 ... h17-h27
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # On failure, dump the intents from one node to aid debugging
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                # json.loads failed; log the raw (non-JSON) response instead
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll roughly once per second, up to 40 tries, until every intent
        # reported by the node is in the INSTALLED state
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                # Expect one leadership topic per intent partition (0..13)
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        # NOTE(review): this dumps per-node leaders output when the check
        # PASSED; the similar block later runs only on failure ("if missing:").
        # Confirm whether "if not topicCheck:" was intended here.
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If intents never all reached INSTALLED, give the cluster one more
        # minute to converge, re-dump state, and retry the pings
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                # NOTE: "missing" is the first statement of the try, so it is
                # always bound before the "if missing:" check below
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # On missing topics, dump every active node's leaders view
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    # json.loads failed; log the raw response instead
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1166
1167 def CASE5( self, main ):
1168 """
1169 Reading state of ONOS
1170 """
1171 import json
1172 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001173 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001174 assert main, "main not defined"
1175 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001176 assert main.CLIs, "main.CLIs not defined"
1177 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001178
1179 main.case( "Setting up and gathering data for current state" )
1180 # The general idea for this test case is to pull the state of
1181 # ( intents,flows, topology,... ) from each ONOS node
1182 # We can then compare them with each other and also with past states
1183
1184 main.step( "Check that each switch has a master" )
1185 global mastershipState
1186 mastershipState = '[]'
1187
1188 # Assert that each device has a master
1189 rolesNotNull = main.TRUE
1190 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001191 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001192 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001193 name="rolesNotNull-" + str( i ),
1194 args=[] )
1195 threads.append( t )
1196 t.start()
1197
1198 for t in threads:
1199 t.join()
1200 rolesNotNull = rolesNotNull and t.result
1201 utilities.assert_equals(
1202 expect=main.TRUE,
1203 actual=rolesNotNull,
1204 onpass="Each device has a master",
1205 onfail="Some devices don't have a master assigned" )
1206
1207 main.step( "Get the Mastership of each switch from each controller" )
1208 ONOSMastership = []
1209 mastershipCheck = main.FALSE
1210 consistentMastership = True
1211 rolesResults = True
1212 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001213 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001214 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001215 name="roles-" + str( i ),
1216 args=[] )
1217 threads.append( t )
1218 t.start()
1219
1220 for t in threads:
1221 t.join()
1222 ONOSMastership.append( t.result )
1223
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001224 for i in range( len( ONOSMastership ) ):
1225 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001226 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001227 main.log.error( "Error in getting ONOS" + node + " roles" )
1228 main.log.warn( "ONOS" + node + " mastership response: " +
1229 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001230 rolesResults = False
1231 utilities.assert_equals(
1232 expect=True,
1233 actual=rolesResults,
1234 onpass="No error in reading roles output",
1235 onfail="Error in reading roles from ONOS" )
1236
1237 main.step( "Check for consistency in roles from each controller" )
1238 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1239 main.log.info(
1240 "Switch roles are consistent across all ONOS nodes" )
1241 else:
1242 consistentMastership = False
1243 utilities.assert_equals(
1244 expect=True,
1245 actual=consistentMastership,
1246 onpass="Switch roles are consistent across all ONOS nodes",
1247 onfail="ONOS nodes have different views of switch roles" )
1248
1249 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001250 for i in range( len( main.activeNodes ) ):
1251 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001252 try:
1253 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001254 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001255 json.dumps(
1256 json.loads( ONOSMastership[ i ] ),
1257 sort_keys=True,
1258 indent=4,
1259 separators=( ',', ': ' ) ) )
1260 except ( ValueError, TypeError ):
1261 main.log.warn( repr( ONOSMastership[ i ] ) )
1262 elif rolesResults and consistentMastership:
1263 mastershipCheck = main.TRUE
1264 mastershipState = ONOSMastership[ 0 ]
1265
1266 main.step( "Get the intents from each controller" )
1267 global intentState
1268 intentState = []
1269 ONOSIntents = []
1270 intentCheck = main.FALSE
1271 consistentIntents = True
1272 intentsResults = True
1273 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001274 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001275 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001276 name="intents-" + str( i ),
1277 args=[],
1278 kwargs={ 'jsonFormat': True } )
1279 threads.append( t )
1280 t.start()
1281
1282 for t in threads:
1283 t.join()
1284 ONOSIntents.append( t.result )
1285
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001286 for i in range( len( ONOSIntents ) ):
1287 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001288 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001289 main.log.error( "Error in getting ONOS" + node + " intents" )
1290 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001291 repr( ONOSIntents[ i ] ) )
1292 intentsResults = False
1293 utilities.assert_equals(
1294 expect=True,
1295 actual=intentsResults,
1296 onpass="No error in reading intents output",
1297 onfail="Error in reading intents from ONOS" )
1298
1299 main.step( "Check for consistency in Intents from each controller" )
1300 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1301 main.log.info( "Intents are consistent across all ONOS " +
1302 "nodes" )
1303 else:
1304 consistentIntents = False
1305 main.log.error( "Intents not consistent" )
1306 utilities.assert_equals(
1307 expect=True,
1308 actual=consistentIntents,
1309 onpass="Intents are consistent across all ONOS nodes",
1310 onfail="ONOS nodes have different views of intents" )
1311
1312 if intentsResults:
1313 # Try to make it easy to figure out what is happening
1314 #
1315 # Intent ONOS1 ONOS2 ...
1316 # 0x01 INSTALLED INSTALLING
1317 # ... ... ...
1318 # ... ... ...
1319 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001320 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001321 title += " " * 10 + "ONOS" + str( n + 1 )
1322 main.log.warn( title )
1323 # get all intent keys in the cluster
1324 keys = []
1325 for nodeStr in ONOSIntents:
1326 node = json.loads( nodeStr )
1327 for intent in node:
1328 keys.append( intent.get( 'id' ) )
1329 keys = set( keys )
1330 for key in keys:
1331 row = "%-13s" % key
1332 for nodeStr in ONOSIntents:
1333 node = json.loads( nodeStr )
1334 for intent in node:
1335 if intent.get( 'id', "Error" ) == key:
1336 row += "%-15s" % intent.get( 'state' )
1337 main.log.warn( row )
1338 # End table view
1339
1340 if intentsResults and not consistentIntents:
1341 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001342 n = str( main.activeNodes[-1] + 1 )
1343 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001344 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1345 sort_keys=True,
1346 indent=4,
1347 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001348 for i in range( len( ONOSIntents ) ):
1349 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001350 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001351 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001352 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1353 sort_keys=True,
1354 indent=4,
1355 separators=( ',', ': ' ) ) )
1356 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001357 main.log.debug( "ONOS" + node + " intents match ONOS" +
1358 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001359 elif intentsResults and consistentIntents:
1360 intentCheck = main.TRUE
1361 intentState = ONOSIntents[ 0 ]
1362
1363 main.step( "Get the flows from each controller" )
1364 global flowState
1365 flowState = []
1366 ONOSFlows = []
1367 ONOSFlowsJson = []
1368 flowCheck = main.FALSE
1369 consistentFlows = True
1370 flowsResults = True
1371 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001372 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001373 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001374 name="flows-" + str( i ),
1375 args=[],
1376 kwargs={ 'jsonFormat': True } )
1377 threads.append( t )
1378 t.start()
1379
1380 # NOTE: Flows command can take some time to run
1381 time.sleep(30)
1382 for t in threads:
1383 t.join()
1384 result = t.result
1385 ONOSFlows.append( result )
1386
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001387 for i in range( len( ONOSFlows ) ):
1388 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001389 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1390 main.log.error( "Error in getting ONOS" + num + " flows" )
1391 main.log.warn( "ONOS" + num + " flows response: " +
1392 repr( ONOSFlows[ i ] ) )
1393 flowsResults = False
1394 ONOSFlowsJson.append( None )
1395 else:
1396 try:
1397 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1398 except ( ValueError, TypeError ):
1399 # FIXME: change this to log.error?
1400 main.log.exception( "Error in parsing ONOS" + num +
1401 " response as json." )
1402 main.log.error( repr( ONOSFlows[ i ] ) )
1403 ONOSFlowsJson.append( None )
1404 flowsResults = False
1405 utilities.assert_equals(
1406 expect=True,
1407 actual=flowsResults,
1408 onpass="No error in reading flows output",
1409 onfail="Error in reading flows from ONOS" )
1410
1411 main.step( "Check for consistency in Flows from each controller" )
1412 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1413 if all( tmp ):
1414 main.log.info( "Flow count is consistent across all ONOS nodes" )
1415 else:
1416 consistentFlows = False
1417 utilities.assert_equals(
1418 expect=True,
1419 actual=consistentFlows,
1420 onpass="The flow count is consistent across all ONOS nodes",
1421 onfail="ONOS nodes have different flow counts" )
1422
1423 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001424 for i in range( len( ONOSFlows ) ):
1425 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001426 try:
1427 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001428 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001429 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1430 indent=4, separators=( ',', ': ' ) ) )
1431 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001432 main.log.warn( "ONOS" + node + " flows: " +
1433 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001434 elif flowsResults and consistentFlows:
1435 flowCheck = main.TRUE
1436 flowState = ONOSFlows[ 0 ]
1437
1438 main.step( "Get the OF Table entries" )
1439 global flows
1440 flows = []
1441 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001442 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001443 if flowCheck == main.FALSE:
1444 for table in flows:
1445 main.log.warn( table )
1446 # TODO: Compare switch flow tables with ONOS flow tables
1447
1448 main.step( "Start continuous pings" )
1449 main.Mininet2.pingLong(
1450 src=main.params[ 'PING' ][ 'source1' ],
1451 target=main.params[ 'PING' ][ 'target1' ],
1452 pingTime=500 )
1453 main.Mininet2.pingLong(
1454 src=main.params[ 'PING' ][ 'source2' ],
1455 target=main.params[ 'PING' ][ 'target2' ],
1456 pingTime=500 )
1457 main.Mininet2.pingLong(
1458 src=main.params[ 'PING' ][ 'source3' ],
1459 target=main.params[ 'PING' ][ 'target3' ],
1460 pingTime=500 )
1461 main.Mininet2.pingLong(
1462 src=main.params[ 'PING' ][ 'source4' ],
1463 target=main.params[ 'PING' ][ 'target4' ],
1464 pingTime=500 )
1465 main.Mininet2.pingLong(
1466 src=main.params[ 'PING' ][ 'source5' ],
1467 target=main.params[ 'PING' ][ 'target5' ],
1468 pingTime=500 )
1469 main.Mininet2.pingLong(
1470 src=main.params[ 'PING' ][ 'source6' ],
1471 target=main.params[ 'PING' ][ 'target6' ],
1472 pingTime=500 )
1473 main.Mininet2.pingLong(
1474 src=main.params[ 'PING' ][ 'source7' ],
1475 target=main.params[ 'PING' ][ 'target7' ],
1476 pingTime=500 )
1477 main.Mininet2.pingLong(
1478 src=main.params[ 'PING' ][ 'source8' ],
1479 target=main.params[ 'PING' ][ 'target8' ],
1480 pingTime=500 )
1481 main.Mininet2.pingLong(
1482 src=main.params[ 'PING' ][ 'source9' ],
1483 target=main.params[ 'PING' ][ 'target9' ],
1484 pingTime=500 )
1485 main.Mininet2.pingLong(
1486 src=main.params[ 'PING' ][ 'source10' ],
1487 target=main.params[ 'PING' ][ 'target10' ],
1488 pingTime=500 )
1489
1490 main.step( "Collecting topology information from ONOS" )
1491 devices = []
1492 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001493 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001494 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001495 name="devices-" + str( i ),
1496 args=[ ] )
1497 threads.append( t )
1498 t.start()
1499
1500 for t in threads:
1501 t.join()
1502 devices.append( t.result )
1503 hosts = []
1504 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001505 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001506 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001507 name="hosts-" + str( i ),
1508 args=[ ] )
1509 threads.append( t )
1510 t.start()
1511
1512 for t in threads:
1513 t.join()
1514 try:
1515 hosts.append( json.loads( t.result ) )
1516 except ( ValueError, TypeError ):
1517 # FIXME: better handling of this, print which node
1518 # Maybe use thread name?
1519 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001520 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001521 hosts.append( None )
1522
1523 ports = []
1524 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001525 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001526 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001527 name="ports-" + str( i ),
1528 args=[ ] )
1529 threads.append( t )
1530 t.start()
1531
1532 for t in threads:
1533 t.join()
1534 ports.append( t.result )
1535 links = []
1536 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001537 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001538 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001539 name="links-" + str( i ),
1540 args=[ ] )
1541 threads.append( t )
1542 t.start()
1543
1544 for t in threads:
1545 t.join()
1546 links.append( t.result )
1547 clusters = []
1548 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001549 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001550 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001551 name="clusters-" + str( i ),
1552 args=[ ] )
1553 threads.append( t )
1554 t.start()
1555
1556 for t in threads:
1557 t.join()
1558 clusters.append( t.result )
1559 # Compare json objects for hosts and dataplane clusters
1560
1561 # hosts
1562 main.step( "Host view is consistent across ONOS nodes" )
1563 consistentHostsResult = main.TRUE
1564 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001565 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001566 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001567 if hosts[ controller ] == hosts[ 0 ]:
1568 continue
1569 else: # hosts not consistent
1570 main.log.error( "hosts from ONOS" +
1571 controllerStr +
1572 " is inconsistent with ONOS1" )
1573 main.log.warn( repr( hosts[ controller ] ) )
1574 consistentHostsResult = main.FALSE
1575
1576 else:
1577 main.log.error( "Error in getting ONOS hosts from ONOS" +
1578 controllerStr )
1579 consistentHostsResult = main.FALSE
1580 main.log.warn( "ONOS" + controllerStr +
1581 " hosts response: " +
1582 repr( hosts[ controller ] ) )
1583 utilities.assert_equals(
1584 expect=main.TRUE,
1585 actual=consistentHostsResult,
1586 onpass="Hosts view is consistent across all ONOS nodes",
1587 onfail="ONOS nodes have different views of hosts" )
1588
1589 main.step( "Each host has an IP address" )
1590 ipResult = main.TRUE
1591 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001592 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001593 if hosts[ controller ]:
1594 for host in hosts[ controller ]:
1595 if not host.get( 'ipAddresses', [ ] ):
1596 main.log.error( "Error with host ips on controller" +
1597 controllerStr + ": " + str( host ) )
1598 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001599 utilities.assert_equals(
1600 expect=main.TRUE,
1601 actual=ipResult,
1602 onpass="The ips of the hosts aren't empty",
1603 onfail="The ip of at least one host is missing" )
1604
1605 # Strongly connected clusters of devices
1606 main.step( "Cluster view is consistent across ONOS nodes" )
1607 consistentClustersResult = main.TRUE
1608 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001609 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001610 if "Error" not in clusters[ controller ]:
1611 if clusters[ controller ] == clusters[ 0 ]:
1612 continue
1613 else: # clusters not consistent
1614 main.log.error( "clusters from ONOS" + controllerStr +
1615 " is inconsistent with ONOS1" )
1616 consistentClustersResult = main.FALSE
1617
1618 else:
1619 main.log.error( "Error in getting dataplane clusters " +
1620 "from ONOS" + controllerStr )
1621 consistentClustersResult = main.FALSE
1622 main.log.warn( "ONOS" + controllerStr +
1623 " clusters response: " +
1624 repr( clusters[ controller ] ) )
1625 utilities.assert_equals(
1626 expect=main.TRUE,
1627 actual=consistentClustersResult,
1628 onpass="Clusters view is consistent across all ONOS nodes",
1629 onfail="ONOS nodes have different views of clusters" )
1630 # there should always only be one cluster
1631 main.step( "Cluster view correct across ONOS nodes" )
1632 try:
1633 numClusters = len( json.loads( clusters[ 0 ] ) )
1634 except ( ValueError, TypeError ):
1635 main.log.exception( "Error parsing clusters[0]: " +
1636 repr( clusters[ 0 ] ) )
1637 clusterResults = main.FALSE
1638 if numClusters == 1:
1639 clusterResults = main.TRUE
1640 utilities.assert_equals(
1641 expect=1,
1642 actual=numClusters,
1643 onpass="ONOS shows 1 SCC",
1644 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1645
1646 main.step( "Comparing ONOS topology to MN" )
1647 devicesResults = main.TRUE
1648 linksResults = main.TRUE
1649 hostsResults = main.TRUE
1650 mnSwitches = main.Mininet1.getSwitches()
1651 mnLinks = main.Mininet1.getLinks()
1652 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001653 for controller in main.activeNodes:
1654 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001655 if devices[ controller ] and ports[ controller ] and\
1656 "Error" not in devices[ controller ] and\
1657 "Error" not in ports[ controller ]:
1658
1659 currentDevicesResult = main.Mininet1.compareSwitches(
1660 mnSwitches,
1661 json.loads( devices[ controller ] ),
1662 json.loads( ports[ controller ] ) )
1663 else:
1664 currentDevicesResult = main.FALSE
1665 utilities.assert_equals( expect=main.TRUE,
1666 actual=currentDevicesResult,
1667 onpass="ONOS" + controllerStr +
1668 " Switches view is correct",
1669 onfail="ONOS" + controllerStr +
1670 " Switches view is incorrect" )
1671 if links[ controller ] and "Error" not in links[ controller ]:
1672 currentLinksResult = main.Mininet1.compareLinks(
1673 mnSwitches, mnLinks,
1674 json.loads( links[ controller ] ) )
1675 else:
1676 currentLinksResult = main.FALSE
1677 utilities.assert_equals( expect=main.TRUE,
1678 actual=currentLinksResult,
1679 onpass="ONOS" + controllerStr +
1680 " links view is correct",
1681 onfail="ONOS" + controllerStr +
1682 " links view is incorrect" )
1683
Jon Hall657cdf62015-12-17 14:40:51 -08001684 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001685 currentHostsResult = main.Mininet1.compareHosts(
1686 mnHosts,
1687 hosts[ controller ] )
1688 else:
1689 currentHostsResult = main.FALSE
1690 utilities.assert_equals( expect=main.TRUE,
1691 actual=currentHostsResult,
1692 onpass="ONOS" + controllerStr +
1693 " hosts exist in Mininet",
1694 onfail="ONOS" + controllerStr +
1695 " hosts don't match Mininet" )
1696
1697 devicesResults = devicesResults and currentDevicesResult
1698 linksResults = linksResults and currentLinksResult
1699 hostsResults = hostsResults and currentHostsResult
1700
1701 main.step( "Device information is correct" )
1702 utilities.assert_equals(
1703 expect=main.TRUE,
1704 actual=devicesResults,
1705 onpass="Device information is correct",
1706 onfail="Device information is incorrect" )
1707
1708 main.step( "Links are correct" )
1709 utilities.assert_equals(
1710 expect=main.TRUE,
1711 actual=linksResults,
1712 onpass="Link are correct",
1713 onfail="Links are incorrect" )
1714
1715 main.step( "Hosts are correct" )
1716 utilities.assert_equals(
1717 expect=main.TRUE,
1718 actual=hostsResults,
1719 onpass="Hosts are correct",
1720 onfail="Hosts are incorrect" )
1721
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001722 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001723 """
1724 The Failure case.
1725 """
Jon Halle1a3b752015-07-22 13:02:46 -07001726 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001727 assert main, "main not defined"
1728 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001729 assert main.CLIs, "main.CLIs not defined"
1730 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001731 main.case( "Kill minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001732
1733 main.step( "Checking ONOS Logs for errors" )
1734 for node in main.nodes:
1735 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1736 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1737
Jon Hall3b489db2015-10-05 14:38:37 -07001738 n = len( main.nodes ) # Number of nodes
1739 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1740 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1741 if n > 3:
1742 main.kill.append( p - 1 )
1743 # NOTE: This only works for cluster sizes of 3,5, or 7.
1744
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001745 main.step( "Kill " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001746 killResults = main.TRUE
1747 for i in main.kill:
1748 killResults = killResults and\
1749 main.ONOSbench.onosKill( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001750 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001751 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001752 onpass="ONOS nodes killed successfully",
1753 onfail="ONOS nodes NOT successfully killed" )
1754
1755 def CASE62( self, main ):
1756 """
1757 The bring up stopped nodes
1758 """
1759 import time
1760 assert main.numCtrls, "main.numCtrls not defined"
1761 assert main, "main not defined"
1762 assert utilities.assert_equals, "utilities.assert_equals not defined"
1763 assert main.CLIs, "main.CLIs not defined"
1764 assert main.nodes, "main.nodes not defined"
1765 assert main.kill, "main.kill not defined"
1766 main.case( "Restart minority of ONOS nodes" )
1767
1768 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1769 startResults = main.TRUE
1770 restartTime = time.time()
1771 for i in main.kill:
1772 startResults = startResults and\
1773 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1774 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1775 onpass="ONOS nodes started successfully",
1776 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001777
1778 main.step( "Checking if ONOS is up yet" )
1779 count = 0
1780 onosIsupResult = main.FALSE
1781 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001782 onosIsupResult = main.TRUE
1783 for i in main.kill:
1784 onosIsupResult = onosIsupResult and\
1785 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001786 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001787 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1788 onpass="ONOS restarted successfully",
1789 onfail="ONOS restart NOT successful" )
1790
Jon Halle1a3b752015-07-22 13:02:46 -07001791 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001792 cliResults = main.TRUE
1793 for i in main.kill:
1794 cliResults = cliResults and\
1795 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001796 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001797 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1798 onpass="ONOS cli restarted",
1799 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001800 main.activeNodes.sort()
1801 try:
1802 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1803 "List of active nodes has duplicates, this likely indicates something was run out of order"
1804 except AssertionError:
1805 main.log.exception( "" )
1806 main.cleanup()
1807 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001808
1809 # Grab the time of restart so we chan check how long the gossip
1810 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001811 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001812 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001813 # TODO: MAke this configurable. Also, we are breaking the above timer
1814 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001815 node = main.activeNodes[0]
1816 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1817 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1818 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001819
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        From every node still in main.activeNodes, verifies that:
          - each switch has a master assigned
          - device roles are readable and consistent across nodes
          - intents are readable, consistent across nodes, and unchanged
            from the pre-failure snapshot
          - switch flow tables match the pre-failure snapshot
          - the leadership election app still reports a single leader that
            is not one of the killed nodes

        Reads globals set by earlier cases: ``intentState`` (pre-failure
        intents, set in CASE5 per the NOTE below), ``flows`` (pre-failure
        flow tables -- presumably also captured in CASE5; TODO confirm),
        and ``main.kill`` from CASE61.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # CASE61 may not have run; default to "no nodes were killed"
        try:
            main.kill
        except AttributeError:
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel, one thread per node
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles query failed or returned an error string
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All responses must be identical to the first node's response
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # Dump each node's full roles view only when they disagree
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        # Flag any node whose intents query failed or returned an error
        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Sorted comparison so response ordering differences don't matter
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                             "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # one table row per intent id, one column per node's state
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states (e.g. INSTALLED counts)
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        if intentState and intentState == ONOSIntents[ 0 ]:
            sameIntents = main.TRUE
            main.log.info( "Intents are consistent with before failure" )
        # TODO: possibly the states have changed? we may need to figure out
        #       what the acceptable states are
        elif len( intentState ) == len( ONOSIntents[ 0 ] ):
            # Same length but not byte-identical: fall back to a per-intent
            # membership check of the before-snapshot against the current set
            sameIntents = main.TRUE
            try:
                before = json.loads( intentState )
                after = json.loads( ONOSIntents[ 0 ] )
                for intent in before:
                    if intent not in after:
                        sameIntents = main.FALSE
                        main.log.debug( "Intent is not currently in ONOS " +
                                        "(at least in the same form):" )
                        main.log.debug( json.dumps( intent ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        if sameIntents == main.FALSE:
            try:
                main.log.debug( "ONOS intents before: " )
                main.log.debug( json.dumps( json.loads( intentState ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
                main.log.debug( "Current ONOS intents: " )
                main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        # Compare each switch's current flow table to the pre-failure
        # snapshot in the global `flows` (indexed by switch number - 1).
        # NOTE(review): assumes exactly 28 switches in the topology and
        # that `flows` was populated earlier -- TODO confirm.
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
            if FlowTables == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )

        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        # Stop the long-running background pings started earlier
        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes that were killed; the leader must not be one
        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        # Every active node must report the same, live leader
        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2136
2137 def CASE8( self, main ):
2138 """
2139 Compare topo
2140 """
2141 import json
2142 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002143 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002144 assert main, "main not defined"
2145 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002146 assert main.CLIs, "main.CLIs not defined"
2147 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002148
2149 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002150 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002151 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002152 topoResult = main.FALSE
2153 elapsed = 0
2154 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002155 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002156 startTime = time.time()
2157 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002158 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002159 devicesResults = main.TRUE
2160 linksResults = main.TRUE
2161 hostsResults = main.TRUE
2162 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002163 count += 1
2164 cliStart = time.time()
2165 devices = []
2166 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002167 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002168 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002169 name="devices-" + str( i ),
2170 args=[ ] )
2171 threads.append( t )
2172 t.start()
2173
2174 for t in threads:
2175 t.join()
2176 devices.append( t.result )
2177 hosts = []
2178 ipResult = main.TRUE
2179 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002180 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002181 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002182 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002183 args=[ main.CLIs[i].hosts, [ None ] ],
2184 kwargs= { 'sleep': 5, 'attempts': 5,
2185 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002186 threads.append( t )
2187 t.start()
2188
2189 for t in threads:
2190 t.join()
2191 try:
2192 hosts.append( json.loads( t.result ) )
2193 except ( ValueError, TypeError ):
2194 main.log.exception( "Error parsing hosts results" )
2195 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002196 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002197 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002198 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002199 if hosts[ controller ]:
2200 for host in hosts[ controller ]:
2201 if host is None or host.get( 'ipAddresses', [] ) == []:
2202 main.log.error(
2203 "Error with host ipAddresses on controller" +
2204 controllerStr + ": " + str( host ) )
2205 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002206 ports = []
2207 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002208 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002209 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002210 name="ports-" + str( i ),
2211 args=[ ] )
2212 threads.append( t )
2213 t.start()
2214
2215 for t in threads:
2216 t.join()
2217 ports.append( t.result )
2218 links = []
2219 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002220 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002221 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002222 name="links-" + str( i ),
2223 args=[ ] )
2224 threads.append( t )
2225 t.start()
2226
2227 for t in threads:
2228 t.join()
2229 links.append( t.result )
2230 clusters = []
2231 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002232 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002233 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002234 name="clusters-" + str( i ),
2235 args=[ ] )
2236 threads.append( t )
2237 t.start()
2238
2239 for t in threads:
2240 t.join()
2241 clusters.append( t.result )
2242
2243 elapsed = time.time() - startTime
2244 cliTime = time.time() - cliStart
2245 print "Elapsed time: " + str( elapsed )
2246 print "CLI time: " + str( cliTime )
2247
2248 mnSwitches = main.Mininet1.getSwitches()
2249 mnLinks = main.Mininet1.getLinks()
2250 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002251 for controller in range( len( main.activeNodes ) ):
2252 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002253 if devices[ controller ] and ports[ controller ] and\
2254 "Error" not in devices[ controller ] and\
2255 "Error" not in ports[ controller ]:
2256
2257 currentDevicesResult = main.Mininet1.compareSwitches(
2258 mnSwitches,
2259 json.loads( devices[ controller ] ),
2260 json.loads( ports[ controller ] ) )
2261 else:
2262 currentDevicesResult = main.FALSE
2263 utilities.assert_equals( expect=main.TRUE,
2264 actual=currentDevicesResult,
2265 onpass="ONOS" + controllerStr +
2266 " Switches view is correct",
2267 onfail="ONOS" + controllerStr +
2268 " Switches view is incorrect" )
2269
2270 if links[ controller ] and "Error" not in links[ controller ]:
2271 currentLinksResult = main.Mininet1.compareLinks(
2272 mnSwitches, mnLinks,
2273 json.loads( links[ controller ] ) )
2274 else:
2275 currentLinksResult = main.FALSE
2276 utilities.assert_equals( expect=main.TRUE,
2277 actual=currentLinksResult,
2278 onpass="ONOS" + controllerStr +
2279 " links view is correct",
2280 onfail="ONOS" + controllerStr +
2281 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002282 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002283 currentHostsResult = main.Mininet1.compareHosts(
2284 mnHosts,
2285 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002286 elif hosts[ controller ] == []:
2287 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002288 else:
2289 currentHostsResult = main.FALSE
2290 utilities.assert_equals( expect=main.TRUE,
2291 actual=currentHostsResult,
2292 onpass="ONOS" + controllerStr +
2293 " hosts exist in Mininet",
2294 onfail="ONOS" + controllerStr +
2295 " hosts don't match Mininet" )
2296 # CHECKING HOST ATTACHMENT POINTS
2297 hostAttachment = True
2298 zeroHosts = False
2299 # FIXME: topo-HA/obelisk specific mappings:
2300 # key is mac and value is dpid
2301 mappings = {}
2302 for i in range( 1, 29 ): # hosts 1 through 28
2303 # set up correct variables:
2304 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2305 if i == 1:
2306 deviceId = "1000".zfill(16)
2307 elif i == 2:
2308 deviceId = "2000".zfill(16)
2309 elif i == 3:
2310 deviceId = "3000".zfill(16)
2311 elif i == 4:
2312 deviceId = "3004".zfill(16)
2313 elif i == 5:
2314 deviceId = "5000".zfill(16)
2315 elif i == 6:
2316 deviceId = "6000".zfill(16)
2317 elif i == 7:
2318 deviceId = "6007".zfill(16)
2319 elif i >= 8 and i <= 17:
2320 dpid = '3' + str( i ).zfill( 3 )
2321 deviceId = dpid.zfill(16)
2322 elif i >= 18 and i <= 27:
2323 dpid = '6' + str( i ).zfill( 3 )
2324 deviceId = dpid.zfill(16)
2325 elif i == 28:
2326 deviceId = "2800".zfill(16)
2327 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002328 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002329 if hosts[ controller ] == []:
2330 main.log.warn( "There are no hosts discovered" )
2331 zeroHosts = True
2332 else:
2333 for host in hosts[ controller ]:
2334 mac = None
2335 location = None
2336 device = None
2337 port = None
2338 try:
2339 mac = host.get( 'mac' )
2340 assert mac, "mac field could not be found for this host object"
2341
2342 location = host.get( 'location' )
2343 assert location, "location field could not be found for this host object"
2344
2345 # Trim the protocol identifier off deviceId
2346 device = str( location.get( 'elementId' ) ).split(':')[1]
2347 assert device, "elementId field could not be found for this host location object"
2348
2349 port = location.get( 'port' )
2350 assert port, "port field could not be found for this host location object"
2351
2352 # Now check if this matches where they should be
2353 if mac and device and port:
2354 if str( port ) != "1":
2355 main.log.error( "The attachment port is incorrect for " +
2356 "host " + str( mac ) +
2357 ". Expected: 1 Actual: " + str( port) )
2358 hostAttachment = False
2359 if device != mappings[ str( mac ) ]:
2360 main.log.error( "The attachment device is incorrect for " +
2361 "host " + str( mac ) +
2362 ". Expected: " + mappings[ str( mac ) ] +
2363 " Actual: " + device )
2364 hostAttachment = False
2365 else:
2366 hostAttachment = False
2367 except AssertionError:
2368 main.log.exception( "Json object not as expected" )
2369 main.log.error( repr( host ) )
2370 hostAttachment = False
2371 else:
2372 main.log.error( "No hosts json output or \"Error\"" +
2373 " in output. hosts = " +
2374 repr( hosts[ controller ] ) )
2375 if zeroHosts is False:
2376 hostAttachment = True
2377
2378 # END CHECKING HOST ATTACHMENT POINTS
2379 devicesResults = devicesResults and currentDevicesResult
2380 linksResults = linksResults and currentLinksResult
2381 hostsResults = hostsResults and currentHostsResult
2382 hostAttachmentResults = hostAttachmentResults and\
2383 hostAttachment
Jon Halle9b1fa32015-12-08 15:32:21 -08002384 topoResult = devicesResults and linksResults and\
2385 hostsResults and hostAttachmentResults
2386 utilities.assert_equals( expect=True,
2387 actual=topoResult,
2388 onpass="ONOS topology matches Mininet",
2389 onfail="ONOS topology don't match Mininet" )
2390 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002391
2392 # Compare json objects for hosts and dataplane clusters
2393
2394 # hosts
2395 main.step( "Hosts view is consistent across all ONOS nodes" )
2396 consistentHostsResult = main.TRUE
2397 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002398 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002399 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002400 if hosts[ controller ] == hosts[ 0 ]:
2401 continue
2402 else: # hosts not consistent
2403 main.log.error( "hosts from ONOS" + controllerStr +
2404 " is inconsistent with ONOS1" )
2405 main.log.warn( repr( hosts[ controller ] ) )
2406 consistentHostsResult = main.FALSE
2407
2408 else:
2409 main.log.error( "Error in getting ONOS hosts from ONOS" +
2410 controllerStr )
2411 consistentHostsResult = main.FALSE
2412 main.log.warn( "ONOS" + controllerStr +
2413 " hosts response: " +
2414 repr( hosts[ controller ] ) )
2415 utilities.assert_equals(
2416 expect=main.TRUE,
2417 actual=consistentHostsResult,
2418 onpass="Hosts view is consistent across all ONOS nodes",
2419 onfail="ONOS nodes have different views of hosts" )
2420
2421 main.step( "Hosts information is correct" )
2422 hostsResults = hostsResults and ipResult
2423 utilities.assert_equals(
2424 expect=main.TRUE,
2425 actual=hostsResults,
2426 onpass="Host information is correct",
2427 onfail="Host information is incorrect" )
2428
2429 main.step( "Host attachment points to the network" )
2430 utilities.assert_equals(
2431 expect=True,
2432 actual=hostAttachmentResults,
2433 onpass="Hosts are correctly attached to the network",
2434 onfail="ONOS did not correctly attach hosts to the network" )
2435
2436 # Strongly connected clusters of devices
2437 main.step( "Clusters view is consistent across all ONOS nodes" )
2438 consistentClustersResult = main.TRUE
2439 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002440 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002441 if "Error" not in clusters[ controller ]:
2442 if clusters[ controller ] == clusters[ 0 ]:
2443 continue
2444 else: # clusters not consistent
2445 main.log.error( "clusters from ONOS" +
2446 controllerStr +
2447 " is inconsistent with ONOS1" )
2448 consistentClustersResult = main.FALSE
2449
2450 else:
2451 main.log.error( "Error in getting dataplane clusters " +
2452 "from ONOS" + controllerStr )
2453 consistentClustersResult = main.FALSE
2454 main.log.warn( "ONOS" + controllerStr +
2455 " clusters response: " +
2456 repr( clusters[ controller ] ) )
2457 utilities.assert_equals(
2458 expect=main.TRUE,
2459 actual=consistentClustersResult,
2460 onpass="Clusters view is consistent across all ONOS nodes",
2461 onfail="ONOS nodes have different views of clusters" )
2462
2463 main.step( "There is only one SCC" )
2464 # there should always only be one cluster
2465 try:
2466 numClusters = len( json.loads( clusters[ 0 ] ) )
2467 except ( ValueError, TypeError ):
2468 main.log.exception( "Error parsing clusters[0]: " +
2469 repr( clusters[0] ) )
2470 clusterResults = main.FALSE
2471 if numClusters == 1:
2472 clusterResults = main.TRUE
2473 utilities.assert_equals(
2474 expect=1,
2475 actual=numClusters,
2476 onpass="ONOS shows 1 SCC",
2477 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2478
2479 topoResult = ( devicesResults and linksResults
2480 and hostsResults and consistentHostsResult
2481 and consistentClustersResult and clusterResults
2482 and ipResult and hostAttachmentResults )
2483
2484 topoResult = topoResult and int( count <= 2 )
2485 note = "note it takes about " + str( int( cliTime ) ) + \
2486 " seconds for the test to make all the cli calls to fetch " +\
2487 "the topology from each ONOS instance"
2488 main.log.info(
2489 "Very crass estimate for topology discovery/convergence( " +
2490 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2491 str( count ) + " tries" )
2492
2493 main.step( "Device information is correct" )
2494 utilities.assert_equals(
2495 expect=main.TRUE,
2496 actual=devicesResults,
2497 onpass="Device information is correct",
2498 onfail="Device information is incorrect" )
2499
2500 main.step( "Links are correct" )
2501 utilities.assert_equals(
2502 expect=main.TRUE,
2503 actual=linksResults,
2504 onpass="Link are correct",
2505 onfail="Links are incorrect" )
2506
2507 # FIXME: move this to an ONOS state case
2508 main.step( "Checking ONOS nodes" )
2509 nodesOutput = []
2510 nodeResults = main.TRUE
2511 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002512 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002513 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002514 name="nodes-" + str( i ),
2515 args=[ ] )
2516 threads.append( t )
2517 t.start()
2518
2519 for t in threads:
2520 t.join()
2521 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002522 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002523 ips.sort()
Jon Hall5cf14d52015-07-16 12:15:19 -07002524 for i in nodesOutput:
2525 try:
2526 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002527 activeIps = []
2528 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002529 for node in current:
Jon Halle9b1fa32015-12-08 15:32:21 -08002530 if node['state'] == 'ACTIVE':
2531 activeIps.append( node['ip'] )
2532 activeIps.sort()
2533 if ips == activeIps:
2534 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002535 except ( ValueError, TypeError ):
2536 main.log.error( "Error parsing nodes output" )
2537 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002538 currentResult = main.FALSE
2539 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002540 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2541 onpass="Nodes check successful",
2542 onfail="Nodes check NOT successful" )
2543
2544 def CASE9( self, main ):
2545 """
2546 Link s3-s28 down
2547 """
2548 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002549 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002550 assert main, "main not defined"
2551 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002552 assert main.CLIs, "main.CLIs not defined"
2553 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002554 # NOTE: You should probably run a topology check after this
2555
2556 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2557
2558 description = "Turn off a link to ensure that Link Discovery " +\
2559 "is working properly"
2560 main.case( description )
2561
2562 main.step( "Kill Link between s3 and s28" )
2563 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2564 main.log.info( "Waiting " + str( linkSleep ) +
2565 " seconds for link down to be discovered" )
2566 time.sleep( linkSleep )
2567 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2568 onpass="Link down successful",
2569 onfail="Failed to bring link down" )
2570 # TODO do some sort of check here
2571
2572 def CASE10( self, main ):
2573 """
2574 Link s3-s28 up
2575 """
2576 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002577 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002578 assert main, "main not defined"
2579 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002580 assert main.CLIs, "main.CLIs not defined"
2581 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002582 # NOTE: You should probably run a topology check after this
2583
2584 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2585
2586 description = "Restore a link to ensure that Link Discovery is " + \
2587 "working properly"
2588 main.case( description )
2589
2590 main.step( "Bring link between s3 and s28 back up" )
2591 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2592 main.log.info( "Waiting " + str( linkSleep ) +
2593 " seconds for link up to be discovered" )
2594 time.sleep( linkSleep )
2595 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2596 onpass="Link up successful",
2597 onfail="Failed to bring link up" )
2598 # TODO do some sort of check here
2599
2600 def CASE11( self, main ):
2601 """
2602 Switch Down
2603 """
2604 # NOTE: You should probably run a topology check after this
2605 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002606 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002607 assert main, "main not defined"
2608 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002609 assert main.CLIs, "main.CLIs not defined"
2610 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002611
2612 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2613
2614 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002615 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002616 main.case( description )
2617 switch = main.params[ 'kill' ][ 'switch' ]
2618 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2619
2620 # TODO: Make this switch parameterizable
2621 main.step( "Kill " + switch )
2622 main.log.info( "Deleting " + switch )
2623 main.Mininet1.delSwitch( switch )
2624 main.log.info( "Waiting " + str( switchSleep ) +
2625 " seconds for switch down to be discovered" )
2626 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002627 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002628 # Peek at the deleted switch
2629 main.log.warn( str( device ) )
2630 result = main.FALSE
2631 if device and device[ 'available' ] is False:
2632 result = main.TRUE
2633 utilities.assert_equals( expect=main.TRUE, actual=result,
2634 onpass="Kill switch successful",
2635 onfail="Failed to kill switch?" )
2636
2637 def CASE12( self, main ):
2638 """
2639 Switch Up
2640 """
2641 # NOTE: You should probably run a topology check after this
2642 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002643 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002644 assert main, "main not defined"
2645 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002646 assert main.CLIs, "main.CLIs not defined"
2647 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002648 assert ONOS1Port, "ONOS1Port not defined"
2649 assert ONOS2Port, "ONOS2Port not defined"
2650 assert ONOS3Port, "ONOS3Port not defined"
2651 assert ONOS4Port, "ONOS4Port not defined"
2652 assert ONOS5Port, "ONOS5Port not defined"
2653 assert ONOS6Port, "ONOS6Port not defined"
2654 assert ONOS7Port, "ONOS7Port not defined"
2655
2656 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2657 switch = main.params[ 'kill' ][ 'switch' ]
2658 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2659 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002660 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002661 description = "Adding a switch to ensure it is discovered correctly"
2662 main.case( description )
2663
2664 main.step( "Add back " + switch )
2665 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2666 for peer in links:
2667 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002668 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002669 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2670 main.log.info( "Waiting " + str( switchSleep ) +
2671 " seconds for switch up to be discovered" )
2672 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002673 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002674 # Peek at the deleted switch
2675 main.log.warn( str( device ) )
2676 result = main.FALSE
2677 if device and device[ 'available' ]:
2678 result = main.TRUE
2679 utilities.assert_equals( expect=main.TRUE, actual=result,
2680 onpass="add switch successful",
2681 onfail="Failed to add switch?" )
2682
2683 def CASE13( self, main ):
2684 """
2685 Clean up
2686 """
2687 import os
2688 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002689 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002690 assert main, "main not defined"
2691 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002692 assert main.CLIs, "main.CLIs not defined"
2693 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002694
2695 # printing colors to terminal
2696 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2697 'blue': '\033[94m', 'green': '\033[92m',
2698 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2699 main.case( "Test Cleanup" )
2700 main.step( "Killing tcpdumps" )
2701 main.Mininet2.stopTcpdump()
2702
2703 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002704 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002705 main.step( "Copying MN pcap and ONOS log files to test station" )
2706 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2707 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002708 # NOTE: MN Pcap file is being saved to logdir.
2709 # We scp this file as MN and TestON aren't necessarily the same vm
2710
2711 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002712 # TODO: Load these from params
2713 # NOTE: must end in /
2714 logFolder = "/opt/onos/log/"
2715 logFiles = [ "karaf.log", "karaf.log.1" ]
2716 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002717 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002718 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002719 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002720 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2721 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002722 # std*.log's
2723 # NOTE: must end in /
2724 logFolder = "/opt/onos/var/"
2725 logFiles = [ "stderr.log", "stdout.log" ]
2726 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002727 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002728 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002729 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002730 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2731 logFolder + f, dstName )
2732 else:
2733 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002734
2735 main.step( "Stopping Mininet" )
2736 mnResult = main.Mininet1.stopNet()
2737 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2738 onpass="Mininet stopped",
2739 onfail="MN cleanup NOT successful" )
2740
2741 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002742 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002743 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2744 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002745
2746 try:
2747 timerLog = open( main.logdir + "/Timers.csv", 'w')
2748 # Overwrite with empty line and close
2749 labels = "Gossip Intents, Restart"
2750 data = str( gossipTime ) + ", " + str( main.restartTime )
2751 timerLog.write( labels + "\n" + data )
2752 timerLog.close()
2753 except NameError, e:
2754 main.log.exception(e)
2755
2756 def CASE14( self, main ):
2757 """
2758 start election app on all onos nodes
2759 """
Jon Halle1a3b752015-07-22 13:02:46 -07002760 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002761 assert main, "main not defined"
2762 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002763 assert main.CLIs, "main.CLIs not defined"
2764 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002765
2766 main.case("Start Leadership Election app")
2767 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002768 onosCli = main.CLIs[ main.activeNodes[0] ]
2769 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002770 utilities.assert_equals(
2771 expect=main.TRUE,
2772 actual=appResult,
2773 onpass="Election app installed",
2774 onfail="Something went wrong with installing Leadership election" )
2775
2776 main.step( "Run for election on each node" )
2777 leaderResult = main.TRUE
2778 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002779 for i in main.activeNodes:
2780 main.CLIs[i].electionTestRun()
2781 for i in main.activeNodes:
2782 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002783 leader = cli.electionTestLeader()
2784 if leader is None or leader == main.FALSE:
2785 main.log.error( cli.name + ": Leader for the election app " +
2786 "should be an ONOS node, instead got '" +
2787 str( leader ) + "'" )
2788 leaderResult = main.FALSE
2789 leaders.append( leader )
2790 utilities.assert_equals(
2791 expect=main.TRUE,
2792 actual=leaderResult,
2793 onpass="Successfully ran for leadership",
2794 onfail="Failed to run for leadership" )
2795
2796 main.step( "Check that each node shows the same leader" )
2797 sameLeader = main.TRUE
2798 if len( set( leaders ) ) != 1:
2799 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002800 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002801 str( leaders ) )
2802 utilities.assert_equals(
2803 expect=main.TRUE,
2804 actual=sameLeader,
2805 onpass="Leadership is consistent for the election topic",
2806 onfail="Nodes have different leaders" )
2807
2808 def CASE15( self, main ):
2809 """
2810 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002811 15.1 Run election on each node
2812 15.2 Check that each node has the same leaders and candidates
2813 15.3 Find current leader and withdraw
2814 15.4 Check that a new node was elected leader
2815 15.5 Check that that new leader was the candidate of old leader
2816 15.6 Run for election on old leader
2817 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2818 15.8 Make sure that the old leader was added to the candidate list
2819
2820 old and new variable prefixes refer to data from before vs after
2821 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002822 """
2823 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002824 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002825 assert main, "main not defined"
2826 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002827 assert main.CLIs, "main.CLIs not defined"
2828 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002829
Jon Hall5cf14d52015-07-16 12:15:19 -07002830 description = "Check that Leadership Election is still functional"
2831 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002832 # NOTE: Need to re-run since being a canidate is not persistant
2833 # TODO: add check for "Command not found:" in the driver, this
2834 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002835
acsmars71adceb2015-08-31 15:09:26 -07002836 oldLeaders = [] # leaders by node before withdrawl from candidates
2837 newLeaders = [] # leaders by node after withdrawl from candidates
2838 oldAllCandidates = [] # list of lists of each nodes' candidates before
2839 newAllCandidates = [] # list of lists of each nodes' candidates after
2840 oldCandidates = [] # list of candidates from node 0 before withdrawl
2841 newCandidates = [] # list of candidates from node 0 after withdrawl
2842 oldLeader = '' # the old leader from oldLeaders, None if not same
2843 newLeader = '' # the new leaders fron newLoeaders, None if not same
2844 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2845 expectNoLeader = False # True when there is only one leader
2846 if main.numCtrls == 1:
2847 expectNoLeader = True
2848
2849 main.step( "Run for election on each node" )
2850 electionResult = main.TRUE
2851
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002852 for i in main.activeNodes: # run test election on each node
2853 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002854 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002855 utilities.assert_equals(
2856 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002857 actual=electionResult,
2858 onpass="All nodes successfully ran for leadership",
2859 onfail="At least one node failed to run for leadership" )
2860
acsmars3a72bde2015-09-02 14:16:22 -07002861 if electionResult == main.FALSE:
2862 main.log.error(
2863 "Skipping Test Case because Election Test App isn't loaded" )
2864 main.skipCase()
2865
acsmars71adceb2015-08-31 15:09:26 -07002866 main.step( "Check that each node shows the same leader and candidates" )
2867 sameResult = main.TRUE
2868 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002869 for i in main.activeNodes:
2870 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002871 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2872 oldAllCandidates.append( node )
2873 oldLeaders.append( node[ 0 ] )
2874 oldCandidates = oldAllCandidates[ 0 ]
2875
2876 # Check that each node has the same leader. Defines oldLeader
2877 if len( set( oldLeaders ) ) != 1:
2878 sameResult = main.FALSE
2879 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2880 oldLeader = None
2881 else:
2882 oldLeader = oldLeaders[ 0 ]
2883
2884 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002885 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002886 for candidates in oldAllCandidates:
2887 if set( candidates ) != set( oldCandidates ):
2888 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002889 candidateDiscrepancy = True
2890
2891 if candidateDiscrepancy:
2892 failMessage += " and candidates"
2893
acsmars71adceb2015-08-31 15:09:26 -07002894 utilities.assert_equals(
2895 expect=main.TRUE,
2896 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002897 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002898 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002899
2900 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002901 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002902 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002903 if oldLeader is None:
2904 main.log.error( "Leadership isn't consistent." )
2905 withdrawResult = main.FALSE
2906 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002907 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002908 if oldLeader == main.nodes[ i ].ip_address:
2909 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002910 break
2911 else: # FOR/ELSE statement
2912 main.log.error( "Leader election, could not find current leader" )
2913 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002914 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002915 utilities.assert_equals(
2916 expect=main.TRUE,
2917 actual=withdrawResult,
2918 onpass="Node was withdrawn from election",
2919 onfail="Node was not withdrawn from election" )
2920
acsmars71adceb2015-08-31 15:09:26 -07002921 main.step( "Check that a new node was elected leader" )
2922
Jon Hall5cf14d52015-07-16 12:15:19 -07002923 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002924 newLeaderResult = main.TRUE
2925 failMessage = "Nodes have different leaders"
2926
2927 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002928 for i in main.activeNodes:
2929 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002930 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2931 # elections might no have finished yet
2932 if node[ 0 ] == 'none' and not expectNoLeader:
2933 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2934 "sure elections are complete." )
2935 time.sleep(5)
2936 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2937 # election still isn't done or there is a problem
2938 if node[ 0 ] == 'none':
2939 main.log.error( "No leader was elected on at least 1 node" )
2940 newLeaderResult = main.FALSE
2941 newAllCandidates.append( node )
2942 newLeaders.append( node[ 0 ] )
2943 newCandidates = newAllCandidates[ 0 ]
2944
2945 # Check that each node has the same leader. Defines newLeader
2946 if len( set( newLeaders ) ) != 1:
2947 newLeaderResult = main.FALSE
2948 main.log.error( "Nodes have different leaders: " +
2949 str( newLeaders ) )
2950 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002951 else:
acsmars71adceb2015-08-31 15:09:26 -07002952 newLeader = newLeaders[ 0 ]
2953
2954 # Check that each node's candidate list is the same
2955 for candidates in newAllCandidates:
2956 if set( candidates ) != set( newCandidates ):
2957 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002958 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002959
2960 # Check that the new leader is not the older leader, which was withdrawn
2961 if newLeader == oldLeader:
2962 newLeaderResult = main.FALSE
2963 main.log.error( "All nodes still see old leader: " + oldLeader +
2964 " as the current leader" )
2965
Jon Hall5cf14d52015-07-16 12:15:19 -07002966 utilities.assert_equals(
2967 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002968 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002969 onpass="Leadership election passed",
2970 onfail="Something went wrong with Leadership election" )
2971
acsmars71adceb2015-08-31 15:09:26 -07002972 main.step( "Check that that new leader was the candidate of old leader")
2973 # candidates[ 2 ] should be come the top candidate after withdrawl
2974 correctCandidateResult = main.TRUE
2975 if expectNoLeader:
2976 if newLeader == 'none':
2977 main.log.info( "No leader expected. None found. Pass" )
2978 correctCandidateResult = main.TRUE
2979 else:
2980 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2981 correctCandidateResult = main.FALSE
2982 elif newLeader != oldCandidates[ 2 ]:
2983 correctCandidateResult = main.FALSE
2984 main.log.error( "Candidate " + newLeader + " was elected. " +
2985 oldCandidates[ 2 ] + " should have had priority." )
2986
2987 utilities.assert_equals(
2988 expect=main.TRUE,
2989 actual=correctCandidateResult,
2990 onpass="Correct Candidate Elected",
2991 onfail="Incorrect Candidate Elected" )
2992
Jon Hall5cf14d52015-07-16 12:15:19 -07002993 main.step( "Run for election on old leader( just so everyone " +
2994 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002995 if oldLeaderCLI is not None:
2996 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002997 else:
acsmars71adceb2015-08-31 15:09:26 -07002998 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002999 runResult = main.FALSE
3000 utilities.assert_equals(
3001 expect=main.TRUE,
3002 actual=runResult,
3003 onpass="App re-ran for election",
3004 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07003005 main.step(
3006 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003007 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07003008 positionResult = main.TRUE
3009 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
3010
3011 # Reset and reuse the new candidate and leaders lists
3012 newAllCandidates = []
3013 newCandidates = []
3014 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003015 for i in main.activeNodes:
3016 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003017 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3018 if oldLeader not in node: # election might no have finished yet
3019 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3020 "be sure elections are complete" )
3021 time.sleep(5)
3022 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3023 if oldLeader not in node: # election still isn't done, errors
3024 main.log.error(
3025 "Old leader was not elected on at least one node" )
3026 positionResult = main.FALSE
3027 newAllCandidates.append( node )
3028 newLeaders.append( node[ 0 ] )
3029 newCandidates = newAllCandidates[ 0 ]
3030
3031 # Check that each node has the same leader. Defines newLeader
3032 if len( set( newLeaders ) ) != 1:
3033 positionResult = main.FALSE
3034 main.log.error( "Nodes have different leaders: " +
3035 str( newLeaders ) )
3036 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003037 else:
acsmars71adceb2015-08-31 15:09:26 -07003038 newLeader = newLeaders[ 0 ]
3039
3040 # Check that each node's candidate list is the same
3041 for candidates in newAllCandidates:
3042 if set( candidates ) != set( newCandidates ):
3043 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003044 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003045
3046 # Check that the re-elected node is last on the candidate List
3047 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003048 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003049 str( newCandidates ) )
3050 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003051
3052 utilities.assert_equals(
3053 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003054 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003055 onpass="Old leader successfully re-ran for election",
3056 onfail="Something went wrong with Leadership election after " +
3057 "the old leader re-ran for election" )
3058
3059 def CASE16( self, main ):
3060 """
3061 Install Distributed Primitives app
3062 """
3063 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003064 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003065 assert main, "main not defined"
3066 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003067 assert main.CLIs, "main.CLIs not defined"
3068 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003069
3070 # Variables for the distributed primitives tests
3071 global pCounterName
3072 global iCounterName
3073 global pCounterValue
3074 global iCounterValue
3075 global onosSet
3076 global onosSetName
3077 pCounterName = "TestON-Partitions"
3078 iCounterName = "TestON-inMemory"
3079 pCounterValue = 0
3080 iCounterValue = 0
3081 onosSet = set([])
3082 onosSetName = "TestON-set"
3083
3084 description = "Install Primitives app"
3085 main.case( description )
3086 main.step( "Install Primitives app" )
3087 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003088 node = main.activeNodes[0]
3089 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003090 utilities.assert_equals( expect=main.TRUE,
3091 actual=appResults,
3092 onpass="Primitives app activated",
3093 onfail="Primitives app not activated" )
3094 time.sleep( 5 ) # To allow all nodes to activate
3095
3096 def CASE17( self, main ):
3097 """
3098 Check for basic functionality with distributed primitives
3099 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003100 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003101 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003102 assert main, "main not defined"
3103 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003104 assert main.CLIs, "main.CLIs not defined"
3105 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003106 assert pCounterName, "pCounterName not defined"
3107 assert iCounterName, "iCounterName not defined"
3108 assert onosSetName, "onosSetName not defined"
3109 # NOTE: assert fails if value is 0/None/Empty/False
3110 try:
3111 pCounterValue
3112 except NameError:
3113 main.log.error( "pCounterValue not defined, setting to 0" )
3114 pCounterValue = 0
3115 try:
3116 iCounterValue
3117 except NameError:
3118 main.log.error( "iCounterValue not defined, setting to 0" )
3119 iCounterValue = 0
3120 try:
3121 onosSet
3122 except NameError:
3123 main.log.error( "onosSet not defined, setting to empty Set" )
3124 onosSet = set([])
3125 # Variables for the distributed primitives tests. These are local only
3126 addValue = "a"
3127 addAllValue = "a b c d e f"
3128 retainValue = "c d e f"
3129
3130 description = "Check for basic functionality with distributed " +\
3131 "primitives"
3132 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003133 main.caseExplanation = "Test the methods of the distributed " +\
3134 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003135 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003136 # Partitioned counters
3137 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003138 pCounters = []
3139 threads = []
3140 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003141 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003142 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3143 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003144 args=[ pCounterName ] )
3145 pCounterValue += 1
3146 addedPValues.append( pCounterValue )
3147 threads.append( t )
3148 t.start()
3149
3150 for t in threads:
3151 t.join()
3152 pCounters.append( t.result )
3153 # Check that counter incremented numController times
3154 pCounterResults = True
3155 for i in addedPValues:
3156 tmpResult = i in pCounters
3157 pCounterResults = pCounterResults and tmpResult
3158 if not tmpResult:
3159 main.log.error( str( i ) + " is not in partitioned "
3160 "counter incremented results" )
3161 utilities.assert_equals( expect=True,
3162 actual=pCounterResults,
3163 onpass="Default counter incremented",
3164 onfail="Error incrementing default" +
3165 " counter" )
3166
Jon Halle1a3b752015-07-22 13:02:46 -07003167 main.step( "Get then Increment a default counter on each node" )
3168 pCounters = []
3169 threads = []
3170 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003171 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003172 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3173 name="counterGetAndAdd-" + str( i ),
3174 args=[ pCounterName ] )
3175 addedPValues.append( pCounterValue )
3176 pCounterValue += 1
3177 threads.append( t )
3178 t.start()
3179
3180 for t in threads:
3181 t.join()
3182 pCounters.append( t.result )
3183 # Check that counter incremented numController times
3184 pCounterResults = True
3185 for i in addedPValues:
3186 tmpResult = i in pCounters
3187 pCounterResults = pCounterResults and tmpResult
3188 if not tmpResult:
3189 main.log.error( str( i ) + " is not in partitioned "
3190 "counter incremented results" )
3191 utilities.assert_equals( expect=True,
3192 actual=pCounterResults,
3193 onpass="Default counter incremented",
3194 onfail="Error incrementing default" +
3195 " counter" )
3196
3197 main.step( "Counters we added have the correct values" )
3198 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3199 utilities.assert_equals( expect=main.TRUE,
3200 actual=incrementCheck,
3201 onpass="Added counters are correct",
3202 onfail="Added counters are incorrect" )
3203
3204 main.step( "Add -8 to then get a default counter on each node" )
3205 pCounters = []
3206 threads = []
3207 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003208 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003209 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3210 name="counterIncrement-" + str( i ),
3211 args=[ pCounterName ],
3212 kwargs={ "delta": -8 } )
3213 pCounterValue += -8
3214 addedPValues.append( pCounterValue )
3215 threads.append( t )
3216 t.start()
3217
3218 for t in threads:
3219 t.join()
3220 pCounters.append( t.result )
3221 # Check that counter incremented numController times
3222 pCounterResults = True
3223 for i in addedPValues:
3224 tmpResult = i in pCounters
3225 pCounterResults = pCounterResults and tmpResult
3226 if not tmpResult:
3227 main.log.error( str( i ) + " is not in partitioned "
3228 "counter incremented results" )
3229 utilities.assert_equals( expect=True,
3230 actual=pCounterResults,
3231 onpass="Default counter incremented",
3232 onfail="Error incrementing default" +
3233 " counter" )
3234
3235 main.step( "Add 5 to then get a default counter on each node" )
3236 pCounters = []
3237 threads = []
3238 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003239 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003240 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3241 name="counterIncrement-" + str( i ),
3242 args=[ pCounterName ],
3243 kwargs={ "delta": 5 } )
3244 pCounterValue += 5
3245 addedPValues.append( pCounterValue )
3246 threads.append( t )
3247 t.start()
3248
3249 for t in threads:
3250 t.join()
3251 pCounters.append( t.result )
3252 # Check that counter incremented numController times
3253 pCounterResults = True
3254 for i in addedPValues:
3255 tmpResult = i in pCounters
3256 pCounterResults = pCounterResults and tmpResult
3257 if not tmpResult:
3258 main.log.error( str( i ) + " is not in partitioned "
3259 "counter incremented results" )
3260 utilities.assert_equals( expect=True,
3261 actual=pCounterResults,
3262 onpass="Default counter incremented",
3263 onfail="Error incrementing default" +
3264 " counter" )
3265
3266 main.step( "Get then add 5 to a default counter on each node" )
3267 pCounters = []
3268 threads = []
3269 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003270 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003271 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3272 name="counterIncrement-" + str( i ),
3273 args=[ pCounterName ],
3274 kwargs={ "delta": 5 } )
3275 addedPValues.append( pCounterValue )
3276 pCounterValue += 5
3277 threads.append( t )
3278 t.start()
3279
3280 for t in threads:
3281 t.join()
3282 pCounters.append( t.result )
3283 # Check that counter incremented numController times
3284 pCounterResults = True
3285 for i in addedPValues:
3286 tmpResult = i in pCounters
3287 pCounterResults = pCounterResults and tmpResult
3288 if not tmpResult:
3289 main.log.error( str( i ) + " is not in partitioned "
3290 "counter incremented results" )
3291 utilities.assert_equals( expect=True,
3292 actual=pCounterResults,
3293 onpass="Default counter incremented",
3294 onfail="Error incrementing default" +
3295 " counter" )
3296
3297 main.step( "Counters we added have the correct values" )
3298 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3299 utilities.assert_equals( expect=main.TRUE,
3300 actual=incrementCheck,
3301 onpass="Added counters are correct",
3302 onfail="Added counters are incorrect" )
3303
3304 # In-Memory counters
3305 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003306 iCounters = []
3307 addedIValues = []
3308 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003309 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003310 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003311 name="icounterIncrement-" + str( i ),
3312 args=[ iCounterName ],
3313 kwargs={ "inMemory": True } )
3314 iCounterValue += 1
3315 addedIValues.append( iCounterValue )
3316 threads.append( t )
3317 t.start()
3318
3319 for t in threads:
3320 t.join()
3321 iCounters.append( t.result )
3322 # Check that counter incremented numController times
3323 iCounterResults = True
3324 for i in addedIValues:
3325 tmpResult = i in iCounters
3326 iCounterResults = iCounterResults and tmpResult
3327 if not tmpResult:
3328 main.log.error( str( i ) + " is not in the in-memory "
3329 "counter incremented results" )
3330 utilities.assert_equals( expect=True,
3331 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003332 onpass="In-memory counter incremented",
3333 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003334 " counter" )
3335
Jon Halle1a3b752015-07-22 13:02:46 -07003336 main.step( "Get then Increment a in-memory counter on each node" )
3337 iCounters = []
3338 threads = []
3339 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003340 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003341 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3342 name="counterGetAndAdd-" + str( i ),
3343 args=[ iCounterName ],
3344 kwargs={ "inMemory": True } )
3345 addedIValues.append( iCounterValue )
3346 iCounterValue += 1
3347 threads.append( t )
3348 t.start()
3349
3350 for t in threads:
3351 t.join()
3352 iCounters.append( t.result )
3353 # Check that counter incremented numController times
3354 iCounterResults = True
3355 for i in addedIValues:
3356 tmpResult = i in iCounters
3357 iCounterResults = iCounterResults and tmpResult
3358 if not tmpResult:
3359 main.log.error( str( i ) + " is not in in-memory "
3360 "counter incremented results" )
3361 utilities.assert_equals( expect=True,
3362 actual=iCounterResults,
3363 onpass="In-memory counter incremented",
3364 onfail="Error incrementing in-memory" +
3365 " counter" )
3366
3367 main.step( "Counters we added have the correct values" )
3368 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3369 utilities.assert_equals( expect=main.TRUE,
3370 actual=incrementCheck,
3371 onpass="Added counters are correct",
3372 onfail="Added counters are incorrect" )
3373
3374 main.step( "Add -8 to then get a in-memory counter on each node" )
3375 iCounters = []
3376 threads = []
3377 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003378 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003379 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3380 name="counterIncrement-" + str( i ),
3381 args=[ iCounterName ],
3382 kwargs={ "delta": -8, "inMemory": True } )
3383 iCounterValue += -8
3384 addedIValues.append( iCounterValue )
3385 threads.append( t )
3386 t.start()
3387
3388 for t in threads:
3389 t.join()
3390 iCounters.append( t.result )
3391 # Check that counter incremented numController times
3392 iCounterResults = True
3393 for i in addedIValues:
3394 tmpResult = i in iCounters
3395 iCounterResults = iCounterResults and tmpResult
3396 if not tmpResult:
3397 main.log.error( str( i ) + " is not in in-memory "
3398 "counter incremented results" )
3399 utilities.assert_equals( expect=True,
3400 actual=pCounterResults,
3401 onpass="In-memory counter incremented",
3402 onfail="Error incrementing in-memory" +
3403 " counter" )
3404
3405 main.step( "Add 5 to then get a in-memory counter on each node" )
3406 iCounters = []
3407 threads = []
3408 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003409 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003410 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3411 name="counterIncrement-" + str( i ),
3412 args=[ iCounterName ],
3413 kwargs={ "delta": 5, "inMemory": True } )
3414 iCounterValue += 5
3415 addedIValues.append( iCounterValue )
3416 threads.append( t )
3417 t.start()
3418
3419 for t in threads:
3420 t.join()
3421 iCounters.append( t.result )
3422 # Check that counter incremented numController times
3423 iCounterResults = True
3424 for i in addedIValues:
3425 tmpResult = i in iCounters
3426 iCounterResults = iCounterResults and tmpResult
3427 if not tmpResult:
3428 main.log.error( str( i ) + " is not in in-memory "
3429 "counter incremented results" )
3430 utilities.assert_equals( expect=True,
3431 actual=pCounterResults,
3432 onpass="In-memory counter incremented",
3433 onfail="Error incrementing in-memory" +
3434 " counter" )
3435
3436 main.step( "Get then add 5 to a in-memory counter on each node" )
3437 iCounters = []
3438 threads = []
3439 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003440 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003441 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3442 name="counterIncrement-" + str( i ),
3443 args=[ iCounterName ],
3444 kwargs={ "delta": 5, "inMemory": True } )
3445 addedIValues.append( iCounterValue )
3446 iCounterValue += 5
3447 threads.append( t )
3448 t.start()
3449
3450 for t in threads:
3451 t.join()
3452 iCounters.append( t.result )
3453 # Check that counter incremented numController times
3454 iCounterResults = True
3455 for i in addedIValues:
3456 tmpResult = i in iCounters
3457 iCounterResults = iCounterResults and tmpResult
3458 if not tmpResult:
3459 main.log.error( str( i ) + " is not in in-memory "
3460 "counter incremented results" )
3461 utilities.assert_equals( expect=True,
3462 actual=iCounterResults,
3463 onpass="In-memory counter incremented",
3464 onfail="Error incrementing in-memory" +
3465 " counter" )
3466
3467 main.step( "Counters we added have the correct values" )
3468 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3469 utilities.assert_equals( expect=main.TRUE,
3470 actual=incrementCheck,
3471 onpass="Added counters are correct",
3472 onfail="Added counters are incorrect" )
3473
Jon Hall5cf14d52015-07-16 12:15:19 -07003474 main.step( "Check counters are consistant across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003475 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003476 utilities.assert_equals( expect=main.TRUE,
3477 actual=consistentCounterResults,
3478 onpass="ONOS counters are consistent " +
3479 "across nodes",
3480 onfail="ONOS Counters are inconsistent " +
3481 "across nodes" )
3482
3483 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003484 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3485 incrementCheck = incrementCheck and \
3486 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003487 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003488 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003489 onpass="Added counters are correct",
3490 onfail="Added counters are incorrect" )
3491 # DISTRIBUTED SETS
3492 main.step( "Distributed Set get" )
3493 size = len( onosSet )
3494 getResponses = []
3495 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003496 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003497 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003498 name="setTestGet-" + str( i ),
3499 args=[ onosSetName ] )
3500 threads.append( t )
3501 t.start()
3502 for t in threads:
3503 t.join()
3504 getResponses.append( t.result )
3505
3506 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003507 for i in range( len( main.activeNodes ) ):
3508 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003509 if isinstance( getResponses[ i ], list):
3510 current = set( getResponses[ i ] )
3511 if len( current ) == len( getResponses[ i ] ):
3512 # no repeats
3513 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003514 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003515 " has incorrect view" +
3516 " of set " + onosSetName + ":\n" +
3517 str( getResponses[ i ] ) )
3518 main.log.debug( "Expected: " + str( onosSet ) )
3519 main.log.debug( "Actual: " + str( current ) )
3520 getResults = main.FALSE
3521 else:
3522 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003523 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003524 " has repeat elements in" +
3525 " set " + onosSetName + ":\n" +
3526 str( getResponses[ i ] ) )
3527 getResults = main.FALSE
3528 elif getResponses[ i ] == main.ERROR:
3529 getResults = main.FALSE
3530 utilities.assert_equals( expect=main.TRUE,
3531 actual=getResults,
3532 onpass="Set elements are correct",
3533 onfail="Set elements are incorrect" )
3534
3535 main.step( "Distributed Set size" )
3536 sizeResponses = []
3537 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003538 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003539 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003540 name="setTestSize-" + str( i ),
3541 args=[ onosSetName ] )
3542 threads.append( t )
3543 t.start()
3544 for t in threads:
3545 t.join()
3546 sizeResponses.append( t.result )
3547
3548 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003549 for i in range( len( main.activeNodes ) ):
3550 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003551 if size != sizeResponses[ i ]:
3552 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003553 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003554 " expected a size of " + str( size ) +
3555 " for set " + onosSetName +
3556 " but got " + str( sizeResponses[ i ] ) )
3557 utilities.assert_equals( expect=main.TRUE,
3558 actual=sizeResults,
3559 onpass="Set sizes are correct",
3560 onfail="Set sizes are incorrect" )
3561
3562 main.step( "Distributed Set add()" )
3563 onosSet.add( addValue )
3564 addResponses = []
3565 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003566 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003567 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003568 name="setTestAdd-" + str( i ),
3569 args=[ onosSetName, addValue ] )
3570 threads.append( t )
3571 t.start()
3572 for t in threads:
3573 t.join()
3574 addResponses.append( t.result )
3575
3576 # main.TRUE = successfully changed the set
3577 # main.FALSE = action resulted in no change in set
3578 # main.ERROR - Some error in executing the function
3579 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003580 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003581 if addResponses[ i ] == main.TRUE:
3582 # All is well
3583 pass
3584 elif addResponses[ i ] == main.FALSE:
3585 # Already in set, probably fine
3586 pass
3587 elif addResponses[ i ] == main.ERROR:
3588 # Error in execution
3589 addResults = main.FALSE
3590 else:
3591 # unexpected result
3592 addResults = main.FALSE
3593 if addResults != main.TRUE:
3594 main.log.error( "Error executing set add" )
3595
3596 # Check if set is still correct
3597 size = len( onosSet )
3598 getResponses = []
3599 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003600 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003601 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003602 name="setTestGet-" + str( i ),
3603 args=[ onosSetName ] )
3604 threads.append( t )
3605 t.start()
3606 for t in threads:
3607 t.join()
3608 getResponses.append( t.result )
3609 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003610 for i in range( len( main.activeNodes ) ):
3611 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003612 if isinstance( getResponses[ i ], list):
3613 current = set( getResponses[ i ] )
3614 if len( current ) == len( getResponses[ i ] ):
3615 # no repeats
3616 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003617 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003618 " of set " + onosSetName + ":\n" +
3619 str( getResponses[ i ] ) )
3620 main.log.debug( "Expected: " + str( onosSet ) )
3621 main.log.debug( "Actual: " + str( current ) )
3622 getResults = main.FALSE
3623 else:
3624 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003625 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003626 " set " + onosSetName + ":\n" +
3627 str( getResponses[ i ] ) )
3628 getResults = main.FALSE
3629 elif getResponses[ i ] == main.ERROR:
3630 getResults = main.FALSE
3631 sizeResponses = []
3632 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003633 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003634 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003635 name="setTestSize-" + str( i ),
3636 args=[ onosSetName ] )
3637 threads.append( t )
3638 t.start()
3639 for t in threads:
3640 t.join()
3641 sizeResponses.append( t.result )
3642 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003643 for i in range( len( main.activeNodes ) ):
3644 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003645 if size != sizeResponses[ i ]:
3646 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003647 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003648 " expected a size of " + str( size ) +
3649 " for set " + onosSetName +
3650 " but got " + str( sizeResponses[ i ] ) )
3651 addResults = addResults and getResults and sizeResults
3652 utilities.assert_equals( expect=main.TRUE,
3653 actual=addResults,
3654 onpass="Set add correct",
3655 onfail="Set add was incorrect" )
3656
3657 main.step( "Distributed Set addAll()" )
3658 onosSet.update( addAllValue.split() )
3659 addResponses = []
3660 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003661 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003662 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003663 name="setTestAddAll-" + str( i ),
3664 args=[ onosSetName, addAllValue ] )
3665 threads.append( t )
3666 t.start()
3667 for t in threads:
3668 t.join()
3669 addResponses.append( t.result )
3670
3671 # main.TRUE = successfully changed the set
3672 # main.FALSE = action resulted in no change in set
3673 # main.ERROR - Some error in executing the function
3674 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003675 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003676 if addResponses[ i ] == main.TRUE:
3677 # All is well
3678 pass
3679 elif addResponses[ i ] == main.FALSE:
3680 # Already in set, probably fine
3681 pass
3682 elif addResponses[ i ] == main.ERROR:
3683 # Error in execution
3684 addAllResults = main.FALSE
3685 else:
3686 # unexpected result
3687 addAllResults = main.FALSE
3688 if addAllResults != main.TRUE:
3689 main.log.error( "Error executing set addAll" )
3690
3691 # Check if set is still correct
3692 size = len( onosSet )
3693 getResponses = []
3694 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003695 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003696 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003697 name="setTestGet-" + str( i ),
3698 args=[ onosSetName ] )
3699 threads.append( t )
3700 t.start()
3701 for t in threads:
3702 t.join()
3703 getResponses.append( t.result )
3704 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003705 for i in range( len( main.activeNodes ) ):
3706 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003707 if isinstance( getResponses[ i ], list):
3708 current = set( getResponses[ i ] )
3709 if len( current ) == len( getResponses[ i ] ):
3710 # no repeats
3711 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003712 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003713 " has incorrect view" +
3714 " of set " + onosSetName + ":\n" +
3715 str( getResponses[ i ] ) )
3716 main.log.debug( "Expected: " + str( onosSet ) )
3717 main.log.debug( "Actual: " + str( current ) )
3718 getResults = main.FALSE
3719 else:
3720 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003721 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003722 " has repeat elements in" +
3723 " set " + onosSetName + ":\n" +
3724 str( getResponses[ i ] ) )
3725 getResults = main.FALSE
3726 elif getResponses[ i ] == main.ERROR:
3727 getResults = main.FALSE
3728 sizeResponses = []
3729 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003730 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003731 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003732 name="setTestSize-" + str( i ),
3733 args=[ onosSetName ] )
3734 threads.append( t )
3735 t.start()
3736 for t in threads:
3737 t.join()
3738 sizeResponses.append( t.result )
3739 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003740 for i in range( len( main.activeNodes ) ):
3741 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003742 if size != sizeResponses[ i ]:
3743 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003744 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003745 " expected a size of " + str( size ) +
3746 " for set " + onosSetName +
3747 " but got " + str( sizeResponses[ i ] ) )
3748 addAllResults = addAllResults and getResults and sizeResults
3749 utilities.assert_equals( expect=main.TRUE,
3750 actual=addAllResults,
3751 onpass="Set addAll correct",
3752 onfail="Set addAll was incorrect" )
3753
3754 main.step( "Distributed Set contains()" )
3755 containsResponses = []
3756 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003757 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003758 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003759 name="setContains-" + str( i ),
3760 args=[ onosSetName ],
3761 kwargs={ "values": addValue } )
3762 threads.append( t )
3763 t.start()
3764 for t in threads:
3765 t.join()
3766 # NOTE: This is the tuple
3767 containsResponses.append( t.result )
3768
3769 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003770 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003771 if containsResponses[ i ] == main.ERROR:
3772 containsResults = main.FALSE
3773 else:
3774 containsResults = containsResults and\
3775 containsResponses[ i ][ 1 ]
3776 utilities.assert_equals( expect=main.TRUE,
3777 actual=containsResults,
3778 onpass="Set contains is functional",
3779 onfail="Set contains failed" )
3780
3781 main.step( "Distributed Set containsAll()" )
3782 containsAllResponses = []
3783 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003784 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003785 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003786 name="setContainsAll-" + str( i ),
3787 args=[ onosSetName ],
3788 kwargs={ "values": addAllValue } )
3789 threads.append( t )
3790 t.start()
3791 for t in threads:
3792 t.join()
3793 # NOTE: This is the tuple
3794 containsAllResponses.append( t.result )
3795
3796 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003797 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003798 if containsResponses[ i ] == main.ERROR:
3799 containsResults = main.FALSE
3800 else:
3801 containsResults = containsResults and\
3802 containsResponses[ i ][ 1 ]
3803 utilities.assert_equals( expect=main.TRUE,
3804 actual=containsAllResults,
3805 onpass="Set containsAll is functional",
3806 onfail="Set containsAll failed" )
3807
        main.step( "Distributed Set remove()" )
        # Remove one value from the local reference set, issue the same
        # remove on every active ONOS node in parallel, then verify that
        # each node's view of the set converges to the reference.
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # ONOS node labels are 1-indexed, activeNodes entries are 0-indexed
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node against the reference set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3904
        main.step( "Distributed Set removeAll()" )
        # Remove a whole collection of values from the reference set and from
        # the distributed set on every node, then verify convergence.
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # ONOS node labels are 1-indexed, activeNodes entries are 0-indexed
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node against the reference set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
4004
        main.step( "Distributed Set addAll()" )
        # Add a whole collection of values to the reference set and to the
        # distributed set on every node, then verify convergence.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # ONOS node labels are 1-indexed, activeNodes entries are 0-indexed
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node against the reference set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4101
        main.step( "Distributed Set clear()" )
        # Empty the reference set and the distributed set on every node
        # (clear is implemented via setTestRemove with clear=True), then
        # verify every node sees an empty set.
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # ONOS node labels are 1-indexed, activeNodes entries are 0-indexed
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node against the reference set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4199
        main.step( "Distributed Set addAll()" )
        # Repopulate the set after the clear() above so the following
        # retain() step has elements to work with; verify convergence again.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # ONOS node labels are 1-indexed, activeNodes entries are 0-indexed
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node against the reference set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4296
        main.step( "Distributed Set retain()" )
        # Intersect the reference set with retainValue and perform the same
        # retain (setTestRemove with retain=True) on every node, then verify
        # convergence.
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # ONOS node labels are 1-indexed, activeNodes entries are 0-indexed
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node against the reference set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4393
Jon Hall2a5002c2015-08-21 16:49:11 -07004394 # Transactional maps
4395 main.step( "Partitioned Transactional maps put" )
4396 tMapValue = "Testing"
4397 numKeys = 100
4398 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004399 node = main.activeNodes[0]
4400 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall2a5002c2015-08-21 16:49:11 -07004401 if len( putResponses ) == 100:
4402 for i in putResponses:
4403 if putResponses[ i ][ 'value' ] != tMapValue:
4404 putResult = False
4405 else:
4406 putResult = False
4407 if not putResult:
4408 main.log.debug( "Put response values: " + str( putResponses ) )
4409 utilities.assert_equals( expect=True,
4410 actual=putResult,
4411 onpass="Partitioned Transactional Map put successful",
4412 onfail="Partitioned Transactional Map put values are incorrect" )
4413
4414 main.step( "Partitioned Transactional maps get" )
4415 getCheck = True
4416 for n in range( 1, numKeys + 1 ):
4417 getResponses = []
4418 threads = []
4419 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004420 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004421 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4422 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004423 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004424 threads.append( t )
4425 t.start()
4426 for t in threads:
4427 t.join()
4428 getResponses.append( t.result )
4429 for node in getResponses:
4430 if node != tMapValue:
4431 valueCheck = False
4432 if not valueCheck:
4433 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4434 main.log.warn( getResponses )
4435 getCheck = getCheck and valueCheck
4436 utilities.assert_equals( expect=True,
4437 actual=getCheck,
4438 onpass="Partitioned Transactional Map get values were correct",
4439 onfail="Partitioned Transactional Map values incorrect" )
4440
4441 main.step( "In-memory Transactional maps put" )
4442 tMapValue = "Testing"
4443 numKeys = 100
4444 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004445 node = main.activeNodes[0]
4446 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
Jon Hall2a5002c2015-08-21 16:49:11 -07004447 if len( putResponses ) == 100:
4448 for i in putResponses:
4449 if putResponses[ i ][ 'value' ] != tMapValue:
4450 putResult = False
4451 else:
4452 putResult = False
4453 if not putResult:
4454 main.log.debug( "Put response values: " + str( putResponses ) )
4455 utilities.assert_equals( expect=True,
4456 actual=putResult,
4457 onpass="In-Memory Transactional Map put successful",
4458 onfail="In-Memory Transactional Map put values are incorrect" )
4459
4460 main.step( "In-Memory Transactional maps get" )
4461 getCheck = True
4462 for n in range( 1, numKeys + 1 ):
4463 getResponses = []
4464 threads = []
4465 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004466 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004467 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4468 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004469 args=[ "Key" + str( n ) ],
Jon Hall2a5002c2015-08-21 16:49:11 -07004470 kwargs={ "inMemory": True } )
4471 threads.append( t )
4472 t.start()
4473 for t in threads:
4474 t.join()
4475 getResponses.append( t.result )
4476 for node in getResponses:
4477 if node != tMapValue:
4478 valueCheck = False
4479 if not valueCheck:
4480 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4481 main.log.warn( getResponses )
4482 getCheck = getCheck and valueCheck
4483 utilities.assert_equals( expect=True,
4484 actual=getCheck,
4485 onpass="In-Memory Transactional Map get values were correct",
4486 onfail="In-Memory Transactional Map values incorrect" )