blob: d26df2fbd9fa5969038c2ccaf923abd8df33b0a5 [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
    a minority of its nodes restarting

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
26
27
class HAkillNodes:
    """HA test: kill a minority of ONOS nodes and verify cluster recovery."""

    def __init__( self ):
        # Default (empty) result string used by the TestON framework.
        self.default = ''
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hall3b489db2015-10-05 14:38:37 -070052 import pexpect
Jon Hall5cf14d52015-07-16 12:15:19 -070053 main.log.info( "ONOS HA test: Restart minority of ONOS nodes - " +
54 "initialization" )
55 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070056 main.caseExplanation = "Setup the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070057 "installing ONOS, starting Mininet and ONOS" +\
58 "cli sessions."
59 # TODO: save all the timers and output them for plotting
60
61 # load some variables from the params file
62 PULLCODE = False
63 if main.params[ 'Git' ] == 'True':
64 PULLCODE = True
65 gitBranch = main.params[ 'branch' ]
66 cellName = main.params[ 'ENV' ][ 'cellName' ]
67
Jon Halle1a3b752015-07-22 13:02:46 -070068 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070069 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070070 if main.ONOSbench.maxNodes < main.numCtrls:
71 main.numCtrls = int( main.ONOSbench.maxNodes )
72 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070073 global ONOS1Port
74 global ONOS2Port
75 global ONOS3Port
76 global ONOS4Port
77 global ONOS5Port
78 global ONOS6Port
79 global ONOS7Port
80
81 # FIXME: just get controller port from params?
82 # TODO: do we really need all these?
83 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
84 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
85 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
86 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
87 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
88 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
89 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
90
Jon Halle1a3b752015-07-22 13:02:46 -070091 try:
92 fileName = "Counters"
93 # TODO: Maybe make a library folder somewhere?
94 path = main.params[ 'imports' ][ 'path' ]
95 main.Counters = imp.load_source( fileName,
96 path + fileName + ".py" )
97 except Exception as e:
98 main.log.exception( e )
99 main.cleanup()
100 main.exit()
101
102 main.CLIs = []
103 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700104 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700105 for i in range( 1, main.numCtrls + 1 ):
106 try:
107 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
108 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
109 ipList.append( main.nodes[ -1 ].ip_address )
110 except AttributeError:
111 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700112
113 main.step( "Create cell file" )
114 cellAppString = main.params[ 'ENV' ][ 'appString' ]
115 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
116 main.Mininet1.ip_address,
117 cellAppString, ipList )
118 main.step( "Applying cell variable to environment" )
119 cellResult = main.ONOSbench.setCell( cellName )
120 verifyResult = main.ONOSbench.verifyCell()
121
122 # FIXME:this is short term fix
123 main.log.info( "Removing raft logs" )
124 main.ONOSbench.onosRemoveRaftLogs()
125
126 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700127 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700128 main.ONOSbench.onosUninstall( node.ip_address )
129
130 # Make sure ONOS is DEAD
131 main.log.info( "Killing any ONOS processes" )
132 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700133 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700134 killed = main.ONOSbench.onosKill( node.ip_address )
135 killResults = killResults and killed
136
137 cleanInstallResult = main.TRUE
138 gitPullResult = main.TRUE
139
140 main.step( "Starting Mininet" )
141 # scp topo file to mininet
142 # TODO: move to params?
143 topoName = "obelisk.py"
144 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700145 main.ONOSbench.scp( main.Mininet1,
146 filePath + topoName,
147 main.Mininet1.home,
148 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700149 mnResult = main.Mininet1.startNet( )
150 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
151 onpass="Mininet Started",
152 onfail="Error starting Mininet" )
153
154 main.step( "Git checkout and pull " + gitBranch )
155 if PULLCODE:
156 main.ONOSbench.gitCheckout( gitBranch )
157 gitPullResult = main.ONOSbench.gitPull()
158 # values of 1 or 3 are good
159 utilities.assert_lesser( expect=0, actual=gitPullResult,
160 onpass="Git pull successful",
161 onfail="Git pull failed" )
162 main.ONOSbench.getVersion( report=True )
163
164 main.step( "Using mvn clean install" )
165 cleanInstallResult = main.TRUE
166 if PULLCODE and gitPullResult == main.TRUE:
167 cleanInstallResult = main.ONOSbench.cleanInstall()
168 else:
169 main.log.warn( "Did not pull new code so skipping mvn " +
170 "clean install" )
171 utilities.assert_equals( expect=main.TRUE,
172 actual=cleanInstallResult,
173 onpass="MCI successful",
174 onfail="MCI failed" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700175
176 main.step( "Make sure ONOS service doesn't automatically respawn" )
177 handle = main.ONOSbench.handle
178 handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
179 handle.expect( "\$" ) # $ from the command
180 handle.expect( "\$" ) # $ from the prompt
181
Jon Hall5cf14d52015-07-16 12:15:19 -0700182 # GRAPHS
183 # NOTE: important params here:
184 # job = name of Jenkins job
185 # Plot Name = Plot-HA, only can be used if multiple plots
186 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700187 job = "HAkillNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700188 plotName = "Plot-HA"
189 graphs = '<ac:structured-macro ac:name="html">\n'
190 graphs += '<ac:plain-text-body><![CDATA[\n'
191 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
192 '/plot/' + plotName + '/getPlot?index=0' +\
193 '&width=500&height=300"' +\
194 'noborder="0" width="500" height="300" scrolling="yes" ' +\
195 'seamless="seamless"></iframe>\n'
196 graphs += ']]></ac:plain-text-body>\n'
197 graphs += '</ac:structured-macro>\n'
198 main.log.wiki(graphs)
199
200 main.step( "Creating ONOS package" )
Jon Hall3b489db2015-10-05 14:38:37 -0700201 # copy gen-partions file to ONOS
202 # NOTE: this assumes TestON and ONOS are on the same machine
203 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
204 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
205 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
206 main.ONOSbench.ip_address,
207 srcFile,
208 dstDir,
209 pwd=main.ONOSbench.pwd,
210 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700211 packageResult = main.ONOSbench.onosPackage()
212 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
213 onpass="ONOS package successful",
214 onfail="ONOS package failed" )
215
216 main.step( "Installing ONOS package" )
217 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700218 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700219 tmpResult = main.ONOSbench.onosInstall( options="-f",
220 node=node.ip_address )
221 onosInstallResult = onosInstallResult and tmpResult
222 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
223 onpass="ONOS install successful",
224 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700225 # clean up gen-partitions file
226 try:
227 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
228 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
229 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
230 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
231 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
232 str( main.ONOSbench.handle.before ) )
233 except ( pexpect.TIMEOUT, pexpect.EOF ):
234 main.log.exception( "ONOSbench: pexpect exception found:" +
235 main.ONOSbench.handle.before )
236 main.cleanup()
237 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700238
239 main.step( "Checking if ONOS is up yet" )
240 for i in range( 2 ):
241 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700242 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700243 started = main.ONOSbench.isup( node.ip_address )
244 if not started:
245 main.log.error( node.name + " didn't start!" )
246 main.ONOSbench.onosStop( node.ip_address )
247 main.ONOSbench.onosStart( node.ip_address )
248 onosIsupResult = onosIsupResult and started
249 if onosIsupResult == main.TRUE:
250 break
251 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
252 onpass="ONOS startup successful",
253 onfail="ONOS startup failed" )
254
255 main.log.step( "Starting ONOS CLI sessions" )
256 cliResults = main.TRUE
257 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700258 for i in range( main.numCtrls ):
259 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700260 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700261 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700262 threads.append( t )
263 t.start()
264
265 for t in threads:
266 t.join()
267 cliResults = cliResults and t.result
268 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
269 onpass="ONOS cli startup successful",
270 onfail="ONOS cli startup failed" )
271
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700272 # Create a list of active nodes for use when some nodes are stopped
273 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
274
Jon Hall5cf14d52015-07-16 12:15:19 -0700275 if main.params[ 'tcpdump' ].lower() == "true":
276 main.step( "Start Packet Capture MN" )
277 main.Mininet2.startTcpdump(
278 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
279 + "-MN.pcap",
280 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
281 port=main.params[ 'MNtcpdump' ][ 'port' ] )
282
283 main.step( "App Ids check" )
284 appCheck = main.TRUE
285 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700286 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700287 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700288 name="appToIDCheck-" + str( i ),
289 args=[] )
290 threads.append( t )
291 t.start()
292
293 for t in threads:
294 t.join()
295 appCheck = appCheck and t.result
296 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700297 node = main.activeNodes[0]
298 main.log.warn( main.CLIs[node].apps() )
299 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700300 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
301 onpass="App Ids seem to be correct",
302 onfail="Something is wrong with app Ids" )
303
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700304 main.step( "Clean up ONOS service changes" )
305 handle.sendline( "git checkout -- tools/package/init/onos.conf" )
306 handle.expect( "\$" )
307
Jon Hall5cf14d52015-07-16 12:15:19 -0700308 if cliResults == main.FALSE:
309 main.log.error( "Failed to start ONOS, stopping test" )
310 main.cleanup()
311 main.exit()
312
313 def CASE2( self, main ):
314 """
315 Assign devices to controllers
316 """
317 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700318 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700319 assert main, "main not defined"
320 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700321 assert main.CLIs, "main.CLIs not defined"
322 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700323 assert ONOS1Port, "ONOS1Port not defined"
324 assert ONOS2Port, "ONOS2Port not defined"
325 assert ONOS3Port, "ONOS3Port not defined"
326 assert ONOS4Port, "ONOS4Port not defined"
327 assert ONOS5Port, "ONOS5Port not defined"
328 assert ONOS6Port, "ONOS6Port not defined"
329 assert ONOS7Port, "ONOS7Port not defined"
330
331 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700332 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700333 "and check that an ONOS node becomes the " +\
334 "master of the device."
335 main.step( "Assign switches to controllers" )
336
337 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700338 for i in range( main.numCtrls ):
339 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700340 swList = []
341 for i in range( 1, 29 ):
342 swList.append( "s" + str( i ) )
343 main.Mininet1.assignSwController( sw=swList, ip=ipList )
344
345 mastershipCheck = main.TRUE
346 for i in range( 1, 29 ):
347 response = main.Mininet1.getSwController( "s" + str( i ) )
348 try:
349 main.log.info( str( response ) )
350 except Exception:
351 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700352 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700353 if re.search( "tcp:" + node.ip_address, response ):
354 mastershipCheck = mastershipCheck and main.TRUE
355 else:
356 main.log.error( "Error, node " + node.ip_address + " is " +
357 "not in the list of controllers s" +
358 str( i ) + " is connecting to." )
359 mastershipCheck = main.FALSE
360 utilities.assert_equals(
361 expect=main.TRUE,
362 actual=mastershipCheck,
363 onpass="Switch mastership assigned correctly",
364 onfail="Switches not assigned correctly to controllers" )
365
366 def CASE21( self, main ):
367 """
368 Assign mastership to controllers
369 """
Jon Hall5cf14d52015-07-16 12:15:19 -0700370 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700371 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700372 assert main, "main not defined"
373 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700374 assert main.CLIs, "main.CLIs not defined"
375 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700376 assert ONOS1Port, "ONOS1Port not defined"
377 assert ONOS2Port, "ONOS2Port not defined"
378 assert ONOS3Port, "ONOS3Port not defined"
379 assert ONOS4Port, "ONOS4Port not defined"
380 assert ONOS5Port, "ONOS5Port not defined"
381 assert ONOS6Port, "ONOS6Port not defined"
382 assert ONOS7Port, "ONOS7Port not defined"
383
384 main.case( "Assigning Controller roles for switches" )
Jon Hall783bbf92015-07-23 14:33:19 -0700385 main.caseExplanation = "Check that ONOS is connected to each " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700386 "device. Then manually assign" +\
387 " mastership to specific ONOS nodes using" +\
388 " 'device-role'"
389 main.step( "Assign mastership of switches to specific controllers" )
390 # Manually assign mastership to the controller we want
391 roleCall = main.TRUE
392
393 ipList = [ ]
394 deviceList = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700395 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700396 try:
397 # Assign mastership to specific controllers. This assignment was
398 # determined for a 7 node cluser, but will work with any sized
399 # cluster
400 for i in range( 1, 29 ): # switches 1 through 28
401 # set up correct variables:
402 if i == 1:
403 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700404 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700405 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700406 elif i == 2:
Jon Halle1a3b752015-07-22 13:02:46 -0700407 c = 1 % main.numCtrls
408 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700409 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700410 elif i == 3:
Jon Halle1a3b752015-07-22 13:02:46 -0700411 c = 1 % main.numCtrls
412 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700413 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700414 elif i == 4:
Jon Halle1a3b752015-07-22 13:02:46 -0700415 c = 3 % main.numCtrls
416 ip = main.nodes[ c ].ip_address # ONOS4
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700417 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700418 elif i == 5:
Jon Halle1a3b752015-07-22 13:02:46 -0700419 c = 2 % main.numCtrls
420 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700421 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700422 elif i == 6:
Jon Halle1a3b752015-07-22 13:02:46 -0700423 c = 2 % main.numCtrls
424 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700425 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700426 elif i == 7:
Jon Halle1a3b752015-07-22 13:02:46 -0700427 c = 5 % main.numCtrls
428 ip = main.nodes[ c ].ip_address # ONOS6
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700429 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700430 elif i >= 8 and i <= 17:
Jon Halle1a3b752015-07-22 13:02:46 -0700431 c = 4 % main.numCtrls
432 ip = main.nodes[ c ].ip_address # ONOS5
Jon Hall5cf14d52015-07-16 12:15:19 -0700433 dpid = '3' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700434 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700435 elif i >= 18 and i <= 27:
Jon Halle1a3b752015-07-22 13:02:46 -0700436 c = 6 % main.numCtrls
437 ip = main.nodes[ c ].ip_address # ONOS7
Jon Hall5cf14d52015-07-16 12:15:19 -0700438 dpid = '6' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700439 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700440 elif i == 28:
441 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700442 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700443 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700444 else:
445 main.log.error( "You didn't write an else statement for " +
446 "switch s" + str( i ) )
447 roleCall = main.FALSE
448 # Assign switch
449 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
450 # TODO: make this controller dynamic
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700451 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
Jon Hall5cf14d52015-07-16 12:15:19 -0700452 ipList.append( ip )
453 deviceList.append( deviceId )
454 except ( AttributeError, AssertionError ):
455 main.log.exception( "Something is wrong with ONOS device view" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700456 main.log.info( onosCli.devices() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700457 utilities.assert_equals(
458 expect=main.TRUE,
459 actual=roleCall,
460 onpass="Re-assigned switch mastership to designated controller",
461 onfail="Something wrong with deviceRole calls" )
462
463 main.step( "Check mastership was correctly assigned" )
464 roleCheck = main.TRUE
465 # NOTE: This is due to the fact that device mastership change is not
466 # atomic and is actually a multi step process
467 time.sleep( 5 )
468 for i in range( len( ipList ) ):
469 ip = ipList[i]
470 deviceId = deviceList[i]
471 # Check assignment
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700472 master = onosCli.getRole( deviceId ).get( 'master' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700473 if ip in master:
474 roleCheck = roleCheck and main.TRUE
475 else:
476 roleCheck = roleCheck and main.FALSE
477 main.log.error( "Error, controller " + ip + " is not" +
478 " master " + "of device " +
479 str( deviceId ) + ". Master is " +
480 repr( master ) + "." )
481 utilities.assert_equals(
482 expect=main.TRUE,
483 actual=roleCheck,
484 onpass="Switches were successfully reassigned to designated " +
485 "controller",
486 onfail="Switches were not successfully reassigned" )
487
488 def CASE3( self, main ):
489 """
490 Assign intents
491 """
492 import time
493 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700494 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700495 assert main, "main not defined"
496 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700497 assert main.CLIs, "main.CLIs not defined"
498 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700499 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700500 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700501 "assign predetermined host-to-host intents." +\
502 " After installation, check that the intent" +\
503 " is distributed to all nodes and the state" +\
504 " is INSTALLED"
505
506 # install onos-app-fwd
507 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700508 onosCli = main.CLIs[ main.activeNodes[0] ]
509 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700510 utilities.assert_equals( expect=main.TRUE, actual=installResults,
511 onpass="Install fwd successful",
512 onfail="Install fwd failed" )
513
514 main.step( "Check app ids" )
515 appCheck = main.TRUE
516 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700517 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700518 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700519 name="appToIDCheck-" + str( i ),
520 args=[] )
521 threads.append( t )
522 t.start()
523
524 for t in threads:
525 t.join()
526 appCheck = appCheck and t.result
527 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700528 main.log.warn( onosCli.apps() )
529 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700530 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
531 onpass="App Ids seem to be correct",
532 onfail="Something is wrong with app Ids" )
533
534 main.step( "Discovering Hosts( Via pingall for now )" )
535 # FIXME: Once we have a host discovery mechanism, use that instead
536 # REACTIVE FWD test
537 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700538 passMsg = "Reactive Pingall test passed"
539 time1 = time.time()
540 pingResult = main.Mininet1.pingall()
541 time2 = time.time()
542 if not pingResult:
543 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700544 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700545 passMsg += " on the second try"
546 utilities.assert_equals(
547 expect=main.TRUE,
548 actual=pingResult,
549 onpass= passMsg,
550 onfail="Reactive Pingall failed, " +
551 "one or more ping pairs failed" )
552 main.log.info( "Time for pingall: %2f seconds" %
553 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700554 # timeout for fwd flows
555 time.sleep( 11 )
556 # uninstall onos-app-fwd
557 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700558 node = main.activeNodes[0]
559 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700560 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
561 onpass="Uninstall fwd successful",
562 onfail="Uninstall fwd failed" )
563
564 main.step( "Check app ids" )
565 threads = []
566 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700567 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700568 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700569 name="appToIDCheck-" + str( i ),
570 args=[] )
571 threads.append( t )
572 t.start()
573
574 for t in threads:
575 t.join()
576 appCheck2 = appCheck2 and t.result
577 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700578 node = main.activeNodes[0]
579 main.log.warn( main.CLIs[node].apps() )
580 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700581 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
582 onpass="App Ids seem to be correct",
583 onfail="Something is wrong with app Ids" )
584
585 main.step( "Add host intents via cli" )
586 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700587 # TODO: move the host numbers to params
588 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700589 intentAddResult = True
590 hostResult = main.TRUE
591 for i in range( 8, 18 ):
592 main.log.info( "Adding host intent between h" + str( i ) +
593 " and h" + str( i + 10 ) )
594 host1 = "00:00:00:00:00:" + \
595 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
596 host2 = "00:00:00:00:00:" + \
597 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
598 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700599 host1Dict = onosCli.getHost( host1 )
600 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700601 host1Id = None
602 host2Id = None
603 if host1Dict and host2Dict:
604 host1Id = host1Dict.get( 'id', None )
605 host2Id = host2Dict.get( 'id', None )
606 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700607 nodeNum = ( i % len( main.activeNodes ) )
608 node = main.activeNodes[nodeNum]
609 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700610 if tmpId:
611 main.log.info( "Added intent with id: " + tmpId )
612 intentIds.append( tmpId )
613 else:
614 main.log.error( "addHostIntent returned: " +
615 repr( tmpId ) )
616 else:
617 main.log.error( "Error, getHost() failed for h" + str( i ) +
618 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700619 node = main.activeNodes[0]
620 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700621 main.log.warn( "Hosts output: " )
622 try:
623 main.log.warn( json.dumps( json.loads( hosts ),
624 sort_keys=True,
625 indent=4,
626 separators=( ',', ': ' ) ) )
627 except ( ValueError, TypeError ):
628 main.log.warn( repr( hosts ) )
629 hostResult = main.FALSE
630 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
631 onpass="Found a host id for each host",
632 onfail="Error looking up host ids" )
633
634 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700635 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700636 main.log.info( "Submitted intents: " + str( intentIds ) )
637 main.log.info( "Intents in ONOS: " + str( onosIds ) )
638 for intent in intentIds:
639 if intent in onosIds:
640 pass # intent submitted is in onos
641 else:
642 intentAddResult = False
643 if intentAddResult:
644 intentStop = time.time()
645 else:
646 intentStop = None
647 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700648 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700649 intentStates = []
650 installedCheck = True
651 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
652 count = 0
653 try:
654 for intent in json.loads( intents ):
655 state = intent.get( 'state', None )
656 if "INSTALLED" not in state:
657 installedCheck = False
658 intentId = intent.get( 'id', None )
659 intentStates.append( ( intentId, state ) )
660 except ( ValueError, TypeError ):
661 main.log.exception( "Error parsing intents" )
662 # add submitted intents not in the store
663 tmplist = [ i for i, s in intentStates ]
664 missingIntents = False
665 for i in intentIds:
666 if i not in tmplist:
667 intentStates.append( ( i, " - " ) )
668 missingIntents = True
669 intentStates.sort()
670 for i, s in intentStates:
671 count += 1
672 main.log.info( "%-6s%-15s%-15s" %
673 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700674 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700675 try:
676 missing = False
677 if leaders:
678 parsedLeaders = json.loads( leaders )
679 main.log.warn( json.dumps( parsedLeaders,
680 sort_keys=True,
681 indent=4,
682 separators=( ',', ': ' ) ) )
683 # check for all intent partitions
684 topics = []
685 for i in range( 14 ):
686 topics.append( "intent-partition-" + str( i ) )
687 main.log.debug( topics )
688 ONOStopics = [ j['topic'] for j in parsedLeaders ]
689 for topic in topics:
690 if topic not in ONOStopics:
691 main.log.error( "Error: " + topic +
692 " not in leaders" )
693 missing = True
694 else:
695 main.log.error( "leaders() returned None" )
696 except ( ValueError, TypeError ):
697 main.log.exception( "Error parsing leaders" )
698 main.log.error( repr( leaders ) )
699 # Check all nodes
700 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700701 for i in main.activeNodes:
702 response = main.CLIs[i].leaders( jsonFormat=False)
703 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700704 str( response ) )
705
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700706 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700707 try:
708 if partitions :
709 parsedPartitions = json.loads( partitions )
710 main.log.warn( json.dumps( parsedPartitions,
711 sort_keys=True,
712 indent=4,
713 separators=( ',', ': ' ) ) )
714 # TODO check for a leader in all paritions
715 # TODO check for consistency among nodes
716 else:
717 main.log.error( "partitions() returned None" )
718 except ( ValueError, TypeError ):
719 main.log.exception( "Error parsing partitions" )
720 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700721 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700722 try:
723 if pendingMap :
724 parsedPending = json.loads( pendingMap )
725 main.log.warn( json.dumps( parsedPending,
726 sort_keys=True,
727 indent=4,
728 separators=( ',', ': ' ) ) )
729 # TODO check something here?
730 else:
731 main.log.error( "pendingMap() returned None" )
732 except ( ValueError, TypeError ):
733 main.log.exception( "Error parsing pending map" )
734 main.log.error( repr( pendingMap ) )
735
736 intentAddResult = bool( intentAddResult and not missingIntents and
737 installedCheck )
738 if not intentAddResult:
739 main.log.error( "Error in pushing host intents to ONOS" )
740
741 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700742 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700743 correct = True
744 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700745 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700746 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700747 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700748 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700749 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700750 str( sorted( onosIds ) ) )
751 if sorted( ids ) != sorted( intentIds ):
752 main.log.warn( "Set of intent IDs doesn't match" )
753 correct = False
754 break
755 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700756 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700757 for intent in intents:
758 if intent[ 'state' ] != "INSTALLED":
759 main.log.warn( "Intent " + intent[ 'id' ] +
760 " is " + intent[ 'state' ] )
761 correct = False
762 break
763 if correct:
764 break
765 else:
766 time.sleep(1)
767 if not intentStop:
768 intentStop = time.time()
769 global gossipTime
770 gossipTime = intentStop - intentStart
771 main.log.info( "It took about " + str( gossipTime ) +
772 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700773 gossipPeriod = int( main.params['timers']['gossip'] )
774 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700775 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700776 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700777 onpass="ECM anti-entropy for intents worked within " +
778 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700779 onfail="Intent ECM anti-entropy took too long. " +
780 "Expected time:{}, Actual time:{}".format( maxGossipTime,
781 gossipTime ) )
782 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700783 intentAddResult = True
784
785 if not intentAddResult or "key" in pendingMap:
786 import time
787 installedCheck = True
788 main.log.info( "Sleeping 60 seconds to see if intents are found" )
789 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700790 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700791 main.log.info( "Submitted intents: " + str( intentIds ) )
792 main.log.info( "Intents in ONOS: " + str( onosIds ) )
793 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700794 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700795 intentStates = []
796 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
797 count = 0
798 try:
799 for intent in json.loads( intents ):
800 # Iter through intents of a node
801 state = intent.get( 'state', None )
802 if "INSTALLED" not in state:
803 installedCheck = False
804 intentId = intent.get( 'id', None )
805 intentStates.append( ( intentId, state ) )
806 except ( ValueError, TypeError ):
807 main.log.exception( "Error parsing intents" )
808 # add submitted intents not in the store
809 tmplist = [ i for i, s in intentStates ]
810 for i in intentIds:
811 if i not in tmplist:
812 intentStates.append( ( i, " - " ) )
813 intentStates.sort()
814 for i, s in intentStates:
815 count += 1
816 main.log.info( "%-6s%-15s%-15s" %
817 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700818 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700819 try:
820 missing = False
821 if leaders:
822 parsedLeaders = json.loads( leaders )
823 main.log.warn( json.dumps( parsedLeaders,
824 sort_keys=True,
825 indent=4,
826 separators=( ',', ': ' ) ) )
827 # check for all intent partitions
828 # check for election
829 topics = []
830 for i in range( 14 ):
831 topics.append( "intent-partition-" + str( i ) )
832 # FIXME: this should only be after we start the app
833 topics.append( "org.onosproject.election" )
834 main.log.debug( topics )
835 ONOStopics = [ j['topic'] for j in parsedLeaders ]
836 for topic in topics:
837 if topic not in ONOStopics:
838 main.log.error( "Error: " + topic +
839 " not in leaders" )
840 missing = True
841 else:
842 main.log.error( "leaders() returned None" )
843 except ( ValueError, TypeError ):
844 main.log.exception( "Error parsing leaders" )
845 main.log.error( repr( leaders ) )
846 # Check all nodes
847 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700848 for i in main.activeNodes:
849 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700850 response = node.leaders( jsonFormat=False)
851 main.log.warn( str( node.name ) + " leaders output: \n" +
852 str( response ) )
853
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700854 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700855 try:
856 if partitions :
857 parsedPartitions = json.loads( partitions )
858 main.log.warn( json.dumps( parsedPartitions,
859 sort_keys=True,
860 indent=4,
861 separators=( ',', ': ' ) ) )
862 # TODO check for a leader in all paritions
863 # TODO check for consistency among nodes
864 else:
865 main.log.error( "partitions() returned None" )
866 except ( ValueError, TypeError ):
867 main.log.exception( "Error parsing partitions" )
868 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700869 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700870 try:
871 if pendingMap :
872 parsedPending = json.loads( pendingMap )
873 main.log.warn( json.dumps( parsedPending,
874 sort_keys=True,
875 indent=4,
876 separators=( ',', ': ' ) ) )
877 # TODO check something here?
878 else:
879 main.log.error( "pendingMap() returned None" )
880 except ( ValueError, TypeError ):
881 main.log.exception( "Error parsing pending map" )
882 main.log.error( repr( pendingMap ) )
883
884 def CASE4( self, main ):
885 """
886 Ping across added host intents
887 """
888 import json
889 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700890 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700891 assert main, "main not defined"
892 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700893 assert main.CLIs, "main.CLIs not defined"
894 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700895 main.case( "Verify connectivity by sending traffic across Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700896 main.caseExplanation = "Ping across added host intents to check " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700897 "functionality and check the state of " +\
898 "the intent"
899 main.step( "Ping across added host intents" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700900 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700901 PingResult = main.TRUE
902 for i in range( 8, 18 ):
903 ping = main.Mininet1.pingHost( src="h" + str( i ),
904 target="h" + str( i + 10 ) )
905 PingResult = PingResult and ping
906 if ping == main.FALSE:
907 main.log.warn( "Ping failed between h" + str( i ) +
908 " and h" + str( i + 10 ) )
909 elif ping == main.TRUE:
910 main.log.info( "Ping test passed!" )
911 # Don't set PingResult or you'd override failures
912 if PingResult == main.FALSE:
913 main.log.error(
914 "Intents have not been installed correctly, pings failed." )
915 # TODO: pretty print
916 main.log.warn( "ONOS1 intents: " )
917 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700918 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700919 main.log.warn( json.dumps( json.loads( tmpIntents ),
920 sort_keys=True,
921 indent=4,
922 separators=( ',', ': ' ) ) )
923 except ( ValueError, TypeError ):
924 main.log.warn( repr( tmpIntents ) )
925 utilities.assert_equals(
926 expect=main.TRUE,
927 actual=PingResult,
928 onpass="Intents have been installed correctly and pings work",
929 onfail="Intents have not been installed correctly, pings failed." )
930
931 main.step( "Check Intent state" )
932 installedCheck = False
933 loopCount = 0
934 while not installedCheck and loopCount < 40:
935 installedCheck = True
936 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700937 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700938 intentStates = []
939 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
940 count = 0
941 # Iter through intents of a node
942 try:
943 for intent in json.loads( intents ):
944 state = intent.get( 'state', None )
945 if "INSTALLED" not in state:
946 installedCheck = False
947 intentId = intent.get( 'id', None )
948 intentStates.append( ( intentId, state ) )
949 except ( ValueError, TypeError ):
950 main.log.exception( "Error parsing intents." )
951 # Print states
952 intentStates.sort()
953 for i, s in intentStates:
954 count += 1
955 main.log.info( "%-6s%-15s%-15s" %
956 ( str( count ), str( i ), str( s ) ) )
957 if not installedCheck:
958 time.sleep( 1 )
959 loopCount += 1
960 utilities.assert_equals( expect=True, actual=installedCheck,
961 onpass="Intents are all INSTALLED",
962 onfail="Intents are not all in " +
963 "INSTALLED state" )
964
965 main.step( "Check leadership of topics" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700966 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700967 topicCheck = main.TRUE
968 try:
969 if leaders:
970 parsedLeaders = json.loads( leaders )
971 main.log.warn( json.dumps( parsedLeaders,
972 sort_keys=True,
973 indent=4,
974 separators=( ',', ': ' ) ) )
975 # check for all intent partitions
976 # check for election
977 # TODO: Look at Devices as topics now that it uses this system
978 topics = []
979 for i in range( 14 ):
980 topics.append( "intent-partition-" + str( i ) )
981 # FIXME: this should only be after we start the app
982 # FIXME: topics.append( "org.onosproject.election" )
983 # Print leaders output
984 main.log.debug( topics )
985 ONOStopics = [ j['topic'] for j in parsedLeaders ]
986 for topic in topics:
987 if topic not in ONOStopics:
988 main.log.error( "Error: " + topic +
989 " not in leaders" )
990 topicCheck = main.FALSE
991 else:
992 main.log.error( "leaders() returned None" )
993 topicCheck = main.FALSE
994 except ( ValueError, TypeError ):
995 topicCheck = main.FALSE
996 main.log.exception( "Error parsing leaders" )
997 main.log.error( repr( leaders ) )
998 # TODO: Check for a leader of these topics
999 # Check all nodes
1000 if topicCheck:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001001 for i in main.activeNodes:
1002 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001003 response = node.leaders( jsonFormat=False)
1004 main.log.warn( str( node.name ) + " leaders output: \n" +
1005 str( response ) )
1006
1007 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1008 onpass="intent Partitions is in leaders",
1009 onfail="Some topics were lost " )
1010 # Print partitions
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001011 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001012 try:
1013 if partitions :
1014 parsedPartitions = json.loads( partitions )
1015 main.log.warn( json.dumps( parsedPartitions,
1016 sort_keys=True,
1017 indent=4,
1018 separators=( ',', ': ' ) ) )
1019 # TODO check for a leader in all paritions
1020 # TODO check for consistency among nodes
1021 else:
1022 main.log.error( "partitions() returned None" )
1023 except ( ValueError, TypeError ):
1024 main.log.exception( "Error parsing partitions" )
1025 main.log.error( repr( partitions ) )
1026 # Print Pending Map
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001027 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001028 try:
1029 if pendingMap :
1030 parsedPending = json.loads( pendingMap )
1031 main.log.warn( json.dumps( parsedPending,
1032 sort_keys=True,
1033 indent=4,
1034 separators=( ',', ': ' ) ) )
1035 # TODO check something here?
1036 else:
1037 main.log.error( "pendingMap() returned None" )
1038 except ( ValueError, TypeError ):
1039 main.log.exception( "Error parsing pending map" )
1040 main.log.error( repr( pendingMap ) )
1041
1042 if not installedCheck:
1043 main.log.info( "Waiting 60 seconds to see if the state of " +
1044 "intents change" )
1045 time.sleep( 60 )
1046 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001047 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001048 intentStates = []
1049 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1050 count = 0
1051 # Iter through intents of a node
1052 try:
1053 for intent in json.loads( intents ):
1054 state = intent.get( 'state', None )
1055 if "INSTALLED" not in state:
1056 installedCheck = False
1057 intentId = intent.get( 'id', None )
1058 intentStates.append( ( intentId, state ) )
1059 except ( ValueError, TypeError ):
1060 main.log.exception( "Error parsing intents." )
1061 intentStates.sort()
1062 for i, s in intentStates:
1063 count += 1
1064 main.log.info( "%-6s%-15s%-15s" %
1065 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001066 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001067 try:
1068 missing = False
1069 if leaders:
1070 parsedLeaders = json.loads( leaders )
1071 main.log.warn( json.dumps( parsedLeaders,
1072 sort_keys=True,
1073 indent=4,
1074 separators=( ',', ': ' ) ) )
1075 # check for all intent partitions
1076 # check for election
1077 topics = []
1078 for i in range( 14 ):
1079 topics.append( "intent-partition-" + str( i ) )
1080 # FIXME: this should only be after we start the app
1081 topics.append( "org.onosproject.election" )
1082 main.log.debug( topics )
1083 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1084 for topic in topics:
1085 if topic not in ONOStopics:
1086 main.log.error( "Error: " + topic +
1087 " not in leaders" )
1088 missing = True
1089 else:
1090 main.log.error( "leaders() returned None" )
1091 except ( ValueError, TypeError ):
1092 main.log.exception( "Error parsing leaders" )
1093 main.log.error( repr( leaders ) )
1094 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001095 for i in main.activeNodes:
1096 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001097 response = node.leaders( jsonFormat=False)
1098 main.log.warn( str( node.name ) + " leaders output: \n" +
1099 str( response ) )
1100
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001101 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001102 try:
1103 if partitions :
1104 parsedPartitions = json.loads( partitions )
1105 main.log.warn( json.dumps( parsedPartitions,
1106 sort_keys=True,
1107 indent=4,
1108 separators=( ',', ': ' ) ) )
1109 # TODO check for a leader in all paritions
1110 # TODO check for consistency among nodes
1111 else:
1112 main.log.error( "partitions() returned None" )
1113 except ( ValueError, TypeError ):
1114 main.log.exception( "Error parsing partitions" )
1115 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001116 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001117 try:
1118 if pendingMap :
1119 parsedPending = json.loads( pendingMap )
1120 main.log.warn( json.dumps( parsedPending,
1121 sort_keys=True,
1122 indent=4,
1123 separators=( ',', ': ' ) ) )
1124 # TODO check something here?
1125 else:
1126 main.log.error( "pendingMap() returned None" )
1127 except ( ValueError, TypeError ):
1128 main.log.exception( "Error parsing pending map" )
1129 main.log.error( repr( pendingMap ) )
1130 # Print flowrules
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001131 node = main.activeNodes[0]
1132 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001133 main.step( "Wait a minute then ping again" )
1134 # the wait is above
1135 PingResult = main.TRUE
1136 for i in range( 8, 18 ):
1137 ping = main.Mininet1.pingHost( src="h" + str( i ),
1138 target="h" + str( i + 10 ) )
1139 PingResult = PingResult and ping
1140 if ping == main.FALSE:
1141 main.log.warn( "Ping failed between h" + str( i ) +
1142 " and h" + str( i + 10 ) )
1143 elif ping == main.TRUE:
1144 main.log.info( "Ping test passed!" )
1145 # Don't set PingResult or you'd override failures
1146 if PingResult == main.FALSE:
1147 main.log.error(
1148 "Intents have not been installed correctly, pings failed." )
1149 # TODO: pretty print
1150 main.log.warn( "ONOS1 intents: " )
1151 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001152 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001153 main.log.warn( json.dumps( json.loads( tmpIntents ),
1154 sort_keys=True,
1155 indent=4,
1156 separators=( ',', ': ' ) ) )
1157 except ( ValueError, TypeError ):
1158 main.log.warn( repr( tmpIntents ) )
1159 utilities.assert_equals(
1160 expect=main.TRUE,
1161 actual=PingResult,
1162 onpass="Intents have been installed correctly and pings work",
1163 onfail="Intents have not been installed correctly, pings failed." )
1164
1165 def CASE5( self, main ):
1166 """
1167 Reading state of ONOS
1168 """
1169 import json
1170 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001171 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001172 assert main, "main not defined"
1173 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001174 assert main.CLIs, "main.CLIs not defined"
1175 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001176
1177 main.case( "Setting up and gathering data for current state" )
1178 # The general idea for this test case is to pull the state of
1179 # ( intents,flows, topology,... ) from each ONOS node
1180 # We can then compare them with each other and also with past states
1181
1182 main.step( "Check that each switch has a master" )
1183 global mastershipState
1184 mastershipState = '[]'
1185
1186 # Assert that each device has a master
1187 rolesNotNull = main.TRUE
1188 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001189 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001190 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001191 name="rolesNotNull-" + str( i ),
1192 args=[] )
1193 threads.append( t )
1194 t.start()
1195
1196 for t in threads:
1197 t.join()
1198 rolesNotNull = rolesNotNull and t.result
1199 utilities.assert_equals(
1200 expect=main.TRUE,
1201 actual=rolesNotNull,
1202 onpass="Each device has a master",
1203 onfail="Some devices don't have a master assigned" )
1204
1205 main.step( "Get the Mastership of each switch from each controller" )
1206 ONOSMastership = []
1207 mastershipCheck = main.FALSE
1208 consistentMastership = True
1209 rolesResults = True
1210 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001211 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001212 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001213 name="roles-" + str( i ),
1214 args=[] )
1215 threads.append( t )
1216 t.start()
1217
1218 for t in threads:
1219 t.join()
1220 ONOSMastership.append( t.result )
1221
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001222 for i in range( len( ONOSMastership ) ):
1223 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001224 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001225 main.log.error( "Error in getting ONOS" + node + " roles" )
1226 main.log.warn( "ONOS" + node + " mastership response: " +
1227 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001228 rolesResults = False
1229 utilities.assert_equals(
1230 expect=True,
1231 actual=rolesResults,
1232 onpass="No error in reading roles output",
1233 onfail="Error in reading roles from ONOS" )
1234
1235 main.step( "Check for consistency in roles from each controller" )
1236 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1237 main.log.info(
1238 "Switch roles are consistent across all ONOS nodes" )
1239 else:
1240 consistentMastership = False
1241 utilities.assert_equals(
1242 expect=True,
1243 actual=consistentMastership,
1244 onpass="Switch roles are consistent across all ONOS nodes",
1245 onfail="ONOS nodes have different views of switch roles" )
1246
1247 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001248 for i in range( len( main.activeNodes ) ):
1249 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001250 try:
1251 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001252 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001253 json.dumps(
1254 json.loads( ONOSMastership[ i ] ),
1255 sort_keys=True,
1256 indent=4,
1257 separators=( ',', ': ' ) ) )
1258 except ( ValueError, TypeError ):
1259 main.log.warn( repr( ONOSMastership[ i ] ) )
1260 elif rolesResults and consistentMastership:
1261 mastershipCheck = main.TRUE
1262 mastershipState = ONOSMastership[ 0 ]
1263
1264 main.step( "Get the intents from each controller" )
1265 global intentState
1266 intentState = []
1267 ONOSIntents = []
1268 intentCheck = main.FALSE
1269 consistentIntents = True
1270 intentsResults = True
1271 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001272 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001273 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001274 name="intents-" + str( i ),
1275 args=[],
1276 kwargs={ 'jsonFormat': True } )
1277 threads.append( t )
1278 t.start()
1279
1280 for t in threads:
1281 t.join()
1282 ONOSIntents.append( t.result )
1283
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001284 for i in range( len( ONOSIntents ) ):
1285 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001286 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001287 main.log.error( "Error in getting ONOS" + node + " intents" )
1288 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001289 repr( ONOSIntents[ i ] ) )
1290 intentsResults = False
1291 utilities.assert_equals(
1292 expect=True,
1293 actual=intentsResults,
1294 onpass="No error in reading intents output",
1295 onfail="Error in reading intents from ONOS" )
1296
1297 main.step( "Check for consistency in Intents from each controller" )
1298 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1299 main.log.info( "Intents are consistent across all ONOS " +
1300 "nodes" )
1301 else:
1302 consistentIntents = False
1303 main.log.error( "Intents not consistent" )
1304 utilities.assert_equals(
1305 expect=True,
1306 actual=consistentIntents,
1307 onpass="Intents are consistent across all ONOS nodes",
1308 onfail="ONOS nodes have different views of intents" )
1309
1310 if intentsResults:
1311 # Try to make it easy to figure out what is happening
1312 #
1313 # Intent ONOS1 ONOS2 ...
1314 # 0x01 INSTALLED INSTALLING
1315 # ... ... ...
1316 # ... ... ...
1317 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001318 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001319 title += " " * 10 + "ONOS" + str( n + 1 )
1320 main.log.warn( title )
1321 # get all intent keys in the cluster
1322 keys = []
1323 for nodeStr in ONOSIntents:
1324 node = json.loads( nodeStr )
1325 for intent in node:
1326 keys.append( intent.get( 'id' ) )
1327 keys = set( keys )
1328 for key in keys:
1329 row = "%-13s" % key
1330 for nodeStr in ONOSIntents:
1331 node = json.loads( nodeStr )
1332 for intent in node:
1333 if intent.get( 'id', "Error" ) == key:
1334 row += "%-15s" % intent.get( 'state' )
1335 main.log.warn( row )
1336 # End table view
1337
1338 if intentsResults and not consistentIntents:
1339 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001340 n = str( main.activeNodes[-1] + 1 )
1341 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001342 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1343 sort_keys=True,
1344 indent=4,
1345 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001346 for i in range( len( ONOSIntents ) ):
1347 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001348 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001349 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001350 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1351 sort_keys=True,
1352 indent=4,
1353 separators=( ',', ': ' ) ) )
1354 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001355 main.log.debug( "ONOS" + node + " intents match ONOS" +
1356 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001357 elif intentsResults and consistentIntents:
1358 intentCheck = main.TRUE
1359 intentState = ONOSIntents[ 0 ]
1360
1361 main.step( "Get the flows from each controller" )
1362 global flowState
1363 flowState = []
1364 ONOSFlows = []
1365 ONOSFlowsJson = []
1366 flowCheck = main.FALSE
1367 consistentFlows = True
1368 flowsResults = True
1369 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001370 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001371 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001372 name="flows-" + str( i ),
1373 args=[],
1374 kwargs={ 'jsonFormat': True } )
1375 threads.append( t )
1376 t.start()
1377
1378 # NOTE: Flows command can take some time to run
1379 time.sleep(30)
1380 for t in threads:
1381 t.join()
1382 result = t.result
1383 ONOSFlows.append( result )
1384
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001385 for i in range( len( ONOSFlows ) ):
1386 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001387 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1388 main.log.error( "Error in getting ONOS" + num + " flows" )
1389 main.log.warn( "ONOS" + num + " flows response: " +
1390 repr( ONOSFlows[ i ] ) )
1391 flowsResults = False
1392 ONOSFlowsJson.append( None )
1393 else:
1394 try:
1395 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1396 except ( ValueError, TypeError ):
1397 # FIXME: change this to log.error?
1398 main.log.exception( "Error in parsing ONOS" + num +
1399 " response as json." )
1400 main.log.error( repr( ONOSFlows[ i ] ) )
1401 ONOSFlowsJson.append( None )
1402 flowsResults = False
1403 utilities.assert_equals(
1404 expect=True,
1405 actual=flowsResults,
1406 onpass="No error in reading flows output",
1407 onfail="Error in reading flows from ONOS" )
1408
1409 main.step( "Check for consistency in Flows from each controller" )
1410 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1411 if all( tmp ):
1412 main.log.info( "Flow count is consistent across all ONOS nodes" )
1413 else:
1414 consistentFlows = False
1415 utilities.assert_equals(
1416 expect=True,
1417 actual=consistentFlows,
1418 onpass="The flow count is consistent across all ONOS nodes",
1419 onfail="ONOS nodes have different flow counts" )
1420
1421 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001422 for i in range( len( ONOSFlows ) ):
1423 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001424 try:
1425 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001426 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001427 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1428 indent=4, separators=( ',', ': ' ) ) )
1429 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001430 main.log.warn( "ONOS" + node + " flows: " +
1431 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001432 elif flowsResults and consistentFlows:
1433 flowCheck = main.TRUE
1434 flowState = ONOSFlows[ 0 ]
1435
1436 main.step( "Get the OF Table entries" )
1437 global flows
1438 flows = []
1439 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001440 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001441 if flowCheck == main.FALSE:
1442 for table in flows:
1443 main.log.warn( table )
1444 # TODO: Compare switch flow tables with ONOS flow tables
1445
1446 main.step( "Start continuous pings" )
1447 main.Mininet2.pingLong(
1448 src=main.params[ 'PING' ][ 'source1' ],
1449 target=main.params[ 'PING' ][ 'target1' ],
1450 pingTime=500 )
1451 main.Mininet2.pingLong(
1452 src=main.params[ 'PING' ][ 'source2' ],
1453 target=main.params[ 'PING' ][ 'target2' ],
1454 pingTime=500 )
1455 main.Mininet2.pingLong(
1456 src=main.params[ 'PING' ][ 'source3' ],
1457 target=main.params[ 'PING' ][ 'target3' ],
1458 pingTime=500 )
1459 main.Mininet2.pingLong(
1460 src=main.params[ 'PING' ][ 'source4' ],
1461 target=main.params[ 'PING' ][ 'target4' ],
1462 pingTime=500 )
1463 main.Mininet2.pingLong(
1464 src=main.params[ 'PING' ][ 'source5' ],
1465 target=main.params[ 'PING' ][ 'target5' ],
1466 pingTime=500 )
1467 main.Mininet2.pingLong(
1468 src=main.params[ 'PING' ][ 'source6' ],
1469 target=main.params[ 'PING' ][ 'target6' ],
1470 pingTime=500 )
1471 main.Mininet2.pingLong(
1472 src=main.params[ 'PING' ][ 'source7' ],
1473 target=main.params[ 'PING' ][ 'target7' ],
1474 pingTime=500 )
1475 main.Mininet2.pingLong(
1476 src=main.params[ 'PING' ][ 'source8' ],
1477 target=main.params[ 'PING' ][ 'target8' ],
1478 pingTime=500 )
1479 main.Mininet2.pingLong(
1480 src=main.params[ 'PING' ][ 'source9' ],
1481 target=main.params[ 'PING' ][ 'target9' ],
1482 pingTime=500 )
1483 main.Mininet2.pingLong(
1484 src=main.params[ 'PING' ][ 'source10' ],
1485 target=main.params[ 'PING' ][ 'target10' ],
1486 pingTime=500 )
1487
1488 main.step( "Collecting topology information from ONOS" )
1489 devices = []
1490 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001491 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001492 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001493 name="devices-" + str( i ),
1494 args=[ ] )
1495 threads.append( t )
1496 t.start()
1497
1498 for t in threads:
1499 t.join()
1500 devices.append( t.result )
1501 hosts = []
1502 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001503 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001504 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001505 name="hosts-" + str( i ),
1506 args=[ ] )
1507 threads.append( t )
1508 t.start()
1509
1510 for t in threads:
1511 t.join()
1512 try:
1513 hosts.append( json.loads( t.result ) )
1514 except ( ValueError, TypeError ):
1515 # FIXME: better handling of this, print which node
1516 # Maybe use thread name?
1517 main.log.exception( "Error parsing json output of hosts" )
1518 # FIXME: should this be an empty json object instead?
1519 hosts.append( None )
1520
1521 ports = []
1522 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001523 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001524 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001525 name="ports-" + str( i ),
1526 args=[ ] )
1527 threads.append( t )
1528 t.start()
1529
1530 for t in threads:
1531 t.join()
1532 ports.append( t.result )
1533 links = []
1534 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001535 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001536 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001537 name="links-" + str( i ),
1538 args=[ ] )
1539 threads.append( t )
1540 t.start()
1541
1542 for t in threads:
1543 t.join()
1544 links.append( t.result )
1545 clusters = []
1546 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001547 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001548 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001549 name="clusters-" + str( i ),
1550 args=[ ] )
1551 threads.append( t )
1552 t.start()
1553
1554 for t in threads:
1555 t.join()
1556 clusters.append( t.result )
1557 # Compare json objects for hosts and dataplane clusters
1558
1559 # hosts
1560 main.step( "Host view is consistent across ONOS nodes" )
1561 consistentHostsResult = main.TRUE
1562 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001563 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001564 if "Error" not in hosts[ controller ]:
1565 if hosts[ controller ] == hosts[ 0 ]:
1566 continue
1567 else: # hosts not consistent
1568 main.log.error( "hosts from ONOS" +
1569 controllerStr +
1570 " is inconsistent with ONOS1" )
1571 main.log.warn( repr( hosts[ controller ] ) )
1572 consistentHostsResult = main.FALSE
1573
1574 else:
1575 main.log.error( "Error in getting ONOS hosts from ONOS" +
1576 controllerStr )
1577 consistentHostsResult = main.FALSE
1578 main.log.warn( "ONOS" + controllerStr +
1579 " hosts response: " +
1580 repr( hosts[ controller ] ) )
1581 utilities.assert_equals(
1582 expect=main.TRUE,
1583 actual=consistentHostsResult,
1584 onpass="Hosts view is consistent across all ONOS nodes",
1585 onfail="ONOS nodes have different views of hosts" )
1586
1587 main.step( "Each host has an IP address" )
1588 ipResult = main.TRUE
1589 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001590 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001591 for host in hosts[ controller ]:
1592 if not host.get( 'ipAddresses', [ ] ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001593 main.log.error( "Error with host ips on controller" +
Jon Hall5cf14d52015-07-16 12:15:19 -07001594 controllerStr + ": " + str( host ) )
1595 ipResult = main.FALSE
1596 utilities.assert_equals(
1597 expect=main.TRUE,
1598 actual=ipResult,
1599 onpass="The ips of the hosts aren't empty",
1600 onfail="The ip of at least one host is missing" )
1601
1602 # Strongly connected clusters of devices
1603 main.step( "Cluster view is consistent across ONOS nodes" )
1604 consistentClustersResult = main.TRUE
1605 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001606 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001607 if "Error" not in clusters[ controller ]:
1608 if clusters[ controller ] == clusters[ 0 ]:
1609 continue
1610 else: # clusters not consistent
1611 main.log.error( "clusters from ONOS" + controllerStr +
1612 " is inconsistent with ONOS1" )
1613 consistentClustersResult = main.FALSE
1614
1615 else:
1616 main.log.error( "Error in getting dataplane clusters " +
1617 "from ONOS" + controllerStr )
1618 consistentClustersResult = main.FALSE
1619 main.log.warn( "ONOS" + controllerStr +
1620 " clusters response: " +
1621 repr( clusters[ controller ] ) )
1622 utilities.assert_equals(
1623 expect=main.TRUE,
1624 actual=consistentClustersResult,
1625 onpass="Clusters view is consistent across all ONOS nodes",
1626 onfail="ONOS nodes have different views of clusters" )
1627 # there should always only be one cluster
1628 main.step( "Cluster view correct across ONOS nodes" )
1629 try:
1630 numClusters = len( json.loads( clusters[ 0 ] ) )
1631 except ( ValueError, TypeError ):
1632 main.log.exception( "Error parsing clusters[0]: " +
1633 repr( clusters[ 0 ] ) )
1634 clusterResults = main.FALSE
1635 if numClusters == 1:
1636 clusterResults = main.TRUE
1637 utilities.assert_equals(
1638 expect=1,
1639 actual=numClusters,
1640 onpass="ONOS shows 1 SCC",
1641 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1642
1643 main.step( "Comparing ONOS topology to MN" )
1644 devicesResults = main.TRUE
1645 linksResults = main.TRUE
1646 hostsResults = main.TRUE
1647 mnSwitches = main.Mininet1.getSwitches()
1648 mnLinks = main.Mininet1.getLinks()
1649 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001650 for controller in main.activeNodes:
1651 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001652 if devices[ controller ] and ports[ controller ] and\
1653 "Error" not in devices[ controller ] and\
1654 "Error" not in ports[ controller ]:
1655
1656 currentDevicesResult = main.Mininet1.compareSwitches(
1657 mnSwitches,
1658 json.loads( devices[ controller ] ),
1659 json.loads( ports[ controller ] ) )
1660 else:
1661 currentDevicesResult = main.FALSE
1662 utilities.assert_equals( expect=main.TRUE,
1663 actual=currentDevicesResult,
1664 onpass="ONOS" + controllerStr +
1665 " Switches view is correct",
1666 onfail="ONOS" + controllerStr +
1667 " Switches view is incorrect" )
1668 if links[ controller ] and "Error" not in links[ controller ]:
1669 currentLinksResult = main.Mininet1.compareLinks(
1670 mnSwitches, mnLinks,
1671 json.loads( links[ controller ] ) )
1672 else:
1673 currentLinksResult = main.FALSE
1674 utilities.assert_equals( expect=main.TRUE,
1675 actual=currentLinksResult,
1676 onpass="ONOS" + controllerStr +
1677 " links view is correct",
1678 onfail="ONOS" + controllerStr +
1679 " links view is incorrect" )
1680
1681 if hosts[ controller ] or "Error" not in hosts[ controller ]:
1682 currentHostsResult = main.Mininet1.compareHosts(
1683 mnHosts,
1684 hosts[ controller ] )
1685 else:
1686 currentHostsResult = main.FALSE
1687 utilities.assert_equals( expect=main.TRUE,
1688 actual=currentHostsResult,
1689 onpass="ONOS" + controllerStr +
1690 " hosts exist in Mininet",
1691 onfail="ONOS" + controllerStr +
1692 " hosts don't match Mininet" )
1693
1694 devicesResults = devicesResults and currentDevicesResult
1695 linksResults = linksResults and currentLinksResult
1696 hostsResults = hostsResults and currentHostsResult
1697
1698 main.step( "Device information is correct" )
1699 utilities.assert_equals(
1700 expect=main.TRUE,
1701 actual=devicesResults,
1702 onpass="Device information is correct",
1703 onfail="Device information is incorrect" )
1704
1705 main.step( "Links are correct" )
1706 utilities.assert_equals(
1707 expect=main.TRUE,
1708 actual=linksResults,
1709 onpass="Link are correct",
1710 onfail="Links are incorrect" )
1711
1712 main.step( "Hosts are correct" )
1713 utilities.assert_equals(
1714 expect=main.TRUE,
1715 actual=hostsResults,
1716 onpass="Hosts are correct",
1717 onfail="Hosts are incorrect" )
1718
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001719 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001720 """
1721 The Failure case.
1722 """
Jon Halle1a3b752015-07-22 13:02:46 -07001723 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001724 assert main, "main not defined"
1725 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001726 assert main.CLIs, "main.CLIs not defined"
1727 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001728 main.case( "Kill minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001729
1730 main.step( "Checking ONOS Logs for errors" )
1731 for node in main.nodes:
1732 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1733 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1734
Jon Hall3b489db2015-10-05 14:38:37 -07001735 n = len( main.nodes ) # Number of nodes
1736 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1737 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1738 if n > 3:
1739 main.kill.append( p - 1 )
1740 # NOTE: This only works for cluster sizes of 3,5, or 7.
1741
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001742 main.step( "Kill " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001743 killResults = main.TRUE
1744 for i in main.kill:
1745 killResults = killResults and\
1746 main.ONOSbench.onosKill( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001747 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001748 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001749 onpass="ONOS nodes killed successfully",
1750 onfail="ONOS nodes NOT successfully killed" )
1751
1752 def CASE62( self, main ):
1753 """
1754 The bring up stopped nodes
1755 """
1756 import time
1757 assert main.numCtrls, "main.numCtrls not defined"
1758 assert main, "main not defined"
1759 assert utilities.assert_equals, "utilities.assert_equals not defined"
1760 assert main.CLIs, "main.CLIs not defined"
1761 assert main.nodes, "main.nodes not defined"
1762 assert main.kill, "main.kill not defined"
1763 main.case( "Restart minority of ONOS nodes" )
1764
1765 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1766 startResults = main.TRUE
1767 restartTime = time.time()
1768 for i in main.kill:
1769 startResults = startResults and\
1770 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1771 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1772 onpass="ONOS nodes started successfully",
1773 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001774
1775 main.step( "Checking if ONOS is up yet" )
1776 count = 0
1777 onosIsupResult = main.FALSE
1778 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001779 onosIsupResult = main.TRUE
1780 for i in main.kill:
1781 onosIsupResult = onosIsupResult and\
1782 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001783 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001784 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1785 onpass="ONOS restarted successfully",
1786 onfail="ONOS restart NOT successful" )
1787
Jon Halle1a3b752015-07-22 13:02:46 -07001788 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001789 cliResults = main.TRUE
1790 for i in main.kill:
1791 cliResults = cliResults and\
1792 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001793 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001794 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1795 onpass="ONOS cli restarted",
1796 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001797 main.activeNodes.sort()
1798 try:
1799 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1800 "List of active nodes has duplicates, this likely indicates something was run out of order"
1801 except AssertionError:
1802 main.log.exception( "" )
1803 main.cleanup()
1804 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001805
1806 # Grab the time of restart so we chan check how long the gossip
1807 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001808 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001809 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001810 # TODO: MAke this configurable. Also, we are breaking the above timer
1811 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001812 node = main.activeNodes[0]
1813 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1814 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1815 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001816
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        Verifies, across all nodes in main.activeNodes, that:
          - every device still has a master,
          - device roles and intents are readable and consistent,
          - current intents match the pre-failure snapshot,
          - OpenFlow tables on switches s1..s28 are unchanged,
          - leadership election still reports one non-restarted leader.

        NOTE(review): relies on ``intentState`` and ``flows`` being set by
        an earlier case (CASE5, per its comment below) — neither is defined
        in this method; confirm ordering before running this standalone.
        Also relies on main.kill from CASE61 (defaults to [] if unset).
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.kill may not exist if the failure case was skipped;
        # fall back to an empty list so the leadership check still runs
        try:
            main.kill
        except AttributeError:
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master; queried from every active
        # node in parallel threads
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect the role (mastership) output from every active node
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # ONOSMastership is indexed in main.activeNodes order, so map the
        # index back to the 1-based node number for log messages
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes should report the same roles as the first node
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # Dump each node's roles only when they were readable but disagree
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        # Flag any node whose intents output is empty or contains an error
        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # sorted() compares the raw JSON strings character-wise, so this is
        # an order-insensitive equality check against the first node
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                             "nodes" )
        else:
            consistentIntents = False

        # Try to make it easy to figure out what is happening
        #
        # Intent      ONOS1      ONOS2    ...
        # 0x01     INSTALLED  INSTALLING
        # ...        ...         ...
        # ...        ...         ...
        title = "   ID"
        for n in main.activeNodes:
            title += " " * 10 + "ONOS" + str( n + 1 )
        main.log.warn( title )
        # get all intent keys in the cluster
        keys = []
        for nodeStr in ONOSIntents:
            node = json.loads( nodeStr )
            for intent in node:
                keys.append( intent.get( 'id' ) )
        keys = set( keys )
        # One row per intent id, one state column per node
        for key in keys:
            row = "%-13s" % key
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    if intent.get( 'id' ) == key:
                        row += "%-15s" % intent.get( 'state' )
            main.log.warn( row )
        # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a histogram of intent states per node for debugging
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #       maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        if intentState and intentState == ONOSIntents[ 0 ]:
            sameIntents = main.TRUE
            main.log.info( "Intents are consistent with before failure" )
        # TODO: possibly the states have changed? we may need to figure out
        #       what the acceptable states are
        elif len( intentState ) == len( ONOSIntents[ 0 ] ):
            # Same length but not string-equal: fall back to comparing the
            # parsed intent objects individually
            sameIntents = main.TRUE
            try:
                before = json.loads( intentState )
                after = json.loads( ONOSIntents[ 0 ] )
                for intent in before:
                    if intent not in after:
                        sameIntents = main.FALSE
                        main.log.debug( "Intent is not currently in ONOS " +
                                        "(at least in the same form):" )
                        main.log.debug( json.dumps( intent ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        if sameIntents == main.FALSE:
            # Dump before/after intents to help diagnose the difference
            try:
                main.log.debug( "ONOS intents before: " )
                main.log.debug( json.dumps( json.loads( intentState ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
                main.log.debug( "Current ONOS intents: " )
                main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        # Compare each switch's current flow table against the pre-failure
        # snapshot in the (externally set) ``flows`` list; switches are
        # hard-coded as s1..s28 for this topology
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
            if FlowTables == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )

        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes that were killed/restarted; the new leader must
        # not be one of them
        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        # Every active node must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
        # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2133
2134 def CASE8( self, main ):
2135 """
2136 Compare topo
2137 """
2138 import json
2139 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002140 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002141 assert main, "main not defined"
2142 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002143 assert main.CLIs, "main.CLIs not defined"
2144 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002145
2146 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002147 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002148 " and ONOS"
2149
2150 main.step( "Comparing ONOS topology to MN" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002151 topoResult = main.FALSE
2152 elapsed = 0
2153 count = 0
2154 main.step( "Collecting topology information from ONOS" )
2155 startTime = time.time()
2156 # Give time for Gossip to work
2157 while topoResult == main.FALSE and elapsed < 60:
Jon Hall96091e62015-09-21 17:34:17 -07002158 devicesResults = main.TRUE
2159 linksResults = main.TRUE
2160 hostsResults = main.TRUE
2161 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002162 count += 1
2163 cliStart = time.time()
2164 devices = []
2165 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002166 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002167 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002168 name="devices-" + str( i ),
2169 args=[ ] )
2170 threads.append( t )
2171 t.start()
2172
2173 for t in threads:
2174 t.join()
2175 devices.append( t.result )
2176 hosts = []
2177 ipResult = main.TRUE
2178 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002179 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002180 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07002181 name="hosts-" + str( i ),
2182 args=[ ] )
2183 threads.append( t )
2184 t.start()
2185
2186 for t in threads:
2187 t.join()
2188 try:
2189 hosts.append( json.loads( t.result ) )
2190 except ( ValueError, TypeError ):
2191 main.log.exception( "Error parsing hosts results" )
2192 main.log.error( repr( t.result ) )
2193 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002194 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002195 for host in hosts[ controller ]:
2196 if host is None or host.get( 'ipAddresses', [] ) == []:
2197 main.log.error(
2198 "DEBUG:Error with host ipAddresses on controller" +
2199 controllerStr + ": " + str( host ) )
2200 ipResult = main.FALSE
2201 ports = []
2202 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002203 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002204 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002205 name="ports-" + str( i ),
2206 args=[ ] )
2207 threads.append( t )
2208 t.start()
2209
2210 for t in threads:
2211 t.join()
2212 ports.append( t.result )
2213 links = []
2214 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002215 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002216 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002217 name="links-" + str( i ),
2218 args=[ ] )
2219 threads.append( t )
2220 t.start()
2221
2222 for t in threads:
2223 t.join()
2224 links.append( t.result )
2225 clusters = []
2226 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002227 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002228 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002229 name="clusters-" + str( i ),
2230 args=[ ] )
2231 threads.append( t )
2232 t.start()
2233
2234 for t in threads:
2235 t.join()
2236 clusters.append( t.result )
2237
2238 elapsed = time.time() - startTime
2239 cliTime = time.time() - cliStart
2240 print "Elapsed time: " + str( elapsed )
2241 print "CLI time: " + str( cliTime )
2242
2243 mnSwitches = main.Mininet1.getSwitches()
2244 mnLinks = main.Mininet1.getLinks()
2245 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002246 for controller in range( len( main.activeNodes ) ):
2247 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002248 if devices[ controller ] and ports[ controller ] and\
2249 "Error" not in devices[ controller ] and\
2250 "Error" not in ports[ controller ]:
2251
2252 currentDevicesResult = main.Mininet1.compareSwitches(
2253 mnSwitches,
2254 json.loads( devices[ controller ] ),
2255 json.loads( ports[ controller ] ) )
2256 else:
2257 currentDevicesResult = main.FALSE
2258 utilities.assert_equals( expect=main.TRUE,
2259 actual=currentDevicesResult,
2260 onpass="ONOS" + controllerStr +
2261 " Switches view is correct",
2262 onfail="ONOS" + controllerStr +
2263 " Switches view is incorrect" )
2264
2265 if links[ controller ] and "Error" not in links[ controller ]:
2266 currentLinksResult = main.Mininet1.compareLinks(
2267 mnSwitches, mnLinks,
2268 json.loads( links[ controller ] ) )
2269 else:
2270 currentLinksResult = main.FALSE
2271 utilities.assert_equals( expect=main.TRUE,
2272 actual=currentLinksResult,
2273 onpass="ONOS" + controllerStr +
2274 " links view is correct",
2275 onfail="ONOS" + controllerStr +
2276 " links view is incorrect" )
2277
2278 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2279 currentHostsResult = main.Mininet1.compareHosts(
2280 mnHosts,
2281 hosts[ controller ] )
2282 else:
2283 currentHostsResult = main.FALSE
2284 utilities.assert_equals( expect=main.TRUE,
2285 actual=currentHostsResult,
2286 onpass="ONOS" + controllerStr +
2287 " hosts exist in Mininet",
2288 onfail="ONOS" + controllerStr +
2289 " hosts don't match Mininet" )
2290 # CHECKING HOST ATTACHMENT POINTS
2291 hostAttachment = True
2292 zeroHosts = False
2293 # FIXME: topo-HA/obelisk specific mappings:
2294 # key is mac and value is dpid
2295 mappings = {}
2296 for i in range( 1, 29 ): # hosts 1 through 28
2297 # set up correct variables:
2298 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2299 if i == 1:
2300 deviceId = "1000".zfill(16)
2301 elif i == 2:
2302 deviceId = "2000".zfill(16)
2303 elif i == 3:
2304 deviceId = "3000".zfill(16)
2305 elif i == 4:
2306 deviceId = "3004".zfill(16)
2307 elif i == 5:
2308 deviceId = "5000".zfill(16)
2309 elif i == 6:
2310 deviceId = "6000".zfill(16)
2311 elif i == 7:
2312 deviceId = "6007".zfill(16)
2313 elif i >= 8 and i <= 17:
2314 dpid = '3' + str( i ).zfill( 3 )
2315 deviceId = dpid.zfill(16)
2316 elif i >= 18 and i <= 27:
2317 dpid = '6' + str( i ).zfill( 3 )
2318 deviceId = dpid.zfill(16)
2319 elif i == 28:
2320 deviceId = "2800".zfill(16)
2321 mappings[ macId ] = deviceId
2322 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2323 if hosts[ controller ] == []:
2324 main.log.warn( "There are no hosts discovered" )
2325 zeroHosts = True
2326 else:
2327 for host in hosts[ controller ]:
2328 mac = None
2329 location = None
2330 device = None
2331 port = None
2332 try:
2333 mac = host.get( 'mac' )
2334 assert mac, "mac field could not be found for this host object"
2335
2336 location = host.get( 'location' )
2337 assert location, "location field could not be found for this host object"
2338
2339 # Trim the protocol identifier off deviceId
2340 device = str( location.get( 'elementId' ) ).split(':')[1]
2341 assert device, "elementId field could not be found for this host location object"
2342
2343 port = location.get( 'port' )
2344 assert port, "port field could not be found for this host location object"
2345
2346 # Now check if this matches where they should be
2347 if mac and device and port:
2348 if str( port ) != "1":
2349 main.log.error( "The attachment port is incorrect for " +
2350 "host " + str( mac ) +
2351 ". Expected: 1 Actual: " + str( port) )
2352 hostAttachment = False
2353 if device != mappings[ str( mac ) ]:
2354 main.log.error( "The attachment device is incorrect for " +
2355 "host " + str( mac ) +
2356 ". Expected: " + mappings[ str( mac ) ] +
2357 " Actual: " + device )
2358 hostAttachment = False
2359 else:
2360 hostAttachment = False
2361 except AssertionError:
2362 main.log.exception( "Json object not as expected" )
2363 main.log.error( repr( host ) )
2364 hostAttachment = False
2365 else:
2366 main.log.error( "No hosts json output or \"Error\"" +
2367 " in output. hosts = " +
2368 repr( hosts[ controller ] ) )
2369 if zeroHosts is False:
2370 hostAttachment = True
2371
2372 # END CHECKING HOST ATTACHMENT POINTS
2373 devicesResults = devicesResults and currentDevicesResult
2374 linksResults = linksResults and currentLinksResult
2375 hostsResults = hostsResults and currentHostsResult
2376 hostAttachmentResults = hostAttachmentResults and\
2377 hostAttachment
2378
2379 # Compare json objects for hosts and dataplane clusters
2380
2381 # hosts
2382 main.step( "Hosts view is consistent across all ONOS nodes" )
2383 consistentHostsResult = main.TRUE
2384 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002385 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002386 if "Error" not in hosts[ controller ]:
2387 if hosts[ controller ] == hosts[ 0 ]:
2388 continue
2389 else: # hosts not consistent
2390 main.log.error( "hosts from ONOS" + controllerStr +
2391 " is inconsistent with ONOS1" )
2392 main.log.warn( repr( hosts[ controller ] ) )
2393 consistentHostsResult = main.FALSE
2394
2395 else:
2396 main.log.error( "Error in getting ONOS hosts from ONOS" +
2397 controllerStr )
2398 consistentHostsResult = main.FALSE
2399 main.log.warn( "ONOS" + controllerStr +
2400 " hosts response: " +
2401 repr( hosts[ controller ] ) )
2402 utilities.assert_equals(
2403 expect=main.TRUE,
2404 actual=consistentHostsResult,
2405 onpass="Hosts view is consistent across all ONOS nodes",
2406 onfail="ONOS nodes have different views of hosts" )
2407
2408 main.step( "Hosts information is correct" )
2409 hostsResults = hostsResults and ipResult
2410 utilities.assert_equals(
2411 expect=main.TRUE,
2412 actual=hostsResults,
2413 onpass="Host information is correct",
2414 onfail="Host information is incorrect" )
2415
2416 main.step( "Host attachment points to the network" )
2417 utilities.assert_equals(
2418 expect=True,
2419 actual=hostAttachmentResults,
2420 onpass="Hosts are correctly attached to the network",
2421 onfail="ONOS did not correctly attach hosts to the network" )
2422
2423 # Strongly connected clusters of devices
2424 main.step( "Clusters view is consistent across all ONOS nodes" )
2425 consistentClustersResult = main.TRUE
2426 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002427 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002428 if "Error" not in clusters[ controller ]:
2429 if clusters[ controller ] == clusters[ 0 ]:
2430 continue
2431 else: # clusters not consistent
2432 main.log.error( "clusters from ONOS" +
2433 controllerStr +
2434 " is inconsistent with ONOS1" )
2435 consistentClustersResult = main.FALSE
2436
2437 else:
2438 main.log.error( "Error in getting dataplane clusters " +
2439 "from ONOS" + controllerStr )
2440 consistentClustersResult = main.FALSE
2441 main.log.warn( "ONOS" + controllerStr +
2442 " clusters response: " +
2443 repr( clusters[ controller ] ) )
2444 utilities.assert_equals(
2445 expect=main.TRUE,
2446 actual=consistentClustersResult,
2447 onpass="Clusters view is consistent across all ONOS nodes",
2448 onfail="ONOS nodes have different views of clusters" )
2449
2450 main.step( "There is only one SCC" )
2451 # there should always only be one cluster
2452 try:
2453 numClusters = len( json.loads( clusters[ 0 ] ) )
2454 except ( ValueError, TypeError ):
2455 main.log.exception( "Error parsing clusters[0]: " +
2456 repr( clusters[0] ) )
2457 clusterResults = main.FALSE
2458 if numClusters == 1:
2459 clusterResults = main.TRUE
2460 utilities.assert_equals(
2461 expect=1,
2462 actual=numClusters,
2463 onpass="ONOS shows 1 SCC",
2464 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2465
2466 topoResult = ( devicesResults and linksResults
2467 and hostsResults and consistentHostsResult
2468 and consistentClustersResult and clusterResults
2469 and ipResult and hostAttachmentResults )
2470
2471 topoResult = topoResult and int( count <= 2 )
2472 note = "note it takes about " + str( int( cliTime ) ) + \
2473 " seconds for the test to make all the cli calls to fetch " +\
2474 "the topology from each ONOS instance"
2475 main.log.info(
2476 "Very crass estimate for topology discovery/convergence( " +
2477 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2478 str( count ) + " tries" )
2479
2480 main.step( "Device information is correct" )
2481 utilities.assert_equals(
2482 expect=main.TRUE,
2483 actual=devicesResults,
2484 onpass="Device information is correct",
2485 onfail="Device information is incorrect" )
2486
2487 main.step( "Links are correct" )
2488 utilities.assert_equals(
2489 expect=main.TRUE,
2490 actual=linksResults,
2491 onpass="Link are correct",
2492 onfail="Links are incorrect" )
2493
2494 # FIXME: move this to an ONOS state case
2495 main.step( "Checking ONOS nodes" )
2496 nodesOutput = []
2497 nodeResults = main.TRUE
2498 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002499 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002500 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002501 name="nodes-" + str( i ),
2502 args=[ ] )
2503 threads.append( t )
2504 t.start()
2505
2506 for t in threads:
2507 t.join()
2508 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002509 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002510 for i in nodesOutput:
2511 try:
2512 current = json.loads( i )
2513 for node in current:
2514 currentResult = main.FALSE
2515 if node['ip'] in ips: # node in nodes() output is in cell
2516 if node['state'] == 'ACTIVE':
2517 currentResult = main.TRUE
2518 else:
2519 main.log.error( "Error in ONOS node availability" )
2520 main.log.error(
2521 json.dumps( current,
2522 sort_keys=True,
2523 indent=4,
2524 separators=( ',', ': ' ) ) )
2525 break
2526 nodeResults = nodeResults and currentResult
2527 except ( ValueError, TypeError ):
2528 main.log.error( "Error parsing nodes output" )
2529 main.log.warn( repr( i ) )
2530 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2531 onpass="Nodes check successful",
2532 onfail="Nodes check NOT successful" )
2533
2534 def CASE9( self, main ):
2535 """
2536 Link s3-s28 down
2537 """
2538 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002539 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002540 assert main, "main not defined"
2541 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002542 assert main.CLIs, "main.CLIs not defined"
2543 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002544 # NOTE: You should probably run a topology check after this
2545
2546 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2547
2548 description = "Turn off a link to ensure that Link Discovery " +\
2549 "is working properly"
2550 main.case( description )
2551
2552 main.step( "Kill Link between s3 and s28" )
2553 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2554 main.log.info( "Waiting " + str( linkSleep ) +
2555 " seconds for link down to be discovered" )
2556 time.sleep( linkSleep )
2557 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2558 onpass="Link down successful",
2559 onfail="Failed to bring link down" )
2560 # TODO do some sort of check here
2561
2562 def CASE10( self, main ):
2563 """
2564 Link s3-s28 up
2565 """
2566 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002567 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002568 assert main, "main not defined"
2569 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002570 assert main.CLIs, "main.CLIs not defined"
2571 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002572 # NOTE: You should probably run a topology check after this
2573
2574 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2575
2576 description = "Restore a link to ensure that Link Discovery is " + \
2577 "working properly"
2578 main.case( description )
2579
2580 main.step( "Bring link between s3 and s28 back up" )
2581 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2582 main.log.info( "Waiting " + str( linkSleep ) +
2583 " seconds for link up to be discovered" )
2584 time.sleep( linkSleep )
2585 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2586 onpass="Link up successful",
2587 onfail="Failed to bring link up" )
2588 # TODO do some sort of check here
2589
2590 def CASE11( self, main ):
2591 """
2592 Switch Down
2593 """
2594 # NOTE: You should probably run a topology check after this
2595 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002596 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002597 assert main, "main not defined"
2598 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002599 assert main.CLIs, "main.CLIs not defined"
2600 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002601
2602 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2603
2604 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002605 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002606 main.case( description )
2607 switch = main.params[ 'kill' ][ 'switch' ]
2608 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2609
2610 # TODO: Make this switch parameterizable
2611 main.step( "Kill " + switch )
2612 main.log.info( "Deleting " + switch )
2613 main.Mininet1.delSwitch( switch )
2614 main.log.info( "Waiting " + str( switchSleep ) +
2615 " seconds for switch down to be discovered" )
2616 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002617 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002618 # Peek at the deleted switch
2619 main.log.warn( str( device ) )
2620 result = main.FALSE
2621 if device and device[ 'available' ] is False:
2622 result = main.TRUE
2623 utilities.assert_equals( expect=main.TRUE, actual=result,
2624 onpass="Kill switch successful",
2625 onfail="Failed to kill switch?" )
2626
2627 def CASE12( self, main ):
2628 """
2629 Switch Up
2630 """
2631 # NOTE: You should probably run a topology check after this
2632 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002633 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002634 assert main, "main not defined"
2635 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002636 assert main.CLIs, "main.CLIs not defined"
2637 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002638 assert ONOS1Port, "ONOS1Port not defined"
2639 assert ONOS2Port, "ONOS2Port not defined"
2640 assert ONOS3Port, "ONOS3Port not defined"
2641 assert ONOS4Port, "ONOS4Port not defined"
2642 assert ONOS5Port, "ONOS5Port not defined"
2643 assert ONOS6Port, "ONOS6Port not defined"
2644 assert ONOS7Port, "ONOS7Port not defined"
2645
2646 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2647 switch = main.params[ 'kill' ][ 'switch' ]
2648 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2649 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002650 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002651 description = "Adding a switch to ensure it is discovered correctly"
2652 main.case( description )
2653
2654 main.step( "Add back " + switch )
2655 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2656 for peer in links:
2657 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002658 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002659 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2660 main.log.info( "Waiting " + str( switchSleep ) +
2661 " seconds for switch up to be discovered" )
2662 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002663 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002664 # Peek at the deleted switch
2665 main.log.warn( str( device ) )
2666 result = main.FALSE
2667 if device and device[ 'available' ]:
2668 result = main.TRUE
2669 utilities.assert_equals( expect=main.TRUE, actual=result,
2670 onpass="add switch successful",
2671 onfail="Failed to add switch?" )
2672
2673 def CASE13( self, main ):
2674 """
2675 Clean up
2676 """
2677 import os
2678 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002679 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002680 assert main, "main not defined"
2681 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002682 assert main.CLIs, "main.CLIs not defined"
2683 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002684
2685 # printing colors to terminal
2686 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2687 'blue': '\033[94m', 'green': '\033[92m',
2688 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2689 main.case( "Test Cleanup" )
2690 main.step( "Killing tcpdumps" )
2691 main.Mininet2.stopTcpdump()
2692
2693 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002694 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002695 main.step( "Copying MN pcap and ONOS log files to test station" )
2696 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2697 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002698 # NOTE: MN Pcap file is being saved to logdir.
2699 # We scp this file as MN and TestON aren't necessarily the same vm
2700
2701 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002702 # TODO: Load these from params
2703 # NOTE: must end in /
2704 logFolder = "/opt/onos/log/"
2705 logFiles = [ "karaf.log", "karaf.log.1" ]
2706 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002707 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002708 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002709 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002710 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2711 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002712 # std*.log's
2713 # NOTE: must end in /
2714 logFolder = "/opt/onos/var/"
2715 logFiles = [ "stderr.log", "stdout.log" ]
2716 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002717 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002718 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002719 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002720 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2721 logFolder + f, dstName )
2722 else:
2723 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002724
2725 main.step( "Stopping Mininet" )
2726 mnResult = main.Mininet1.stopNet()
2727 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2728 onpass="Mininet stopped",
2729 onfail="MN cleanup NOT successful" )
2730
2731 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002732 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002733 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2734 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002735
2736 try:
2737 timerLog = open( main.logdir + "/Timers.csv", 'w')
2738 # Overwrite with empty line and close
2739 labels = "Gossip Intents, Restart"
2740 data = str( gossipTime ) + ", " + str( main.restartTime )
2741 timerLog.write( labels + "\n" + data )
2742 timerLog.close()
2743 except NameError, e:
2744 main.log.exception(e)
2745
2746 def CASE14( self, main ):
2747 """
2748 start election app on all onos nodes
2749 """
Jon Halle1a3b752015-07-22 13:02:46 -07002750 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002751 assert main, "main not defined"
2752 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002753 assert main.CLIs, "main.CLIs not defined"
2754 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002755
2756 main.case("Start Leadership Election app")
2757 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002758 onosCli = main.CLIs[ main.activeNodes[0] ]
2759 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002760 utilities.assert_equals(
2761 expect=main.TRUE,
2762 actual=appResult,
2763 onpass="Election app installed",
2764 onfail="Something went wrong with installing Leadership election" )
2765
2766 main.step( "Run for election on each node" )
2767 leaderResult = main.TRUE
2768 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002769 for i in main.activeNodes:
2770 main.CLIs[i].electionTestRun()
2771 for i in main.activeNodes:
2772 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002773 leader = cli.electionTestLeader()
2774 if leader is None or leader == main.FALSE:
2775 main.log.error( cli.name + ": Leader for the election app " +
2776 "should be an ONOS node, instead got '" +
2777 str( leader ) + "'" )
2778 leaderResult = main.FALSE
2779 leaders.append( leader )
2780 utilities.assert_equals(
2781 expect=main.TRUE,
2782 actual=leaderResult,
2783 onpass="Successfully ran for leadership",
2784 onfail="Failed to run for leadership" )
2785
2786 main.step( "Check that each node shows the same leader" )
2787 sameLeader = main.TRUE
2788 if len( set( leaders ) ) != 1:
2789 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002790 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002791 str( leaders ) )
2792 utilities.assert_equals(
2793 expect=main.TRUE,
2794 actual=sameLeader,
2795 onpass="Leadership is consistent for the election topic",
2796 onfail="Nodes have different leaders" )
2797
2798 def CASE15( self, main ):
2799 """
2800 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002801 15.1 Run election on each node
2802 15.2 Check that each node has the same leaders and candidates
2803 15.3 Find current leader and withdraw
2804 15.4 Check that a new node was elected leader
2805 15.5 Check that that new leader was the candidate of old leader
2806 15.6 Run for election on old leader
2807 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2808 15.8 Make sure that the old leader was added to the candidate list
2809
2810 old and new variable prefixes refer to data from before vs after
2811 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002812 """
2813 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002814 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002815 assert main, "main not defined"
2816 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002817 assert main.CLIs, "main.CLIs not defined"
2818 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002819
Jon Hall5cf14d52015-07-16 12:15:19 -07002820 description = "Check that Leadership Election is still functional"
2821 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002822 # NOTE: Need to re-run since being a canidate is not persistant
2823 # TODO: add check for "Command not found:" in the driver, this
2824 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002825
acsmars71adceb2015-08-31 15:09:26 -07002826 oldLeaders = [] # leaders by node before withdrawl from candidates
2827 newLeaders = [] # leaders by node after withdrawl from candidates
2828 oldAllCandidates = [] # list of lists of each nodes' candidates before
2829 newAllCandidates = [] # list of lists of each nodes' candidates after
2830 oldCandidates = [] # list of candidates from node 0 before withdrawl
2831 newCandidates = [] # list of candidates from node 0 after withdrawl
2832 oldLeader = '' # the old leader from oldLeaders, None if not same
2833 newLeader = '' # the new leaders fron newLoeaders, None if not same
2834 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2835 expectNoLeader = False # True when there is only one leader
2836 if main.numCtrls == 1:
2837 expectNoLeader = True
2838
2839 main.step( "Run for election on each node" )
2840 electionResult = main.TRUE
2841
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002842 for i in main.activeNodes: # run test election on each node
2843 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002844 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002845 utilities.assert_equals(
2846 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002847 actual=electionResult,
2848 onpass="All nodes successfully ran for leadership",
2849 onfail="At least one node failed to run for leadership" )
2850
acsmars3a72bde2015-09-02 14:16:22 -07002851 if electionResult == main.FALSE:
2852 main.log.error(
2853 "Skipping Test Case because Election Test App isn't loaded" )
2854 main.skipCase()
2855
acsmars71adceb2015-08-31 15:09:26 -07002856 main.step( "Check that each node shows the same leader and candidates" )
2857 sameResult = main.TRUE
2858 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002859 for i in main.activeNodes:
2860 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002861 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2862 oldAllCandidates.append( node )
2863 oldLeaders.append( node[ 0 ] )
2864 oldCandidates = oldAllCandidates[ 0 ]
2865
2866 # Check that each node has the same leader. Defines oldLeader
2867 if len( set( oldLeaders ) ) != 1:
2868 sameResult = main.FALSE
2869 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2870 oldLeader = None
2871 else:
2872 oldLeader = oldLeaders[ 0 ]
2873
2874 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002875 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002876 for candidates in oldAllCandidates:
2877 if set( candidates ) != set( oldCandidates ):
2878 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002879 candidateDiscrepancy = True
2880
2881 if candidateDiscrepancy:
2882 failMessage += " and candidates"
2883
acsmars71adceb2015-08-31 15:09:26 -07002884 utilities.assert_equals(
2885 expect=main.TRUE,
2886 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002887 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002888 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002889
2890 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002891 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002892 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002893 if oldLeader is None:
2894 main.log.error( "Leadership isn't consistent." )
2895 withdrawResult = main.FALSE
2896 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002897 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002898 if oldLeader == main.nodes[ i ].ip_address:
2899 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002900 break
2901 else: # FOR/ELSE statement
2902 main.log.error( "Leader election, could not find current leader" )
2903 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002904 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002905 utilities.assert_equals(
2906 expect=main.TRUE,
2907 actual=withdrawResult,
2908 onpass="Node was withdrawn from election",
2909 onfail="Node was not withdrawn from election" )
2910
acsmars71adceb2015-08-31 15:09:26 -07002911 main.step( "Check that a new node was elected leader" )
2912
Jon Hall5cf14d52015-07-16 12:15:19 -07002913 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002914 newLeaderResult = main.TRUE
2915 failMessage = "Nodes have different leaders"
2916
2917 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002918 for i in main.activeNodes:
2919 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002920 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2921 # elections might no have finished yet
2922 if node[ 0 ] == 'none' and not expectNoLeader:
2923 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2924 "sure elections are complete." )
2925 time.sleep(5)
2926 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2927 # election still isn't done or there is a problem
2928 if node[ 0 ] == 'none':
2929 main.log.error( "No leader was elected on at least 1 node" )
2930 newLeaderResult = main.FALSE
2931 newAllCandidates.append( node )
2932 newLeaders.append( node[ 0 ] )
2933 newCandidates = newAllCandidates[ 0 ]
2934
2935 # Check that each node has the same leader. Defines newLeader
2936 if len( set( newLeaders ) ) != 1:
2937 newLeaderResult = main.FALSE
2938 main.log.error( "Nodes have different leaders: " +
2939 str( newLeaders ) )
2940 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002941 else:
acsmars71adceb2015-08-31 15:09:26 -07002942 newLeader = newLeaders[ 0 ]
2943
2944 # Check that each node's candidate list is the same
2945 for candidates in newAllCandidates:
2946 if set( candidates ) != set( newCandidates ):
2947 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002948 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002949
2950 # Check that the new leader is not the older leader, which was withdrawn
2951 if newLeader == oldLeader:
2952 newLeaderResult = main.FALSE
2953 main.log.error( "All nodes still see old leader: " + oldLeader +
2954 " as the current leader" )
2955
Jon Hall5cf14d52015-07-16 12:15:19 -07002956 utilities.assert_equals(
2957 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002958 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002959 onpass="Leadership election passed",
2960 onfail="Something went wrong with Leadership election" )
2961
acsmars71adceb2015-08-31 15:09:26 -07002962 main.step( "Check that that new leader was the candidate of old leader")
2963 # candidates[ 2 ] should be come the top candidate after withdrawl
2964 correctCandidateResult = main.TRUE
2965 if expectNoLeader:
2966 if newLeader == 'none':
2967 main.log.info( "No leader expected. None found. Pass" )
2968 correctCandidateResult = main.TRUE
2969 else:
2970 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2971 correctCandidateResult = main.FALSE
2972 elif newLeader != oldCandidates[ 2 ]:
2973 correctCandidateResult = main.FALSE
2974 main.log.error( "Candidate " + newLeader + " was elected. " +
2975 oldCandidates[ 2 ] + " should have had priority." )
2976
2977 utilities.assert_equals(
2978 expect=main.TRUE,
2979 actual=correctCandidateResult,
2980 onpass="Correct Candidate Elected",
2981 onfail="Incorrect Candidate Elected" )
2982
Jon Hall5cf14d52015-07-16 12:15:19 -07002983 main.step( "Run for election on old leader( just so everyone " +
2984 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002985 if oldLeaderCLI is not None:
2986 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002987 else:
acsmars71adceb2015-08-31 15:09:26 -07002988 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002989 runResult = main.FALSE
2990 utilities.assert_equals(
2991 expect=main.TRUE,
2992 actual=runResult,
2993 onpass="App re-ran for election",
2994 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07002995 main.step(
2996 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002997 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07002998 positionResult = main.TRUE
2999 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
3000
3001 # Reset and reuse the new candidate and leaders lists
3002 newAllCandidates = []
3003 newCandidates = []
3004 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003005 for i in main.activeNodes:
3006 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003007 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3008 if oldLeader not in node: # election might no have finished yet
3009 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3010 "be sure elections are complete" )
3011 time.sleep(5)
3012 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3013 if oldLeader not in node: # election still isn't done, errors
3014 main.log.error(
3015 "Old leader was not elected on at least one node" )
3016 positionResult = main.FALSE
3017 newAllCandidates.append( node )
3018 newLeaders.append( node[ 0 ] )
3019 newCandidates = newAllCandidates[ 0 ]
3020
3021 # Check that each node has the same leader. Defines newLeader
3022 if len( set( newLeaders ) ) != 1:
3023 positionResult = main.FALSE
3024 main.log.error( "Nodes have different leaders: " +
3025 str( newLeaders ) )
3026 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003027 else:
acsmars71adceb2015-08-31 15:09:26 -07003028 newLeader = newLeaders[ 0 ]
3029
3030 # Check that each node's candidate list is the same
3031 for candidates in newAllCandidates:
3032 if set( candidates ) != set( newCandidates ):
3033 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003034 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003035
3036 # Check that the re-elected node is last on the candidate List
3037 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003038 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003039 str( newCandidates ) )
3040 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003041
3042 utilities.assert_equals(
3043 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003044 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003045 onpass="Old leader successfully re-ran for election",
3046 onfail="Something went wrong with Leadership election after " +
3047 "the old leader re-ran for election" )
3048
3049 def CASE16( self, main ):
3050 """
3051 Install Distributed Primitives app
3052 """
3053 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003054 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003055 assert main, "main not defined"
3056 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003057 assert main.CLIs, "main.CLIs not defined"
3058 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003059
3060 # Variables for the distributed primitives tests
3061 global pCounterName
3062 global iCounterName
3063 global pCounterValue
3064 global iCounterValue
3065 global onosSet
3066 global onosSetName
3067 pCounterName = "TestON-Partitions"
3068 iCounterName = "TestON-inMemory"
3069 pCounterValue = 0
3070 iCounterValue = 0
3071 onosSet = set([])
3072 onosSetName = "TestON-set"
3073
3074 description = "Install Primitives app"
3075 main.case( description )
3076 main.step( "Install Primitives app" )
3077 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003078 node = main.activeNodes[0]
3079 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003080 utilities.assert_equals( expect=main.TRUE,
3081 actual=appResults,
3082 onpass="Primitives app activated",
3083 onfail="Primitives app not activated" )
3084 time.sleep( 5 ) # To allow all nodes to activate
3085
3086 def CASE17( self, main ):
3087 """
3088 Check for basic functionality with distributed primitives
3089 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003090 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003091 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003092 assert main, "main not defined"
3093 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003094 assert main.CLIs, "main.CLIs not defined"
3095 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003096 assert pCounterName, "pCounterName not defined"
3097 assert iCounterName, "iCounterName not defined"
3098 assert onosSetName, "onosSetName not defined"
3099 # NOTE: assert fails if value is 0/None/Empty/False
3100 try:
3101 pCounterValue
3102 except NameError:
3103 main.log.error( "pCounterValue not defined, setting to 0" )
3104 pCounterValue = 0
3105 try:
3106 iCounterValue
3107 except NameError:
3108 main.log.error( "iCounterValue not defined, setting to 0" )
3109 iCounterValue = 0
3110 try:
3111 onosSet
3112 except NameError:
3113 main.log.error( "onosSet not defined, setting to empty Set" )
3114 onosSet = set([])
3115 # Variables for the distributed primitives tests. These are local only
3116 addValue = "a"
3117 addAllValue = "a b c d e f"
3118 retainValue = "c d e f"
3119
3120 description = "Check for basic functionality with distributed " +\
3121 "primitives"
3122 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003123 main.caseExplanation = "Test the methods of the distributed " +\
3124 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003125 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003126 # Partitioned counters
3127 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003128 pCounters = []
3129 threads = []
3130 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003131 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003132 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3133 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003134 args=[ pCounterName ] )
3135 pCounterValue += 1
3136 addedPValues.append( pCounterValue )
3137 threads.append( t )
3138 t.start()
3139
3140 for t in threads:
3141 t.join()
3142 pCounters.append( t.result )
3143 # Check that counter incremented numController times
3144 pCounterResults = True
3145 for i in addedPValues:
3146 tmpResult = i in pCounters
3147 pCounterResults = pCounterResults and tmpResult
3148 if not tmpResult:
3149 main.log.error( str( i ) + " is not in partitioned "
3150 "counter incremented results" )
3151 utilities.assert_equals( expect=True,
3152 actual=pCounterResults,
3153 onpass="Default counter incremented",
3154 onfail="Error incrementing default" +
3155 " counter" )
3156
Jon Halle1a3b752015-07-22 13:02:46 -07003157 main.step( "Get then Increment a default counter on each node" )
3158 pCounters = []
3159 threads = []
3160 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003161 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003162 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3163 name="counterGetAndAdd-" + str( i ),
3164 args=[ pCounterName ] )
3165 addedPValues.append( pCounterValue )
3166 pCounterValue += 1
3167 threads.append( t )
3168 t.start()
3169
3170 for t in threads:
3171 t.join()
3172 pCounters.append( t.result )
3173 # Check that counter incremented numController times
3174 pCounterResults = True
3175 for i in addedPValues:
3176 tmpResult = i in pCounters
3177 pCounterResults = pCounterResults and tmpResult
3178 if not tmpResult:
3179 main.log.error( str( i ) + " is not in partitioned "
3180 "counter incremented results" )
3181 utilities.assert_equals( expect=True,
3182 actual=pCounterResults,
3183 onpass="Default counter incremented",
3184 onfail="Error incrementing default" +
3185 " counter" )
3186
3187 main.step( "Counters we added have the correct values" )
3188 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3189 utilities.assert_equals( expect=main.TRUE,
3190 actual=incrementCheck,
3191 onpass="Added counters are correct",
3192 onfail="Added counters are incorrect" )
3193
3194 main.step( "Add -8 to then get a default counter on each node" )
3195 pCounters = []
3196 threads = []
3197 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003198 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003199 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3200 name="counterIncrement-" + str( i ),
3201 args=[ pCounterName ],
3202 kwargs={ "delta": -8 } )
3203 pCounterValue += -8
3204 addedPValues.append( pCounterValue )
3205 threads.append( t )
3206 t.start()
3207
3208 for t in threads:
3209 t.join()
3210 pCounters.append( t.result )
3211 # Check that counter incremented numController times
3212 pCounterResults = True
3213 for i in addedPValues:
3214 tmpResult = i in pCounters
3215 pCounterResults = pCounterResults and tmpResult
3216 if not tmpResult:
3217 main.log.error( str( i ) + " is not in partitioned "
3218 "counter incremented results" )
3219 utilities.assert_equals( expect=True,
3220 actual=pCounterResults,
3221 onpass="Default counter incremented",
3222 onfail="Error incrementing default" +
3223 " counter" )
3224
3225 main.step( "Add 5 to then get a default counter on each node" )
3226 pCounters = []
3227 threads = []
3228 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003229 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003230 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3231 name="counterIncrement-" + str( i ),
3232 args=[ pCounterName ],
3233 kwargs={ "delta": 5 } )
3234 pCounterValue += 5
3235 addedPValues.append( pCounterValue )
3236 threads.append( t )
3237 t.start()
3238
3239 for t in threads:
3240 t.join()
3241 pCounters.append( t.result )
3242 # Check that counter incremented numController times
3243 pCounterResults = True
3244 for i in addedPValues:
3245 tmpResult = i in pCounters
3246 pCounterResults = pCounterResults and tmpResult
3247 if not tmpResult:
3248 main.log.error( str( i ) + " is not in partitioned "
3249 "counter incremented results" )
3250 utilities.assert_equals( expect=True,
3251 actual=pCounterResults,
3252 onpass="Default counter incremented",
3253 onfail="Error incrementing default" +
3254 " counter" )
3255
3256 main.step( "Get then add 5 to a default counter on each node" )
3257 pCounters = []
3258 threads = []
3259 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003260 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003261 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3262 name="counterIncrement-" + str( i ),
3263 args=[ pCounterName ],
3264 kwargs={ "delta": 5 } )
3265 addedPValues.append( pCounterValue )
3266 pCounterValue += 5
3267 threads.append( t )
3268 t.start()
3269
3270 for t in threads:
3271 t.join()
3272 pCounters.append( t.result )
3273 # Check that counter incremented numController times
3274 pCounterResults = True
3275 for i in addedPValues:
3276 tmpResult = i in pCounters
3277 pCounterResults = pCounterResults and tmpResult
3278 if not tmpResult:
3279 main.log.error( str( i ) + " is not in partitioned "
3280 "counter incremented results" )
3281 utilities.assert_equals( expect=True,
3282 actual=pCounterResults,
3283 onpass="Default counter incremented",
3284 onfail="Error incrementing default" +
3285 " counter" )
3286
3287 main.step( "Counters we added have the correct values" )
3288 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3289 utilities.assert_equals( expect=main.TRUE,
3290 actual=incrementCheck,
3291 onpass="Added counters are correct",
3292 onfail="Added counters are incorrect" )
3293
3294 # In-Memory counters
3295 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003296 iCounters = []
3297 addedIValues = []
3298 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003299 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003300 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003301 name="icounterIncrement-" + str( i ),
3302 args=[ iCounterName ],
3303 kwargs={ "inMemory": True } )
3304 iCounterValue += 1
3305 addedIValues.append( iCounterValue )
3306 threads.append( t )
3307 t.start()
3308
3309 for t in threads:
3310 t.join()
3311 iCounters.append( t.result )
3312 # Check that counter incremented numController times
3313 iCounterResults = True
3314 for i in addedIValues:
3315 tmpResult = i in iCounters
3316 iCounterResults = iCounterResults and tmpResult
3317 if not tmpResult:
3318 main.log.error( str( i ) + " is not in the in-memory "
3319 "counter incremented results" )
3320 utilities.assert_equals( expect=True,
3321 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003322 onpass="In-memory counter incremented",
3323 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003324 " counter" )
3325
Jon Halle1a3b752015-07-22 13:02:46 -07003326 main.step( "Get then Increment a in-memory counter on each node" )
3327 iCounters = []
3328 threads = []
3329 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003330 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003331 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3332 name="counterGetAndAdd-" + str( i ),
3333 args=[ iCounterName ],
3334 kwargs={ "inMemory": True } )
3335 addedIValues.append( iCounterValue )
3336 iCounterValue += 1
3337 threads.append( t )
3338 t.start()
3339
3340 for t in threads:
3341 t.join()
3342 iCounters.append( t.result )
3343 # Check that counter incremented numController times
3344 iCounterResults = True
3345 for i in addedIValues:
3346 tmpResult = i in iCounters
3347 iCounterResults = iCounterResults and tmpResult
3348 if not tmpResult:
3349 main.log.error( str( i ) + " is not in in-memory "
3350 "counter incremented results" )
3351 utilities.assert_equals( expect=True,
3352 actual=iCounterResults,
3353 onpass="In-memory counter incremented",
3354 onfail="Error incrementing in-memory" +
3355 " counter" )
3356
3357 main.step( "Counters we added have the correct values" )
3358 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3359 utilities.assert_equals( expect=main.TRUE,
3360 actual=incrementCheck,
3361 onpass="Added counters are correct",
3362 onfail="Added counters are incorrect" )
3363
3364 main.step( "Add -8 to then get a in-memory counter on each node" )
3365 iCounters = []
3366 threads = []
3367 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003368 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003369 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3370 name="counterIncrement-" + str( i ),
3371 args=[ iCounterName ],
3372 kwargs={ "delta": -8, "inMemory": True } )
3373 iCounterValue += -8
3374 addedIValues.append( iCounterValue )
3375 threads.append( t )
3376 t.start()
3377
3378 for t in threads:
3379 t.join()
3380 iCounters.append( t.result )
3381 # Check that counter incremented numController times
3382 iCounterResults = True
3383 for i in addedIValues:
3384 tmpResult = i in iCounters
3385 iCounterResults = iCounterResults and tmpResult
3386 if not tmpResult:
3387 main.log.error( str( i ) + " is not in in-memory "
3388 "counter incremented results" )
3389 utilities.assert_equals( expect=True,
3390 actual=pCounterResults,
3391 onpass="In-memory counter incremented",
3392 onfail="Error incrementing in-memory" +
3393 " counter" )
3394
3395 main.step( "Add 5 to then get a in-memory counter on each node" )
3396 iCounters = []
3397 threads = []
3398 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003399 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003400 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3401 name="counterIncrement-" + str( i ),
3402 args=[ iCounterName ],
3403 kwargs={ "delta": 5, "inMemory": True } )
3404 iCounterValue += 5
3405 addedIValues.append( iCounterValue )
3406 threads.append( t )
3407 t.start()
3408
3409 for t in threads:
3410 t.join()
3411 iCounters.append( t.result )
3412 # Check that counter incremented numController times
3413 iCounterResults = True
3414 for i in addedIValues:
3415 tmpResult = i in iCounters
3416 iCounterResults = iCounterResults and tmpResult
3417 if not tmpResult:
3418 main.log.error( str( i ) + " is not in in-memory "
3419 "counter incremented results" )
3420 utilities.assert_equals( expect=True,
3421 actual=pCounterResults,
3422 onpass="In-memory counter incremented",
3423 onfail="Error incrementing in-memory" +
3424 " counter" )
3425
3426 main.step( "Get then add 5 to a in-memory counter on each node" )
3427 iCounters = []
3428 threads = []
3429 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003430 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003431 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3432 name="counterIncrement-" + str( i ),
3433 args=[ iCounterName ],
3434 kwargs={ "delta": 5, "inMemory": True } )
3435 addedIValues.append( iCounterValue )
3436 iCounterValue += 5
3437 threads.append( t )
3438 t.start()
3439
3440 for t in threads:
3441 t.join()
3442 iCounters.append( t.result )
3443 # Check that counter incremented numController times
3444 iCounterResults = True
3445 for i in addedIValues:
3446 tmpResult = i in iCounters
3447 iCounterResults = iCounterResults and tmpResult
3448 if not tmpResult:
3449 main.log.error( str( i ) + " is not in in-memory "
3450 "counter incremented results" )
3451 utilities.assert_equals( expect=True,
3452 actual=iCounterResults,
3453 onpass="In-memory counter incremented",
3454 onfail="Error incrementing in-memory" +
3455 " counter" )
3456
3457 main.step( "Counters we added have the correct values" )
3458 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3459 utilities.assert_equals( expect=main.TRUE,
3460 actual=incrementCheck,
3461 onpass="Added counters are correct",
3462 onfail="Added counters are incorrect" )
3463
Jon Hall5cf14d52015-07-16 12:15:19 -07003464 main.step( "Check counters are consistant across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003465 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003466 utilities.assert_equals( expect=main.TRUE,
3467 actual=consistentCounterResults,
3468 onpass="ONOS counters are consistent " +
3469 "across nodes",
3470 onfail="ONOS Counters are inconsistent " +
3471 "across nodes" )
3472
3473 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003474 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3475 incrementCheck = incrementCheck and \
3476 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003477 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003478 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003479 onpass="Added counters are correct",
3480 onfail="Added counters are incorrect" )
3481 # DISTRIBUTED SETS
3482 main.step( "Distributed Set get" )
3483 size = len( onosSet )
3484 getResponses = []
3485 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003486 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003487 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003488 name="setTestGet-" + str( i ),
3489 args=[ onosSetName ] )
3490 threads.append( t )
3491 t.start()
3492 for t in threads:
3493 t.join()
3494 getResponses.append( t.result )
3495
3496 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003497 for i in range( len( main.activeNodes ) ):
3498 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003499 if isinstance( getResponses[ i ], list):
3500 current = set( getResponses[ i ] )
3501 if len( current ) == len( getResponses[ i ] ):
3502 # no repeats
3503 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003504 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003505 " has incorrect view" +
3506 " of set " + onosSetName + ":\n" +
3507 str( getResponses[ i ] ) )
3508 main.log.debug( "Expected: " + str( onosSet ) )
3509 main.log.debug( "Actual: " + str( current ) )
3510 getResults = main.FALSE
3511 else:
3512 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003513 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003514 " has repeat elements in" +
3515 " set " + onosSetName + ":\n" +
3516 str( getResponses[ i ] ) )
3517 getResults = main.FALSE
3518 elif getResponses[ i ] == main.ERROR:
3519 getResults = main.FALSE
3520 utilities.assert_equals( expect=main.TRUE,
3521 actual=getResults,
3522 onpass="Set elements are correct",
3523 onfail="Set elements are incorrect" )
3524
3525 main.step( "Distributed Set size" )
3526 sizeResponses = []
3527 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003528 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003529 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003530 name="setTestSize-" + str( i ),
3531 args=[ onosSetName ] )
3532 threads.append( t )
3533 t.start()
3534 for t in threads:
3535 t.join()
3536 sizeResponses.append( t.result )
3537
3538 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003539 for i in range( len( main.activeNodes ) ):
3540 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003541 if size != sizeResponses[ i ]:
3542 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003543 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003544 " expected a size of " + str( size ) +
3545 " for set " + onosSetName +
3546 " but got " + str( sizeResponses[ i ] ) )
3547 utilities.assert_equals( expect=main.TRUE,
3548 actual=sizeResults,
3549 onpass="Set sizes are correct",
3550 onfail="Set sizes are incorrect" )
3551
3552 main.step( "Distributed Set add()" )
3553 onosSet.add( addValue )
3554 addResponses = []
3555 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003556 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003557 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003558 name="setTestAdd-" + str( i ),
3559 args=[ onosSetName, addValue ] )
3560 threads.append( t )
3561 t.start()
3562 for t in threads:
3563 t.join()
3564 addResponses.append( t.result )
3565
3566 # main.TRUE = successfully changed the set
3567 # main.FALSE = action resulted in no change in set
3568 # main.ERROR - Some error in executing the function
3569 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003570 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003571 if addResponses[ i ] == main.TRUE:
3572 # All is well
3573 pass
3574 elif addResponses[ i ] == main.FALSE:
3575 # Already in set, probably fine
3576 pass
3577 elif addResponses[ i ] == main.ERROR:
3578 # Error in execution
3579 addResults = main.FALSE
3580 else:
3581 # unexpected result
3582 addResults = main.FALSE
3583 if addResults != main.TRUE:
3584 main.log.error( "Error executing set add" )
3585
3586 # Check if set is still correct
3587 size = len( onosSet )
3588 getResponses = []
3589 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003590 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003591 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003592 name="setTestGet-" + str( i ),
3593 args=[ onosSetName ] )
3594 threads.append( t )
3595 t.start()
3596 for t in threads:
3597 t.join()
3598 getResponses.append( t.result )
3599 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003600 for i in range( len( main.activeNodes ) ):
3601 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003602 if isinstance( getResponses[ i ], list):
3603 current = set( getResponses[ i ] )
3604 if len( current ) == len( getResponses[ i ] ):
3605 # no repeats
3606 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003607 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003608 " of set " + onosSetName + ":\n" +
3609 str( getResponses[ i ] ) )
3610 main.log.debug( "Expected: " + str( onosSet ) )
3611 main.log.debug( "Actual: " + str( current ) )
3612 getResults = main.FALSE
3613 else:
3614 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003615 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003616 " set " + onosSetName + ":\n" +
3617 str( getResponses[ i ] ) )
3618 getResults = main.FALSE
3619 elif getResponses[ i ] == main.ERROR:
3620 getResults = main.FALSE
3621 sizeResponses = []
3622 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003623 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003624 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003625 name="setTestSize-" + str( i ),
3626 args=[ onosSetName ] )
3627 threads.append( t )
3628 t.start()
3629 for t in threads:
3630 t.join()
3631 sizeResponses.append( t.result )
3632 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003633 for i in range( len( main.activeNodes ) ):
3634 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003635 if size != sizeResponses[ i ]:
3636 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003637 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003638 " expected a size of " + str( size ) +
3639 " for set " + onosSetName +
3640 " but got " + str( sizeResponses[ i ] ) )
3641 addResults = addResults and getResults and sizeResults
3642 utilities.assert_equals( expect=main.TRUE,
3643 actual=addResults,
3644 onpass="Set add correct",
3645 onfail="Set add was incorrect" )
3646
3647 main.step( "Distributed Set addAll()" )
3648 onosSet.update( addAllValue.split() )
3649 addResponses = []
3650 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003651 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003652 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003653 name="setTestAddAll-" + str( i ),
3654 args=[ onosSetName, addAllValue ] )
3655 threads.append( t )
3656 t.start()
3657 for t in threads:
3658 t.join()
3659 addResponses.append( t.result )
3660
3661 # main.TRUE = successfully changed the set
3662 # main.FALSE = action resulted in no change in set
3663 # main.ERROR - Some error in executing the function
3664 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003665 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003666 if addResponses[ i ] == main.TRUE:
3667 # All is well
3668 pass
3669 elif addResponses[ i ] == main.FALSE:
3670 # Already in set, probably fine
3671 pass
3672 elif addResponses[ i ] == main.ERROR:
3673 # Error in execution
3674 addAllResults = main.FALSE
3675 else:
3676 # unexpected result
3677 addAllResults = main.FALSE
3678 if addAllResults != main.TRUE:
3679 main.log.error( "Error executing set addAll" )
3680
3681 # Check if set is still correct
3682 size = len( onosSet )
3683 getResponses = []
3684 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003685 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003686 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003687 name="setTestGet-" + str( i ),
3688 args=[ onosSetName ] )
3689 threads.append( t )
3690 t.start()
3691 for t in threads:
3692 t.join()
3693 getResponses.append( t.result )
3694 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003695 for i in range( len( main.activeNodes ) ):
3696 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003697 if isinstance( getResponses[ i ], list):
3698 current = set( getResponses[ i ] )
3699 if len( current ) == len( getResponses[ i ] ):
3700 # no repeats
3701 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003702 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003703 " has incorrect view" +
3704 " of set " + onosSetName + ":\n" +
3705 str( getResponses[ i ] ) )
3706 main.log.debug( "Expected: " + str( onosSet ) )
3707 main.log.debug( "Actual: " + str( current ) )
3708 getResults = main.FALSE
3709 else:
3710 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003711 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003712 " has repeat elements in" +
3713 " set " + onosSetName + ":\n" +
3714 str( getResponses[ i ] ) )
3715 getResults = main.FALSE
3716 elif getResponses[ i ] == main.ERROR:
3717 getResults = main.FALSE
3718 sizeResponses = []
3719 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003720 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003721 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003722 name="setTestSize-" + str( i ),
3723 args=[ onosSetName ] )
3724 threads.append( t )
3725 t.start()
3726 for t in threads:
3727 t.join()
3728 sizeResponses.append( t.result )
3729 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003730 for i in range( len( main.activeNodes ) ):
3731 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003732 if size != sizeResponses[ i ]:
3733 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003734 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003735 " expected a size of " + str( size ) +
3736 " for set " + onosSetName +
3737 " but got " + str( sizeResponses[ i ] ) )
3738 addAllResults = addAllResults and getResults and sizeResults
3739 utilities.assert_equals( expect=main.TRUE,
3740 actual=addAllResults,
3741 onpass="Set addAll correct",
3742 onfail="Set addAll was incorrect" )
3743
3744 main.step( "Distributed Set contains()" )
3745 containsResponses = []
3746 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003747 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003748 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003749 name="setContains-" + str( i ),
3750 args=[ onosSetName ],
3751 kwargs={ "values": addValue } )
3752 threads.append( t )
3753 t.start()
3754 for t in threads:
3755 t.join()
3756 # NOTE: This is the tuple
3757 containsResponses.append( t.result )
3758
3759 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003760 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003761 if containsResponses[ i ] == main.ERROR:
3762 containsResults = main.FALSE
3763 else:
3764 containsResults = containsResults and\
3765 containsResponses[ i ][ 1 ]
3766 utilities.assert_equals( expect=main.TRUE,
3767 actual=containsResults,
3768 onpass="Set contains is functional",
3769 onfail="Set contains failed" )
3770
3771 main.step( "Distributed Set containsAll()" )
3772 containsAllResponses = []
3773 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003774 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003775 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003776 name="setContainsAll-" + str( i ),
3777 args=[ onosSetName ],
3778 kwargs={ "values": addAllValue } )
3779 threads.append( t )
3780 t.start()
3781 for t in threads:
3782 t.join()
3783 # NOTE: This is the tuple
3784 containsAllResponses.append( t.result )
3785
3786 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003787 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003788 if containsResponses[ i ] == main.ERROR:
3789 containsResults = main.FALSE
3790 else:
3791 containsResults = containsResults and\
3792 containsResponses[ i ][ 1 ]
3793 utilities.assert_equals( expect=main.TRUE,
3794 actual=containsAllResults,
3795 onpass="Set containsAll is functional",
3796 onfail="Set containsAll failed" )
3797
3798 main.step( "Distributed Set remove()" )
3799 onosSet.remove( addValue )
3800 removeResponses = []
3801 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003802 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003803 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003804 name="setTestRemove-" + str( i ),
3805 args=[ onosSetName, addValue ] )
3806 threads.append( t )
3807 t.start()
3808 for t in threads:
3809 t.join()
3810 removeResponses.append( t.result )
3811
3812 # main.TRUE = successfully changed the set
3813 # main.FALSE = action resulted in no change in set
3814 # main.ERROR - Some error in executing the function
        # Tail of the "Distributed Set remove()" step: tally the per-node
        # remove responses, then re-read the set from every active node and
        # verify contents and size still match the local reference `onosSet`.
        # main.TRUE  = node reported the set changed
        # main.FALSE = element was not in the set (acceptable)
        # main.ERROR = CLI/command failure (test failure)
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        # Fan out one setTestGet per active node; results are collected from
        # each Thread's .result after join().
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based ONOS node number used in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported size on every node against the expected size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the remove calls, the contents, and the sizes
        # were all correct on every active node.
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3894
        # Remove a whole batch of values (addAllValue, space-separated) from
        # the distributed set on every node in parallel, then verify every
        # node's view against the locally mirrored `onosSet`.
        main.step( "Distributed Set removeAll()" )
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            # NOTE(review): a broad catch here can leave removeAllResponses
            # shorter than main.activeNodes, which would raise IndexError in
            # the tally loop below — confirm this is the intended best-effort
            # behavior.
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number, for log messages only
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify the reported set size on each node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3994
        # Add a batch of values (addAllValue, space-separated) to the
        # distributed set on every node in parallel, then verify contents and
        # size on all nodes against the local mirror `onosSet`.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number, for log messages only
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify the reported set size on each node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4091
        # Clear the distributed set on every node (setTestRemove with
        # clear=True), then verify every node reports an empty set.
        main.step( "Distributed Set clear()" )
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter when clearing
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number, for log messages only
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify the reported set size on each node (expected 0 after clear)
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4189
        # Re-populate the set after the clear() step above so the following
        # retain() step has elements to work on; same addAll + verify pattern
        # as the earlier addAll step.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number, for log messages only
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify the reported set size on each node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4286
        # Retain only the values in retainValue (setTestRemove with
        # retain=True, i.e. set intersection), then verify contents and size
        # on every node against the local mirror `onosSet`.
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number, for log messages only
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify the reported set size on each node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4383
        # Transactional maps
        # Put numKeys entries ("Key1".."Key100" -> tMapValue) through one
        # node, then read every key back from all active nodes and check
        # the value is consistent cluster-wide.
        main.step( "Partitioned Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        # NOTE(review): if the CLI call fails, putResponses may not be a
        # container and len() below would raise — confirm transactionalMapPut
        # always returns a dict-like value.
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        # NOTE(review): hard-coded 100 duplicates numKeys; keep in sync.
        if len( putResponses ) == 100:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            # Read the same key from every active node in parallel
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE(review): this loop variable shadows the `node` index used
            # for the put above; harmless here, but rename if that changes.
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )
4430
        # Same put/get consistency check as above, but against the in-memory
        # (non-persistent) transactional map variant (inMemory=True).
        main.step( "In-memory Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
        # NOTE(review): hard-coded 100 duplicates numKeys; keep in sync.
        if len( putResponses ) == 100:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="In-Memory Transactional Map put successful",
                                 onfail="In-Memory Transactional Map put values are incorrect" )

        main.step( "In-Memory Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            # Read the same key from every active node in parallel
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ],
                                 kwargs={ "inMemory": True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE(review): loop variable shadows the `node` index used above
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="In-Memory Transactional Map get values were correct",
                                 onfail="In-Memory Transactional Map values incorrect" )