Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if ONOS can handle
3 a minority of its nodes restarting
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
Jon Hallb3ed8ed2015-10-28 16:43:55 -070012CASE61: The failure-inducing case.
13CASE62: The failure-recovery case.
Jon Hall5cf14d52015-07-16 12:15:19 -070014CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
Jon Hallb3ed8ed2015-10-28 16:43:55 -070028class HAkillNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -070029
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hall3b489db2015-10-05 14:38:37 -070052 import pexpect
Jon Hall5cf14d52015-07-16 12:15:19 -070053 main.log.info( "ONOS HA test: Restart minority of ONOS nodes - " +
54 "initialization" )
55 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070056 main.caseExplanation = "Setup the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070057 "installing ONOS, starting Mininet and ONOS " +\
58 "cli sessions."
59 # TODO: save all the timers and output them for plotting
60
61 # load some variables from the params file
62 PULLCODE = False
63 if main.params[ 'Git' ] == 'True':
64 PULLCODE = True
65 gitBranch = main.params[ 'branch' ]
66 cellName = main.params[ 'ENV' ][ 'cellName' ]
67
Jon Halle1a3b752015-07-22 13:02:46 -070068 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070069 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070070 if main.ONOSbench.maxNodes < main.numCtrls:
71 main.numCtrls = int( main.ONOSbench.maxNodes )
72 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070073 global ONOS1Port
74 global ONOS2Port
75 global ONOS3Port
76 global ONOS4Port
77 global ONOS5Port
78 global ONOS6Port
79 global ONOS7Port
80
81 # FIXME: just get controller port from params?
82 # TODO: do we really need all these?
83 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
84 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
85 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
86 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
87 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
88 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
89 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
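        # A minimal sketch of what the FIXME above suggests (not used by the
        # rest of this test): read the controller ports from the params file
        # instead of seven hard-coded globals. This assumes the existing
        # 'port<N>' key naming in main.params[ 'CTRL' ] is kept.
        # controllerPorts = [ main.params[ 'CTRL' ][ 'port' + str( i ) ]
        #                     for i in range( 1, main.numCtrls + 1 ) ]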
90
Jon Halle1a3b752015-07-22 13:02:46 -070091 try:
92 fileName = "Counters"
93 # TODO: Maybe make a library folder somewhere?
94 path = main.params[ 'imports' ][ 'path' ]
95 main.Counters = imp.load_source( fileName,
96 path + fileName + ".py" )
97 except Exception as e:
98 main.log.exception( e )
99 main.cleanup()
100 main.exit()
101
102 main.CLIs = []
103 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700104 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700105 for i in range( 1, main.numCtrls + 1 ):
106 try:
107 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
108 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
109 ipList.append( main.nodes[ -1 ].ip_address )
110 except AttributeError:
111 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700112
113 main.step( "Create cell file" )
114 cellAppString = main.params[ 'ENV' ][ 'appString' ]
115 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
116 main.Mininet1.ip_address,
117 cellAppString, ipList )
118 main.step( "Applying cell variable to environment" )
119 cellResult = main.ONOSbench.setCell( cellName )
120 verifyResult = main.ONOSbench.verifyCell()
121
122 # FIXME: this is a short-term fix
123 main.log.info( "Removing raft logs" )
124 main.ONOSbench.onosRemoveRaftLogs()
125
126 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700127 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700128 main.ONOSbench.onosUninstall( node.ip_address )
129
130 # Make sure ONOS is DEAD
131 main.log.info( "Killing any ONOS processes" )
132 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700133 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700134 killed = main.ONOSbench.onosKill( node.ip_address )
135 killResults = killResults and killed
136
137 cleanInstallResult = main.TRUE
138 gitPullResult = main.TRUE
139
140 main.step( "Starting Mininet" )
141 # scp topo file to mininet
142 # TODO: move to params?
143 topoName = "obelisk.py"
144 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700145 main.ONOSbench.scp( main.Mininet1,
146 filePath + topoName,
147 main.Mininet1.home,
148 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700149 mnResult = main.Mininet1.startNet( )
150 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
151 onpass="Mininet Started",
152 onfail="Error starting Mininet" )
153
154 main.step( "Git checkout and pull " + gitBranch )
155 if PULLCODE:
156 main.ONOSbench.gitCheckout( gitBranch )
157 gitPullResult = main.ONOSbench.gitPull()
158 # values of 1 or 3 are good
159 utilities.assert_lesser( expect=0, actual=gitPullResult,
160 onpass="Git pull successful",
161 onfail="Git pull failed" )
162 main.ONOSbench.getVersion( report=True )
163
164 main.step( "Using mvn clean install" )
165 cleanInstallResult = main.TRUE
166 if PULLCODE and gitPullResult == main.TRUE:
167 cleanInstallResult = main.ONOSbench.cleanInstall()
168 else:
169 main.log.warn( "Did not pull new code so skipping mvn " +
170 "clean install" )
171 utilities.assert_equals( expect=main.TRUE,
172 actual=cleanInstallResult,
173 onpass="MCI successful",
174 onfail="MCI failed" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700175
176 main.step( "Make sure ONOS service doesn't automatically respawn" )
177 handle = main.ONOSbench.handle
178 handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
179 handle.expect( "\$" ) # $ from the command
180 handle.expect( "\$" ) # $ from the prompt
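        # The sed above comments out the "respawn" stanza in the packaged init
        # config, so a node killed later in this test stays down instead of
        # being restarted automatically. In tools/package/init/onos.conf the
        # change is simply:
        #     respawn   ->   #respawn
        # ( this edit is reverted at the end of this case with git checkout )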
181
Jon Hall5cf14d52015-07-16 12:15:19 -0700182 # GRAPHS
183 # NOTE: important params here:
184 # job = name of Jenkins job
185 # Plot Name = Plot-HA, can only be used if there are multiple plots
186 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700187 job = "HAkillNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700188 plotName = "Plot-HA"
189 graphs = '<ac:structured-macro ac:name="html">\n'
190 graphs += '<ac:plain-text-body><![CDATA[\n'
191 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
192 '/plot/' + plotName + '/getPlot?index=0' +\
193 '&width=500&height=300"' +\
194 'noborder="0" width="500" height="300" scrolling="yes" ' +\
195 'seamless="seamless"></iframe>\n'
196 graphs += ']]></ac:plain-text-body>\n'
197 graphs += '</ac:structured-macro>\n'
198 main.log.wiki(graphs)
199
200 main.step( "Creating ONOS package" )
Jon Hall3b489db2015-10-05 14:38:37 -0700201 # copy gen-partitions file to ONOS
202 # NOTE: this assumes TestON and ONOS are on the same machine
203 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
204 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
205 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
206 main.ONOSbench.ip_address,
207 srcFile,
208 dstDir,
209 pwd=main.ONOSbench.pwd,
210 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700211 packageResult = main.ONOSbench.onosPackage()
212 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
213 onpass="ONOS package successful",
214 onfail="ONOS package failed" )
215
216 main.step( "Installing ONOS package" )
217 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700218 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700219 tmpResult = main.ONOSbench.onosInstall( options="-f",
220 node=node.ip_address )
221 onosInstallResult = onosInstallResult and tmpResult
222 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
223 onpass="ONOS install successful",
224 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700225 # clean up gen-partitions file
226 try:
227 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
228 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
229 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
230 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
231 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
232 str( main.ONOSbench.handle.before ) )
233 except ( pexpect.TIMEOUT, pexpect.EOF ):
234 main.log.exception( "ONOSbench: pexpect exception found:" +
235 main.ONOSbench.handle.before )
236 main.cleanup()
237 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700238
239 main.step( "Checking if ONOS is up yet" )
240 for i in range( 2 ):
241 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700242 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700243 started = main.ONOSbench.isup( node.ip_address )
244 if not started:
245 main.log.error( node.name + " didn't start!" )
246 main.ONOSbench.onosStop( node.ip_address )
247 main.ONOSbench.onosStart( node.ip_address )
248 onosIsupResult = onosIsupResult and started
249 if onosIsupResult == main.TRUE:
250 break
251 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
252 onpass="ONOS startup successful",
253 onfail="ONOS startup failed" )
254
255 main.step( "Starting ONOS CLI sessions" )
256 cliResults = main.TRUE
257 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700258 for i in range( main.numCtrls ):
259 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700260 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700261 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700262 threads.append( t )
263 t.start()
264
265 for t in threads:
266 t.join()
267 cliResults = cliResults and t.result
268 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
269 onpass="ONOS cli startup successful",
270 onfail="ONOS cli startup failed" )
271
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700272 # Create a list of active nodes for use when some nodes are stopped
273 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
274
Jon Hall5cf14d52015-07-16 12:15:19 -0700275 if main.params[ 'tcpdump' ].lower() == "true":
276 main.step( "Start Packet Capture MN" )
277 main.Mininet2.startTcpdump(
278 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
279 + "-MN.pcap",
280 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
281 port=main.params[ 'MNtcpdump' ][ 'port' ] )
282
283 main.step( "App Ids check" )
284 appCheck = main.TRUE
285 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700286 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700287 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700288 name="appToIDCheck-" + str( i ),
289 args=[] )
290 threads.append( t )
291 t.start()
292
293 for t in threads:
294 t.join()
295 appCheck = appCheck and t.result
296 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700297 node = main.activeNodes[0]
298 main.log.warn( main.CLIs[node].apps() )
299 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700300 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
301 onpass="App Ids seem to be correct",
302 onfail="Something is wrong with app Ids" )
303
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700304 main.step( "Clean up ONOS service changes" )
305 handle.sendline( "git checkout -- tools/package/init/onos.conf" )
306 handle.expect( "\$" )
307
Jon Hall5cf14d52015-07-16 12:15:19 -0700308 if cliResults == main.FALSE:
309 main.log.error( "Failed to start ONOS, stopping test" )
310 main.cleanup()
311 main.exit()
312
313 def CASE2( self, main ):
314 """
315 Assign devices to controllers
316 """
317 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700318 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700319 assert main, "main not defined"
320 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700321 assert main.CLIs, "main.CLIs not defined"
322 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700323 assert ONOS1Port, "ONOS1Port not defined"
324 assert ONOS2Port, "ONOS2Port not defined"
325 assert ONOS3Port, "ONOS3Port not defined"
326 assert ONOS4Port, "ONOS4Port not defined"
327 assert ONOS5Port, "ONOS5Port not defined"
328 assert ONOS6Port, "ONOS6Port not defined"
329 assert ONOS7Port, "ONOS7Port not defined"
330
331 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700332 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700333 "and check that an ONOS node becomes the " +\
334 "master of the device."
335 main.step( "Assign switches to controllers" )
336
337 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700338 for i in range( main.numCtrls ):
339 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700340 swList = []
341 for i in range( 1, 29 ):
342 swList.append( "s" + str( i ) )
343 main.Mininet1.assignSwController( sw=swList, ip=ipList )
344
345 mastershipCheck = main.TRUE
346 for i in range( 1, 29 ):
347 response = main.Mininet1.getSwController( "s" + str( i ) )
348 try:
349 main.log.info( str( response ) )
350 except Exception:
351 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700352 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700353 if re.search( "tcp:" + node.ip_address, response ):
354 mastershipCheck = mastershipCheck and main.TRUE
355 else:
356 main.log.error( "Error, node " + node.ip_address + " is " +
357 "not in the list of controllers s" +
358 str( i ) + " is connecting to." )
359 mastershipCheck = main.FALSE
360 utilities.assert_equals(
361 expect=main.TRUE,
362 actual=mastershipCheck,
363 onpass="Switch mastership assigned correctly",
364 onfail="Switches not assigned correctly to controllers" )
365
366 def CASE21( self, main ):
367 """
368 Assign mastership to controllers
369 """
Jon Hall5cf14d52015-07-16 12:15:19 -0700370 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700371 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700372 assert main, "main not defined"
373 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700374 assert main.CLIs, "main.CLIs not defined"
375 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700376 assert ONOS1Port, "ONOS1Port not defined"
377 assert ONOS2Port, "ONOS2Port not defined"
378 assert ONOS3Port, "ONOS3Port not defined"
379 assert ONOS4Port, "ONOS4Port not defined"
380 assert ONOS5Port, "ONOS5Port not defined"
381 assert ONOS6Port, "ONOS6Port not defined"
382 assert ONOS7Port, "ONOS7Port not defined"
383
384 main.case( "Assigning Controller roles for switches" )
Jon Hall783bbf92015-07-23 14:33:19 -0700385 main.caseExplanation = "Check that ONOS is connected to each " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700386 "device. Then manually assign" +\
387 " mastership to specific ONOS nodes using" +\
388 " 'device-role'"
389 main.step( "Assign mastership of switches to specific controllers" )
390 # Manually assign mastership to the controller we want
391 roleCall = main.TRUE
392
393 ipList = [ ]
394 deviceList = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700395 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700396 try:
397 # Assign mastership to specific controllers. This assignment was
398 # determined for a 7 node cluster, but will work with any sized
399 # cluster
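            # Worked example of the modulo mapping below: with a full 7 node
            # cluster, switch s7 gets c = 5 % 7 = 5, i.e. ONOS6; on a 3 node
            # cluster the same line gives c = 5 % 3 = 2, i.e. ONOS3, which is
            # why the assignment still works for any cluster size.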
400 for i in range( 1, 29 ): # switches 1 through 28
401 # set up correct variables:
402 if i == 1:
403 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700404 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700405 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700406 elif i == 2:
Jon Halle1a3b752015-07-22 13:02:46 -0700407 c = 1 % main.numCtrls
408 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700409 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700410 elif i == 3:
Jon Halle1a3b752015-07-22 13:02:46 -0700411 c = 1 % main.numCtrls
412 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700413 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700414 elif i == 4:
Jon Halle1a3b752015-07-22 13:02:46 -0700415 c = 3 % main.numCtrls
416 ip = main.nodes[ c ].ip_address # ONOS4
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700417 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700418 elif i == 5:
Jon Halle1a3b752015-07-22 13:02:46 -0700419 c = 2 % main.numCtrls
420 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700421 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700422 elif i == 6:
Jon Halle1a3b752015-07-22 13:02:46 -0700423 c = 2 % main.numCtrls
424 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700425 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700426 elif i == 7:
Jon Halle1a3b752015-07-22 13:02:46 -0700427 c = 5 % main.numCtrls
428 ip = main.nodes[ c ].ip_address # ONOS6
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700429 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700430 elif i >= 8 and i <= 17:
Jon Halle1a3b752015-07-22 13:02:46 -0700431 c = 4 % main.numCtrls
432 ip = main.nodes[ c ].ip_address # ONOS5
Jon Hall5cf14d52015-07-16 12:15:19 -0700433 dpid = '3' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700434 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700435 elif i >= 18 and i <= 27:
Jon Halle1a3b752015-07-22 13:02:46 -0700436 c = 6 % main.numCtrls
437 ip = main.nodes[ c ].ip_address # ONOS7
Jon Hall5cf14d52015-07-16 12:15:19 -0700438 dpid = '6' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700439 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700440 elif i == 28:
441 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700442 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700443 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700444 else:
445 main.log.error( "You didn't write an else statement for " +
446 "switch s" + str( i ) )
447 roleCall = main.FALSE
448 # Assign switch
449 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
450 # TODO: make this controller dynamic
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700451 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
Jon Hall5cf14d52015-07-16 12:15:19 -0700452 ipList.append( ip )
453 deviceList.append( deviceId )
454 except ( AttributeError, AssertionError ):
455 main.log.exception( "Something is wrong with ONOS device view" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700456 main.log.info( onosCli.devices() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700457 utilities.assert_equals(
458 expect=main.TRUE,
459 actual=roleCall,
460 onpass="Re-assigned switch mastership to designated controller",
461 onfail="Something wrong with deviceRole calls" )
462
463 main.step( "Check mastership was correctly assigned" )
464 roleCheck = main.TRUE
465 # NOTE: This is due to the fact that device mastership change is not
466 # atomic and is actually a multi-step process
467 time.sleep( 5 )
468 for i in range( len( ipList ) ):
469 ip = ipList[i]
470 deviceId = deviceList[i]
471 # Check assignment
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700472 master = onosCli.getRole( deviceId ).get( 'master' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700473 if ip in master:
474 roleCheck = roleCheck and main.TRUE
475 else:
476 roleCheck = roleCheck and main.FALSE
477 main.log.error( "Error, controller " + ip + " is not" +
478 " master " + "of device " +
479 str( deviceId ) + ". Master is " +
480 repr( master ) + "." )
481 utilities.assert_equals(
482 expect=main.TRUE,
483 actual=roleCheck,
484 onpass="Switches were successfully reassigned to designated " +
485 "controller",
486 onfail="Switches were not successfully reassigned" )
487
488 def CASE3( self, main ):
489 """
490 Assign intents
491 """
492 import time
493 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700494 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700495 assert main, "main not defined"
496 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700497 assert main.CLIs, "main.CLIs not defined"
498 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700499 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700500 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700501 "assign predetermined host-to-host intents." +\
502 " After installation, check that the intent" +\
503 " is distributed to all nodes and the state" +\
504 " is INSTALLED"
505
506 # install onos-app-fwd
507 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700508 onosCli = main.CLIs[ main.activeNodes[0] ]
509 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700510 utilities.assert_equals( expect=main.TRUE, actual=installResults,
511 onpass="Install fwd successful",
512 onfail="Install fwd failed" )
513
514 main.step( "Check app ids" )
515 appCheck = main.TRUE
516 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700517 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700518 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700519 name="appToIDCheck-" + str( i ),
520 args=[] )
521 threads.append( t )
522 t.start()
523
524 for t in threads:
525 t.join()
526 appCheck = appCheck and t.result
527 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700528 main.log.warn( onosCli.apps() )
529 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700530 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
531 onpass="App Ids seem to be correct",
532 onfail="Something is wrong with app Ids" )
533
534 main.step( "Discovering Hosts( Via pingall for now )" )
535 # FIXME: Once we have a host discovery mechanism, use that instead
536 # REACTIVE FWD test
537 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700538 passMsg = "Reactive Pingall test passed"
539 time1 = time.time()
540 pingResult = main.Mininet1.pingall()
541 time2 = time.time()
542 if not pingResult:
543 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700544 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700545 passMsg += " on the second try"
546 utilities.assert_equals(
547 expect=main.TRUE,
548 actual=pingResult,
549 onpass= passMsg,
550 onfail="Reactive Pingall failed, " +
551 "one or more ping pairs failed" )
552 main.log.info( "Time for pingall: %.2f seconds" %
553 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700554 # timeout for fwd flows
555 time.sleep( 11 )
556 # uninstall onos-app-fwd
557 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700558 node = main.activeNodes[0]
559 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700560 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
561 onpass="Uninstall fwd successful",
562 onfail="Uninstall fwd failed" )
563
564 main.step( "Check app ids" )
565 threads = []
566 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700567 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700568 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700569 name="appToIDCheck-" + str( i ),
570 args=[] )
571 threads.append( t )
572 t.start()
573
574 for t in threads:
575 t.join()
576 appCheck2 = appCheck2 and t.result
577 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700578 node = main.activeNodes[0]
579 main.log.warn( main.CLIs[node].apps() )
580 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700581 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
582 onpass="App Ids seem to be correct",
583 onfail="Something is wrong with app Ids" )
584
585 main.step( "Add host intents via cli" )
586 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700587 # TODO: move the host numbers to params
588 # Maybe look at all the paths we ping?
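        # One possible shape for the TODO above ( hypothetical params keys,
        # not read by the current code ): the host range could come from the
        # params file instead of the hard-coded 8..17 below, e.g.
        # hostRange = range( int( main.params[ 'hosts' ][ 'first' ] ),
        #                    int( main.params[ 'hosts' ][ 'last' ] ) + 1 )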
Jon Hall5cf14d52015-07-16 12:15:19 -0700589 intentAddResult = True
590 hostResult = main.TRUE
591 for i in range( 8, 18 ):
592 main.log.info( "Adding host intent between h" + str( i ) +
593 " and h" + str( i + 10 ) )
594 host1 = "00:00:00:00:00:" + \
595 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
596 host2 = "00:00:00:00:00:" + \
597 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
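            # For example, i = 8 pairs h8 with h18:
            # host1 = "00:00:00:00:00:08", host2 = "00:00:00:00:00:12"
            # ( 18 decimal is 0x12; the test assumes host MACs are numbered
            #   this way in the topology )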
598 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700599 host1Dict = onosCli.getHost( host1 )
600 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700601 host1Id = None
602 host2Id = None
603 if host1Dict and host2Dict:
604 host1Id = host1Dict.get( 'id', None )
605 host2Id = host2Dict.get( 'id', None )
606 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700607 nodeNum = ( i % len( main.activeNodes ) )
608 node = main.activeNodes[nodeNum]
609 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700610 if tmpId:
611 main.log.info( "Added intent with id: " + tmpId )
612 intentIds.append( tmpId )
613 else:
614 main.log.error( "addHostIntent returned: " +
615 repr( tmpId ) )
616 else:
617 main.log.error( "Error, getHost() failed for h" + str( i ) +
618 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700619 node = main.activeNodes[0]
620 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700621 main.log.warn( "Hosts output: " )
622 try:
623 main.log.warn( json.dumps( json.loads( hosts ),
624 sort_keys=True,
625 indent=4,
626 separators=( ',', ': ' ) ) )
627 except ( ValueError, TypeError ):
628 main.log.warn( repr( hosts ) )
629 hostResult = main.FALSE
630 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
631 onpass="Found a host id for each host",
632 onfail="Error looking up host ids" )
633
634 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700635 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700636 main.log.info( "Submitted intents: " + str( intentIds ) )
637 main.log.info( "Intents in ONOS: " + str( onosIds ) )
638 for intent in intentIds:
639 if intent in onosIds:
640 pass # intent submitted is in onos
641 else:
642 intentAddResult = False
643 if intentAddResult:
644 intentStop = time.time()
645 else:
646 intentStop = None
647 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700648 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700649 intentStates = []
650 installedCheck = True
651 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
652 count = 0
653 try:
654 for intent in json.loads( intents ):
655 state = intent.get( 'state', None )
656 if "INSTALLED" not in state:
657 installedCheck = False
658 intentId = intent.get( 'id', None )
659 intentStates.append( ( intentId, state ) )
660 except ( ValueError, TypeError ):
661 main.log.exception( "Error parsing intents" )
662 # add submitted intents not in the store
663 tmplist = [ i for i, s in intentStates ]
664 missingIntents = False
665 for i in intentIds:
666 if i not in tmplist:
667 intentStates.append( ( i, " - " ) )
668 missingIntents = True
669 intentStates.sort()
670 for i, s in intentStates:
671 count += 1
672 main.log.info( "%-6s%-15s%-15s" %
673 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700674 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700675 try:
676 missing = False
677 if leaders:
678 parsedLeaders = json.loads( leaders )
679 main.log.warn( json.dumps( parsedLeaders,
680 sort_keys=True,
681 indent=4,
682 separators=( ',', ': ' ) ) )
683 # check for all intent partitions
684 topics = []
685 for i in range( 14 ):
686 topics.append( "intent-partition-" + str( i ) )
687 main.log.debug( topics )
688 ONOStopics = [ j['topic'] for j in parsedLeaders ]
689 for topic in topics:
690 if topic not in ONOStopics:
691 main.log.error( "Error: " + topic +
692 " not in leaders" )
693 missing = True
694 else:
695 main.log.error( "leaders() returned None" )
696 except ( ValueError, TypeError ):
697 main.log.exception( "Error parsing leaders" )
698 main.log.error( repr( leaders ) )
699 # Check all nodes
700 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700701 for i in main.activeNodes:
702 response = main.CLIs[i].leaders( jsonFormat=False)
703 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700704 str( response ) )
705
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700706 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700707 try:
708 if partitions :
709 parsedPartitions = json.loads( partitions )
710 main.log.warn( json.dumps( parsedPartitions,
711 sort_keys=True,
712 indent=4,
713 separators=( ',', ': ' ) ) )
714 # TODO check for a leader in all partitions
715 # TODO check for consistency among nodes
716 else:
717 main.log.error( "partitions() returned None" )
718 except ( ValueError, TypeError ):
719 main.log.exception( "Error parsing partitions" )
720 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700721 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700722 try:
723 if pendingMap :
724 parsedPending = json.loads( pendingMap )
725 main.log.warn( json.dumps( parsedPending,
726 sort_keys=True,
727 indent=4,
728 separators=( ',', ': ' ) ) )
729 # TODO check something here?
730 else:
731 main.log.error( "pendingMap() returned None" )
732 except ( ValueError, TypeError ):
733 main.log.exception( "Error parsing pending map" )
734 main.log.error( repr( pendingMap ) )
735
736 intentAddResult = bool( intentAddResult and not missingIntents and
737 installedCheck )
738 if not intentAddResult:
739 main.log.error( "Error in pushing host intents to ONOS" )
740
741 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700742 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700743 correct = True
744 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700745 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700746 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700747 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700748 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700749 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700750 str( sorted( onosIds ) ) )
751 if sorted( ids ) != sorted( intentIds ):
752 main.log.warn( "Set of intent IDs doesn't match" )
753 correct = False
754 break
755 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700756 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700757 for intent in intents:
758 if intent[ 'state' ] != "INSTALLED":
759 main.log.warn( "Intent " + intent[ 'id' ] +
760 " is " + intent[ 'state' ] )
761 correct = False
762 break
763 if correct:
764 break
765 else:
766 time.sleep(1)
767 if not intentStop:
768 intentStop = time.time()
769 global gossipTime
770 gossipTime = intentStop - intentStart
771 main.log.info( "It took about " + str( gossipTime ) +
772 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700773 gossipPeriod = int( main.params['timers']['gossip'] )
774 maxGossipTime = gossipPeriod * len( main.activeNodes )
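        # Example of the bound being checked: a 5 second gossip period with 7
        # active nodes would give maxGossipTime = 5 * 7 = 35 seconds for the
        # intents to appear on every node.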
Jon Hall5cf14d52015-07-16 12:15:19 -0700775 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700776 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700777 onpass="ECM anti-entropy for intents worked within " +
778 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700779 onfail="Intent ECM anti-entropy took too long. " +
780 "Expected time:{}, Actual time:{}".format( maxGossipTime,
781 gossipTime ) )
782 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700783 intentAddResult = True
784
785 if not intentAddResult or "key" in pendingMap:
786 import time
787 installedCheck = True
788 main.log.info( "Sleeping 60 seconds to see if intents are found" )
789 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700790 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700791 main.log.info( "Submitted intents: " + str( intentIds ) )
792 main.log.info( "Intents in ONOS: " + str( onosIds ) )
793 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700794 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700795 intentStates = []
796 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
797 count = 0
798 try:
799 for intent in json.loads( intents ):
800 # Iter through intents of a node
801 state = intent.get( 'state', None )
802 if "INSTALLED" not in state:
803 installedCheck = False
804 intentId = intent.get( 'id', None )
805 intentStates.append( ( intentId, state ) )
806 except ( ValueError, TypeError ):
807 main.log.exception( "Error parsing intents" )
808 # add submitted intents not in the store
809 tmplist = [ i for i, s in intentStates ]
810 for i in intentIds:
811 if i not in tmplist:
812 intentStates.append( ( i, " - " ) )
813 intentStates.sort()
814 for i, s in intentStates:
815 count += 1
816 main.log.info( "%-6s%-15s%-15s" %
817 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700818 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700819 try:
820 missing = False
821 if leaders:
822 parsedLeaders = json.loads( leaders )
823 main.log.warn( json.dumps( parsedLeaders,
824 sort_keys=True,
825 indent=4,
826 separators=( ',', ': ' ) ) )
827 # check for all intent partitions
828 # check for election
829 topics = []
830 for i in range( 14 ):
831 topics.append( "intent-partition-" + str( i ) )
832 # FIXME: this should only be after we start the app
833 topics.append( "org.onosproject.election" )
834 main.log.debug( topics )
835 ONOStopics = [ j['topic'] for j in parsedLeaders ]
836 for topic in topics:
837 if topic not in ONOStopics:
838 main.log.error( "Error: " + topic +
839 " not in leaders" )
840 missing = True
841 else:
842 main.log.error( "leaders() returned None" )
843 except ( ValueError, TypeError ):
844 main.log.exception( "Error parsing leaders" )
845 main.log.error( repr( leaders ) )
846 # Check all nodes
847 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700848 for i in main.activeNodes:
849 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700850 response = node.leaders( jsonFormat=False)
851 main.log.warn( str( node.name ) + " leaders output: \n" +
852 str( response ) )
853
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700854 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700855 try:
856 if partitions :
857 parsedPartitions = json.loads( partitions )
858 main.log.warn( json.dumps( parsedPartitions,
859 sort_keys=True,
860 indent=4,
861 separators=( ',', ': ' ) ) )
862 # TODO check for a leader in all partitions
863 # TODO check for consistency among nodes
864 else:
865 main.log.error( "partitions() returned None" )
866 except ( ValueError, TypeError ):
867 main.log.exception( "Error parsing partitions" )
868 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700869 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700870 try:
871 if pendingMap :
872 parsedPending = json.loads( pendingMap )
873 main.log.warn( json.dumps( parsedPending,
874 sort_keys=True,
875 indent=4,
876 separators=( ',', ': ' ) ) )
877 # TODO check something here?
878 else:
879 main.log.error( "pendingMap() returned None" )
880 except ( ValueError, TypeError ):
881 main.log.exception( "Error parsing pending map" )
882 main.log.error( repr( pendingMap ) )
883
884 def CASE4( self, main ):
885 """
886 Ping across added host intents
887 """
888 import json
889 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700890 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700891 assert main, "main not defined"
892 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700893 assert main.CLIs, "main.CLIs not defined"
894 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700895 main.case( "Verify connectivity by sending traffic across Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700896 main.caseExplanation = "Ping across added host intents to check " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700897 "functionality and check the state of " +\
898 "the intent"
899 main.step( "Ping across added host intents" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700900 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700901 PingResult = main.TRUE
902 for i in range( 8, 18 ):
903 ping = main.Mininet1.pingHost( src="h" + str( i ),
904 target="h" + str( i + 10 ) )
905 PingResult = PingResult and ping
906 if ping == main.FALSE:
907 main.log.warn( "Ping failed between h" + str( i ) +
908 " and h" + str( i + 10 ) )
909 elif ping == main.TRUE:
910 main.log.info( "Ping test passed!" )
911 # Don't set PingResult or you'd override failures
912 if PingResult == main.FALSE:
913 main.log.error(
914 "Intents have not been installed correctly, pings failed." )
915 # TODO: pretty print
916 main.log.warn( "ONOS1 intents: " )
917 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700918 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700919 main.log.warn( json.dumps( json.loads( tmpIntents ),
920 sort_keys=True,
921 indent=4,
922 separators=( ',', ': ' ) ) )
923 except ( ValueError, TypeError ):
924 main.log.warn( repr( tmpIntents ) )
925 utilities.assert_equals(
926 expect=main.TRUE,
927 actual=PingResult,
928 onpass="Intents have been installed correctly and pings work",
929 onfail="Intents have not been installed correctly, pings failed." )
930
931 main.step( "Check Intent state" )
932 installedCheck = False
933 loopCount = 0
934 while not installedCheck and loopCount < 40:
935 installedCheck = True
936 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700937 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700938 intentStates = []
939 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
940 count = 0
941 # Iter through intents of a node
942 try:
943 for intent in json.loads( intents ):
944 state = intent.get( 'state', None )
945 if "INSTALLED" not in state:
946 installedCheck = False
947 intentId = intent.get( 'id', None )
948 intentStates.append( ( intentId, state ) )
949 except ( ValueError, TypeError ):
950 main.log.exception( "Error parsing intents." )
951 # Print states
952 intentStates.sort()
953 for i, s in intentStates:
954 count += 1
955 main.log.info( "%-6s%-15s%-15s" %
956 ( str( count ), str( i ), str( s ) ) )
957 if not installedCheck:
958 time.sleep( 1 )
959 loopCount += 1
960 utilities.assert_equals( expect=True, actual=installedCheck,
961 onpass="Intents are all INSTALLED",
962 onfail="Intents are not all in " +
963 "INSTALLED state" )
964
965 main.step( "Check leadership of topics" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700966 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700967 topicCheck = main.TRUE
968 try:
969 if leaders:
970 parsedLeaders = json.loads( leaders )
971 main.log.warn( json.dumps( parsedLeaders,
972 sort_keys=True,
973 indent=4,
974 separators=( ',', ': ' ) ) )
975 # check for all intent partitions
976 # check for election
977 # TODO: Look at Devices as topics now that it uses this system
978 topics = []
979 for i in range( 14 ):
980 topics.append( "intent-partition-" + str( i ) )
981 # FIXME: this should only be after we start the app
982 # FIXME: topics.append( "org.onosproject.election" )
983 # Print leaders output
984 main.log.debug( topics )
985 ONOStopics = [ j['topic'] for j in parsedLeaders ]
986 for topic in topics:
987 if topic not in ONOStopics:
988 main.log.error( "Error: " + topic +
989 " not in leaders" )
990 topicCheck = main.FALSE
991 else:
992 main.log.error( "leaders() returned None" )
993 topicCheck = main.FALSE
994 except ( ValueError, TypeError ):
995 topicCheck = main.FALSE
996 main.log.exception( "Error parsing leaders" )
997 main.log.error( repr( leaders ) )
998 # TODO: Check for a leader of these topics
999 # Check all nodes
1000 if topicCheck:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001001 for i in main.activeNodes:
1002 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001003 response = node.leaders( jsonFormat=False)
1004 main.log.warn( str( node.name ) + " leaders output: \n" +
1005 str( response ) )
1006
1007 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1008 onpass="intent Partitions is in leaders",
1009 onfail="Some topics were lost " )
1010 # Print partitions
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001011 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001012 try:
1013 if partitions :
1014 parsedPartitions = json.loads( partitions )
1015 main.log.warn( json.dumps( parsedPartitions,
1016 sort_keys=True,
1017 indent=4,
1018 separators=( ',', ': ' ) ) )
1019 # TODO check for a leader in all partitions
1020 # TODO check for consistency among nodes
1021 else:
1022 main.log.error( "partitions() returned None" )
1023 except ( ValueError, TypeError ):
1024 main.log.exception( "Error parsing partitions" )
1025 main.log.error( repr( partitions ) )
1026 # Print Pending Map
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001027 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001028 try:
1029 if pendingMap :
1030 parsedPending = json.loads( pendingMap )
1031 main.log.warn( json.dumps( parsedPending,
1032 sort_keys=True,
1033 indent=4,
1034 separators=( ',', ': ' ) ) )
1035 # TODO check something here?
1036 else:
1037 main.log.error( "pendingMap() returned None" )
1038 except ( ValueError, TypeError ):
1039 main.log.exception( "Error parsing pending map" )
1040 main.log.error( repr( pendingMap ) )
1041
1042 if not installedCheck:
1043 main.log.info( "Waiting 60 seconds to see if the state of " +
1044 "intents change" )
1045 time.sleep( 60 )
1046 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001047 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001048 intentStates = []
1049 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1050 count = 0
1051 # Iter through intents of a node
1052 try:
1053 for intent in json.loads( intents ):
1054 state = intent.get( 'state', None )
1055 if "INSTALLED" not in state:
1056 installedCheck = False
1057 intentId = intent.get( 'id', None )
1058 intentStates.append( ( intentId, state ) )
1059 except ( ValueError, TypeError ):
1060 main.log.exception( "Error parsing intents." )
1061 intentStates.sort()
1062 for i, s in intentStates:
1063 count += 1
1064 main.log.info( "%-6s%-15s%-15s" %
1065 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001066 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001067 try:
1068 missing = False
1069 if leaders:
1070 parsedLeaders = json.loads( leaders )
1071 main.log.warn( json.dumps( parsedLeaders,
1072 sort_keys=True,
1073 indent=4,
1074 separators=( ',', ': ' ) ) )
1075 # check for all intent partitions
1076 # check for election
1077 topics = []
1078 for i in range( 14 ):
1079 topics.append( "intent-partition-" + str( i ) )
1080 # FIXME: this should only be after we start the app
1081 topics.append( "org.onosproject.election" )
1082 main.log.debug( topics )
1083 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1084 for topic in topics:
1085 if topic not in ONOStopics:
1086 main.log.error( "Error: " + topic +
1087 " not in leaders" )
1088 missing = True
1089 else:
1090 main.log.error( "leaders() returned None" )
1091 except ( ValueError, TypeError ):
1092 main.log.exception( "Error parsing leaders" )
1093 main.log.error( repr( leaders ) )
1094 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001095 for i in main.activeNodes:
1096 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001097 response = node.leaders( jsonFormat=False)
1098 main.log.warn( str( node.name ) + " leaders output: \n" +
1099 str( response ) )
1100
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001101 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001102 try:
1103 if partitions :
1104 parsedPartitions = json.loads( partitions )
1105 main.log.warn( json.dumps( parsedPartitions,
1106 sort_keys=True,
1107 indent=4,
1108 separators=( ',', ': ' ) ) )
1109 # TODO check for a leader in all partitions
1110 # TODO check for consistency among nodes
1111 else:
1112 main.log.error( "partitions() returned None" )
1113 except ( ValueError, TypeError ):
1114 main.log.exception( "Error parsing partitions" )
1115 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001116 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001117 try:
1118 if pendingMap :
1119 parsedPending = json.loads( pendingMap )
1120 main.log.warn( json.dumps( parsedPending,
1121 sort_keys=True,
1122 indent=4,
1123 separators=( ',', ': ' ) ) )
1124 # TODO check something here?
1125 else:
1126 main.log.error( "pendingMap() returned None" )
1127 except ( ValueError, TypeError ):
1128 main.log.exception( "Error parsing pending map" )
1129 main.log.error( repr( pendingMap ) )
1130 # Print flowrules
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001131 node = main.activeNodes[0]
1132 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001133 main.step( "Wait a minute then ping again" )
1134 # the wait is above
1135 PingResult = main.TRUE
1136 for i in range( 8, 18 ):
1137 ping = main.Mininet1.pingHost( src="h" + str( i ),
1138 target="h" + str( i + 10 ) )
1139 PingResult = PingResult and ping
1140 if ping == main.FALSE:
1141 main.log.warn( "Ping failed between h" + str( i ) +
1142 " and h" + str( i + 10 ) )
1143 elif ping == main.TRUE:
1144 main.log.info( "Ping test passed!" )
1145 # Don't set PingResult or you'd override failures
1146 if PingResult == main.FALSE:
1147 main.log.error(
1148 "Intents have not been installed correctly, pings failed." )
1149 # TODO: pretty print
1150 main.log.warn( "ONOS1 intents: " )
1151 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001152 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001153 main.log.warn( json.dumps( json.loads( tmpIntents ),
1154 sort_keys=True,
1155 indent=4,
1156 separators=( ',', ': ' ) ) )
1157 except ( ValueError, TypeError ):
1158 main.log.warn( repr( tmpIntents ) )
1159 utilities.assert_equals(
1160 expect=main.TRUE,
1161 actual=PingResult,
1162 onpass="Intents have been installed correctly and pings work",
1163 onfail="Intents have not been installed correctly, pings failed." )
1164
1165 def CASE5( self, main ):
1166 """
1167 Reading state of ONOS
1168 """
1169 import json
1170 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001171 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001172 assert main, "main not defined"
1173 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001174 assert main.CLIs, "main.CLIs not defined"
1175 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001176
1177 main.case( "Setting up and gathering data for current state" )
1178 # The general idea for this test case is to pull the state of
1179 # ( intents, flows, topology, ... ) from each ONOS node
1180 # We can then compare them with each other and also with past states
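        # The consistency checks below all follow the same idiom: gather one
        # reply per active node in a list, then compare every entry against
        # the first, roughly ( sketch ):
        #     consistent = all( reply == replies[ 0 ] for reply in replies )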
1181
1182 main.step( "Check that each switch has a master" )
1183 global mastershipState
1184 mastershipState = '[]'
1185
1186 # Assert that each device has a master
1187 rolesNotNull = main.TRUE
1188 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001189 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001190 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001191 name="rolesNotNull-" + str( i ),
1192 args=[] )
1193 threads.append( t )
1194 t.start()
1195
1196 for t in threads:
1197 t.join()
1198 rolesNotNull = rolesNotNull and t.result
1199 utilities.assert_equals(
1200 expect=main.TRUE,
1201 actual=rolesNotNull,
1202 onpass="Each device has a master",
1203 onfail="Some devices don't have a master assigned" )
1204
1205 main.step( "Get the Mastership of each switch from each controller" )
1206 ONOSMastership = []
1207 mastershipCheck = main.FALSE
1208 consistentMastership = True
1209 rolesResults = True
1210 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001211 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001212 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001213 name="roles-" + str( i ),
1214 args=[] )
1215 threads.append( t )
1216 t.start()
1217
1218 for t in threads:
1219 t.join()
1220 ONOSMastership.append( t.result )
1221
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001222 for i in range( len( ONOSMastership ) ):
1223 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001224 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001225 main.log.error( "Error in getting ONOS" + node + " roles" )
1226 main.log.warn( "ONOS" + node + " mastership response: " +
1227 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001228 rolesResults = False
1229 utilities.assert_equals(
1230 expect=True,
1231 actual=rolesResults,
1232 onpass="No error in reading roles output",
1233 onfail="Error in reading roles from ONOS" )
1234
1235 main.step( "Check for consistency in roles from each controller" )
1236 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1237 main.log.info(
1238 "Switch roles are consistent across all ONOS nodes" )
1239 else:
1240 consistentMastership = False
1241 utilities.assert_equals(
1242 expect=True,
1243 actual=consistentMastership,
1244 onpass="Switch roles are consistent across all ONOS nodes",
1245 onfail="ONOS nodes have different views of switch roles" )
1246
1247 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001248 for i in range( len( main.activeNodes ) ):
1249 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001250 try:
1251 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001252 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001253 json.dumps(
1254 json.loads( ONOSMastership[ i ] ),
1255 sort_keys=True,
1256 indent=4,
1257 separators=( ',', ': ' ) ) )
1258 except ( ValueError, TypeError ):
1259 main.log.warn( repr( ONOSMastership[ i ] ) )
1260 elif rolesResults and consistentMastership:
1261 mastershipCheck = main.TRUE
1262 mastershipState = ONOSMastership[ 0 ]
1263
1264 main.step( "Get the intents from each controller" )
1265 global intentState
1266 intentState = []
1267 ONOSIntents = []
1268 intentCheck = main.FALSE
1269 consistentIntents = True
1270 intentsResults = True
1271 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001272 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001273 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001274 name="intents-" + str( i ),
1275 args=[],
1276 kwargs={ 'jsonFormat': True } )
1277 threads.append( t )
1278 t.start()
1279
1280 for t in threads:
1281 t.join()
1282 ONOSIntents.append( t.result )
1283
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001284 for i in range( len( ONOSIntents ) ):
1285 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001286 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001287 main.log.error( "Error in getting ONOS" + node + " intents" )
1288 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001289 repr( ONOSIntents[ i ] ) )
1290 intentsResults = False
1291 utilities.assert_equals(
1292 expect=True,
1293 actual=intentsResults,
1294 onpass="No error in reading intents output",
1295 onfail="Error in reading intents from ONOS" )
1296
1297 main.step( "Check for consistency in Intents from each controller" )
1298 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1299 main.log.info( "Intents are consistent across all ONOS " +
1300 "nodes" )
1301 else:
1302 consistentIntents = False
1303 main.log.error( "Intents not consistent" )
1304 utilities.assert_equals(
1305 expect=True,
1306 actual=consistentIntents,
1307 onpass="Intents are consistent across all ONOS nodes",
1308 onfail="ONOS nodes have different views of intents" )
1309
1310 if intentsResults:
1311 # Try to make it easy to figure out what is happening
1312 #
1313 # Intent ONOS1 ONOS2 ...
1314 # 0x01 INSTALLED INSTALLING
1315 # ... ... ...
1316 # ... ... ...
1317 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001318 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001319 title += " " * 10 + "ONOS" + str( n + 1 )
1320 main.log.warn( title )
1321 # get all intent keys in the cluster
1322 keys = []
1323 for nodeStr in ONOSIntents:
1324 node = json.loads( nodeStr )
1325 for intent in node:
1326 keys.append( intent.get( 'id' ) )
1327 keys = set( keys )
1328 for key in keys:
1329 row = "%-13s" % key
1330 for nodeStr in ONOSIntents:
1331 node = json.loads( nodeStr )
1332 for intent in node:
1333 if intent.get( 'id', "Error" ) == key:
1334 row += "%-15s" % intent.get( 'state' )
1335 main.log.warn( row )
1336 # End table view
1337
1338 if intentsResults and not consistentIntents:
1339 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001340 n = str( main.activeNodes[-1] + 1 )
1341 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001342 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1343 sort_keys=True,
1344 indent=4,
1345 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001346 for i in range( len( ONOSIntents ) ):
1347 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001348 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001349 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001350 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1351 sort_keys=True,
1352 indent=4,
1353 separators=( ',', ': ' ) ) )
1354 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001355 main.log.debug( "ONOS" + node + " intents match ONOS" +
1356 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001357 elif intentsResults and consistentIntents:
1358 intentCheck = main.TRUE
1359 intentState = ONOSIntents[ 0 ]
1360
1361 main.step( "Get the flows from each controller" )
1362 global flowState
1363 flowState = []
1364 ONOSFlows = []
1365 ONOSFlowsJson = []
1366 flowCheck = main.FALSE
1367 consistentFlows = True
1368 flowsResults = True
1369 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001370 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001371 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001372 name="flows-" + str( i ),
1373 args=[],
1374 kwargs={ 'jsonFormat': True } )
1375 threads.append( t )
1376 t.start()
1377
1378 # NOTE: Flows command can take some time to run
1379 time.sleep(30)
1380 for t in threads:
1381 t.join()
1382 result = t.result
1383 ONOSFlows.append( result )
1384
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001385 for i in range( len( ONOSFlows ) ):
1386 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001387 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1388 main.log.error( "Error in getting ONOS" + num + " flows" )
1389 main.log.warn( "ONOS" + num + " flows response: " +
1390 repr( ONOSFlows[ i ] ) )
1391 flowsResults = False
1392 ONOSFlowsJson.append( None )
1393 else:
1394 try:
1395 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1396 except ( ValueError, TypeError ):
1397 # FIXME: change this to log.error?
1398 main.log.exception( "Error in parsing ONOS" + num +
1399 " response as json." )
1400 main.log.error( repr( ONOSFlows[ i ] ) )
1401 ONOSFlowsJson.append( None )
1402 flowsResults = False
1403 utilities.assert_equals(
1404 expect=True,
1405 actual=flowsResults,
1406 onpass="No error in reading flows output",
1407 onfail="Error in reading flows from ONOS" )
1408
1409 main.step( "Check for consistency in Flows from each controller" )
1410 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1411 if all( tmp ):
1412 main.log.info( "Flow count is consistent across all ONOS nodes" )
1413 else:
1414 consistentFlows = False
1415 utilities.assert_equals(
1416 expect=True,
1417 actual=consistentFlows,
1418 onpass="The flow count is consistent across all ONOS nodes",
1419 onfail="ONOS nodes have different flow counts" )
1420
1421 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001422 for i in range( len( ONOSFlows ) ):
1423 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001424 try:
1425 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001426 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001427 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1428 indent=4, separators=( ',', ': ' ) ) )
1429 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001430 main.log.warn( "ONOS" + node + " flows: " +
1431 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001432 elif flowsResults and consistentFlows:
1433 flowCheck = main.TRUE
1434 flowState = ONOSFlows[ 0 ]
1435
1436 main.step( "Get the OF Table entries" )
1437 global flows
1438 flows = []
1439 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001440 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001441 if flowCheck == main.FALSE:
1442 for table in flows:
1443 main.log.warn( table )
1444 # TODO: Compare switch flow tables with ONOS flow tables
1445
1446 main.step( "Start continuous pings" )
1447 main.Mininet2.pingLong(
1448 src=main.params[ 'PING' ][ 'source1' ],
1449 target=main.params[ 'PING' ][ 'target1' ],
1450 pingTime=500 )
1451 main.Mininet2.pingLong(
1452 src=main.params[ 'PING' ][ 'source2' ],
1453 target=main.params[ 'PING' ][ 'target2' ],
1454 pingTime=500 )
1455 main.Mininet2.pingLong(
1456 src=main.params[ 'PING' ][ 'source3' ],
1457 target=main.params[ 'PING' ][ 'target3' ],
1458 pingTime=500 )
1459 main.Mininet2.pingLong(
1460 src=main.params[ 'PING' ][ 'source4' ],
1461 target=main.params[ 'PING' ][ 'target4' ],
1462 pingTime=500 )
1463 main.Mininet2.pingLong(
1464 src=main.params[ 'PING' ][ 'source5' ],
1465 target=main.params[ 'PING' ][ 'target5' ],
1466 pingTime=500 )
1467 main.Mininet2.pingLong(
1468 src=main.params[ 'PING' ][ 'source6' ],
1469 target=main.params[ 'PING' ][ 'target6' ],
1470 pingTime=500 )
1471 main.Mininet2.pingLong(
1472 src=main.params[ 'PING' ][ 'source7' ],
1473 target=main.params[ 'PING' ][ 'target7' ],
1474 pingTime=500 )
1475 main.Mininet2.pingLong(
1476 src=main.params[ 'PING' ][ 'source8' ],
1477 target=main.params[ 'PING' ][ 'target8' ],
1478 pingTime=500 )
1479 main.Mininet2.pingLong(
1480 src=main.params[ 'PING' ][ 'source9' ],
1481 target=main.params[ 'PING' ][ 'target9' ],
1482 pingTime=500 )
1483 main.Mininet2.pingLong(
1484 src=main.params[ 'PING' ][ 'source10' ],
1485 target=main.params[ 'PING' ][ 'target10' ],
1486 pingTime=500 )
1487
1488 main.step( "Collecting topology information from ONOS" )
1489 devices = []
1490 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001491 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001492 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001493 name="devices-" + str( i ),
1494 args=[ ] )
1495 threads.append( t )
1496 t.start()
1497
1498 for t in threads:
1499 t.join()
1500 devices.append( t.result )
1501 hosts = []
1502 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001503 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001504 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001505 name="hosts-" + str( i ),
1506 args=[ ] )
1507 threads.append( t )
1508 t.start()
1509
1510 for t in threads:
1511 t.join()
1512 try:
1513 hosts.append( json.loads( t.result ) )
1514 except ( ValueError, TypeError ):
1515 # FIXME: better handling of this, print which node
1516 # Maybe use thread name?
1517 main.log.exception( "Error parsing json output of hosts" )
1518 # FIXME: should this be an empty json object instead?
1519 hosts.append( None )
1520
1521 ports = []
1522 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001523 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001524 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001525 name="ports-" + str( i ),
1526 args=[ ] )
1527 threads.append( t )
1528 t.start()
1529
1530 for t in threads:
1531 t.join()
1532 ports.append( t.result )
1533 links = []
1534 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001535 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001536 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001537 name="links-" + str( i ),
1538 args=[ ] )
1539 threads.append( t )
1540 t.start()
1541
1542 for t in threads:
1543 t.join()
1544 links.append( t.result )
1545 clusters = []
1546 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001547 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001548 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001549 name="clusters-" + str( i ),
1550 args=[ ] )
1551 threads.append( t )
1552 t.start()
1553
1554 for t in threads:
1555 t.join()
1556 clusters.append( t.result )
1557 # Compare json objects for hosts and dataplane clusters
1558
1559 # hosts
1560 main.step( "Host view is consistent across ONOS nodes" )
1561 consistentHostsResult = main.TRUE
1562 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001563 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001564 if "Error" not in hosts[ controller ]:
1565 if hosts[ controller ] == hosts[ 0 ]:
1566 continue
1567 else: # hosts not consistent
1568 main.log.error( "hosts from ONOS" +
1569 controllerStr +
1570 " is inconsistent with ONOS1" )
1571 main.log.warn( repr( hosts[ controller ] ) )
1572 consistentHostsResult = main.FALSE
1573
1574 else:
1575 main.log.error( "Error in getting ONOS hosts from ONOS" +
1576 controllerStr )
1577 consistentHostsResult = main.FALSE
1578 main.log.warn( "ONOS" + controllerStr +
1579 " hosts response: " +
1580 repr( hosts[ controller ] ) )
1581 utilities.assert_equals(
1582 expect=main.TRUE,
1583 actual=consistentHostsResult,
1584 onpass="Hosts view is consistent across all ONOS nodes",
1585 onfail="ONOS nodes have different views of hosts" )
1586
1587 main.step( "Each host has an IP address" )
1588 ipResult = main.TRUE
1589 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001590 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001591 for host in hosts[ controller ]:
1592 if not host.get( 'ipAddresses', [ ] ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001593 main.log.error( "Error with host ips on controller" +
Jon Hall5cf14d52015-07-16 12:15:19 -07001594 controllerStr + ": " + str( host ) )
1595 ipResult = main.FALSE
1596 utilities.assert_equals(
1597 expect=main.TRUE,
1598 actual=ipResult,
1599 onpass="The ips of the hosts aren't empty",
1600 onfail="The ip of at least one host is missing" )
1601
1602 # Strongly connected clusters of devices
1603 main.step( "Cluster view is consistent across ONOS nodes" )
1604 consistentClustersResult = main.TRUE
1605 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001606 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001607 if "Error" not in clusters[ controller ]:
1608 if clusters[ controller ] == clusters[ 0 ]:
1609 continue
1610 else: # clusters not consistent
1611 main.log.error( "clusters from ONOS" + controllerStr +
1612 " is inconsistent with ONOS1" )
1613 consistentClustersResult = main.FALSE
1614
1615 else:
1616 main.log.error( "Error in getting dataplane clusters " +
1617 "from ONOS" + controllerStr )
1618 consistentClustersResult = main.FALSE
1619 main.log.warn( "ONOS" + controllerStr +
1620 " clusters response: " +
1621 repr( clusters[ controller ] ) )
1622 utilities.assert_equals(
1623 expect=main.TRUE,
1624 actual=consistentClustersResult,
1625 onpass="Clusters view is consistent across all ONOS nodes",
1626 onfail="ONOS nodes have different views of clusters" )
1627 # there should always only be one cluster
1628 main.step( "Cluster view correct across ONOS nodes" )
1629 try:
1630 numClusters = len( json.loads( clusters[ 0 ] ) )
1631 except ( ValueError, TypeError ):
1632 main.log.exception( "Error parsing clusters[0]: " +
1633 repr( clusters[ 0 ] ) )
1634 clusterResults = main.FALSE
numClusters = "ERROR"
1635 if numClusters == 1:
1636 clusterResults = main.TRUE
1637 utilities.assert_equals(
1638 expect=1,
1639 actual=numClusters,
1640 onpass="ONOS shows 1 SCC",
1641 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1642
1643 main.step( "Comparing ONOS topology to MN" )
1644 devicesResults = main.TRUE
1645 linksResults = main.TRUE
1646 hostsResults = main.TRUE
1647 mnSwitches = main.Mininet1.getSwitches()
1648 mnLinks = main.Mininet1.getLinks()
1649 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001650 for controller in range( len( main.activeNodes ) ):
1651 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001652 if devices[ controller ] and ports[ controller ] and\
1653 "Error" not in devices[ controller ] and\
1654 "Error" not in ports[ controller ]:
1655
1656 currentDevicesResult = main.Mininet1.compareSwitches(
1657 mnSwitches,
1658 json.loads( devices[ controller ] ),
1659 json.loads( ports[ controller ] ) )
1660 else:
1661 currentDevicesResult = main.FALSE
1662 utilities.assert_equals( expect=main.TRUE,
1663 actual=currentDevicesResult,
1664 onpass="ONOS" + controllerStr +
1665 " Switches view is correct",
1666 onfail="ONOS" + controllerStr +
1667 " Switches view is incorrect" )
1668 if links[ controller ] and "Error" not in links[ controller ]:
1669 currentLinksResult = main.Mininet1.compareLinks(
1670 mnSwitches, mnLinks,
1671 json.loads( links[ controller ] ) )
1672 else:
1673 currentLinksResult = main.FALSE
1674 utilities.assert_equals( expect=main.TRUE,
1675 actual=currentLinksResult,
1676 onpass="ONOS" + controllerStr +
1677 " links view is correct",
1678 onfail="ONOS" + controllerStr +
1679 " links view is incorrect" )
1680
1681 if hosts[ controller ] or "Error" not in hosts[ controller ]:
1682 currentHostsResult = main.Mininet1.compareHosts(
1683 mnHosts,
1684 hosts[ controller ] )
1685 else:
1686 currentHostsResult = main.FALSE
1687 utilities.assert_equals( expect=main.TRUE,
1688 actual=currentHostsResult,
1689 onpass="ONOS" + controllerStr +
1690 " hosts exist in Mininet",
1691 onfail="ONOS" + controllerStr +
1692 " hosts don't match Mininet" )
1693
1694 devicesResults = devicesResults and currentDevicesResult
1695 linksResults = linksResults and currentLinksResult
1696 hostsResults = hostsResults and currentHostsResult
1697
1698 main.step( "Device information is correct" )
1699 utilities.assert_equals(
1700 expect=main.TRUE,
1701 actual=devicesResults,
1702 onpass="Device information is correct",
1703 onfail="Device information is incorrect" )
1704
1705 main.step( "Links are correct" )
1706 utilities.assert_equals(
1707 expect=main.TRUE,
1708 actual=linksResults,
1709 onpass="Link are correct",
1710 onfail="Links are incorrect" )
1711
1712 main.step( "Hosts are correct" )
1713 utilities.assert_equals(
1714 expect=main.TRUE,
1715 actual=hostsResults,
1716 onpass="Hosts are correct",
1717 onfail="Hosts are incorrect" )
1718
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001719 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001720 """
1721 The Failure inducing case: kill a minority of ONOS nodes.
1722 """
Jon Halle1a3b752015-07-22 13:02:46 -07001723 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001724 assert main, "main not defined"
1725 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001726 assert main.CLIs, "main.CLIs not defined"
1727 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001728 main.case( "Kill minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001729
1730 main.step( "Checking ONOS Logs for errors" )
1731 for node in main.nodes:
1732 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1733 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1734
Jon Hall3b489db2015-10-05 14:38:37 -07001735 n = len( main.nodes ) # Number of nodes
1736 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1737 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1738 if n > 3:
1739 main.kill.append( p - 1 )
1740 # NOTE: This only works for cluster sizes of 3,5, or 7.
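# e.g. n=3 -> kill ONOS1; n=5 -> kill ONOS1 and ONOS4; n=7 -> kill ONOS1 and ONOS5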
1741
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001742 main.step( "Kill " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001743 killResults = main.TRUE
1744 for i in main.kill:
1745 killResults = killResults and\
1746 main.ONOSbench.onosKill( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001747 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001748 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001749 onpass="ONOS nodes killed successfully",
1750 onfail="ONOS nodes NOT successfully killed" )
1751
1752 def CASE62( self, main ):
1753 """
1754 Bring up the stopped nodes
1755 """
1756 import time
1757 assert main.numCtrls, "main.numCtrls not defined"
1758 assert main, "main not defined"
1759 assert utilities.assert_equals, "utilities.assert_equals not defined"
1760 assert main.CLIs, "main.CLIs not defined"
1761 assert main.nodes, "main.nodes not defined"
1762 assert main.kill, "main.kill not defined"
1763 main.case( "Restart minority of ONOS nodes" )
1764
1765 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1766 startResults = main.TRUE
1767 restartTime = time.time()
1768 for i in main.kill:
1769 startResults = startResults and\
1770 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1771 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1772 onpass="ONOS nodes started successfully",
1773 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001774
1775 main.step( "Checking if ONOS is up yet" )
1776 count = 0
1777 onosIsupResult = main.FALSE
1778 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001779 onosIsupResult = main.TRUE
1780 for i in main.kill:
1781 onosIsupResult = onosIsupResult and\
1782 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001783 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001784 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1785 onpass="ONOS restarted successfully",
1786 onfail="ONOS restart NOT successful" )
1787
Jon Halle1a3b752015-07-22 13:02:46 -07001788 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001789 cliResults = main.TRUE
1790 for i in main.kill:
1791 cliResults = cliResults and\
1792 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001793 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001794 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1795 onpass="ONOS cli restarted",
1796 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001797 main.activeNodes.sort()
1798 try:
1799 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1800 "List of active nodes has duplicates, this likely indicates something was run out of order"
1801 except AssertionError:
1802 main.log.exception( "" )
1803 main.cleanup()
1804 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001805
1806 # Grab the time of restart so we can check how long the gossip
1807 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001808 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001809 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001810 # TODO: Make this configurable. Also, we are breaking the above timer
1811 time.sleep( 60 )
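# Sanity check: dump cluster nodes, leaders, and partitions from one active node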
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001812 node = main.activeNodes[0]
1813 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1814 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1815 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001816
1817 def CASE7( self, main ):
1818 """
1819 Check state after ONOS failure
1820 """
1821 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001822 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001823 assert main, "main not defined"
1824 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001825 assert main.CLIs, "main.CLIs not defined"
1826 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001827 try:
1828 main.kill
1829 except AttributeError:
1830 main.kill = []
1831
Jon Hall5cf14d52015-07-16 12:15:19 -07001832 main.case( "Running ONOS Constant State Tests" )
1833
1834 main.step( "Check that each switch has a master" )
1835 # Assert that each device has a master
1836 rolesNotNull = main.TRUE
1837 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001838 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001839 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001840 name="rolesNotNull-" + str( i ),
1841 args=[ ] )
1842 threads.append( t )
1843 t.start()
1844
1845 for t in threads:
1846 t.join()
1847 rolesNotNull = rolesNotNull and t.result
1848 utilities.assert_equals(
1849 expect=main.TRUE,
1850 actual=rolesNotNull,
1851 onpass="Each device has a master",
1852 onfail="Some devices don't have a master assigned" )
1853
1854 main.step( "Read device roles from ONOS" )
1855 ONOSMastership = []
1856 consistentMastership = True
1857 rolesResults = True
1858 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001859 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001860 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001861 name="roles-" + str( i ),
1862 args=[] )
1863 threads.append( t )
1864 t.start()
1865
1866 for t in threads:
1867 t.join()
1868 ONOSMastership.append( t.result )
1869
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001870 for i in range( len( ONOSMastership ) ):
1871 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001872 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001873 main.log.error( "Error in getting ONOS" + node + " roles" )
1874 main.log.warn( "ONOS" + node + " mastership response: " +
1875 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001876 rolesResults = False
1877 utilities.assert_equals(
1878 expect=True,
1879 actual=rolesResults,
1880 onpass="No error in reading roles output",
1881 onfail="Error in reading roles from ONOS" )
1882
1883 main.step( "Check for consistency in roles from each controller" )
1884 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1885 main.log.info(
1886 "Switch roles are consistent across all ONOS nodes" )
1887 else:
1888 consistentMastership = False
1889 utilities.assert_equals(
1890 expect=True,
1891 actual=consistentMastership,
1892 onpass="Switch roles are consistent across all ONOS nodes",
1893 onfail="ONOS nodes have different views of switch roles" )
1894
1895 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001896 for i in range( len( ONOSMastership ) ):
1897 node = str( main.activeNodes[i] + 1 )
1898 main.log.warn( "ONOS" + node + " roles: ",
1899 json.dumps( json.loads( ONOSMastership[ i ] ),
1900 sort_keys=True,
1901 indent=4,
1902 separators=( ',', ': ' ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001903
1904 # NOTE: we expect mastership to change on controller failure
Jon Hall5cf14d52015-07-16 12:15:19 -07001905
1906 main.step( "Get the intents and compare across all nodes" )
1907 ONOSIntents = []
1908 intentCheck = main.FALSE
1909 consistentIntents = True
1910 intentsResults = True
1911 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001912 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001913 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001914 name="intents-" + str( i ),
1915 args=[],
1916 kwargs={ 'jsonFormat': True } )
1917 threads.append( t )
1918 t.start()
1919
1920 for t in threads:
1921 t.join()
1922 ONOSIntents.append( t.result )
1923
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001924 for i in range( len( ONOSIntents) ):
1925 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001926 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001927 main.log.error( "Error in getting ONOS" + node + " intents" )
1928 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001929 repr( ONOSIntents[ i ] ) )
1930 intentsResults = False
1931 utilities.assert_equals(
1932 expect=True,
1933 actual=intentsResults,
1934 onpass="No error in reading intents output",
1935 onfail="Error in reading intents from ONOS" )
1936
1937 main.step( "Check for consistency in Intents from each controller" )
1938 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1939 main.log.info( "Intents are consistent across all ONOS " +
1940 "nodes" )
1941 else:
1942 consistentIntents = False
1943
1944 # Try to make it easy to figure out what is happening
1945 #
1946 # Intent ONOS1 ONOS2 ...
1947 # 0x01 INSTALLED INSTALLING
1948 # ... ... ...
1949 # ... ... ...
1950 title = " ID"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001951 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001952 title += " " * 10 + "ONOS" + str( n + 1 )
1953 main.log.warn( title )
1954 # get all intent keys in the cluster
1955 keys = []
1956 for nodeStr in ONOSIntents:
1957 node = json.loads( nodeStr )
1958 for intent in node:
1959 keys.append( intent.get( 'id' ) )
1960 keys = set( keys )
1961 for key in keys:
1962 row = "%-13s" % key
1963 for nodeStr in ONOSIntents:
1964 node = json.loads( nodeStr )
1965 for intent in node:
1966 if intent.get( 'id' ) == key:
1967 row += "%-15s" % intent.get( 'state' )
1968 main.log.warn( row )
1969 # End table view
1970
1971 utilities.assert_equals(
1972 expect=True,
1973 actual=consistentIntents,
1974 onpass="Intents are consistent across all ONOS nodes",
1975 onfail="ONOS nodes have different views of intents" )
1976 intentStates = []
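# Tally the intent states reported by each node and log the counts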
1977 for node in ONOSIntents: # Iter through ONOS nodes
1978 nodeStates = []
1979 # Iter through intents of a node
1980 try:
1981 for intent in json.loads( node ):
1982 nodeStates.append( intent[ 'state' ] )
1983 except ( ValueError, TypeError ):
1984 main.log.exception( "Error in parsing intents" )
1985 main.log.error( repr( node ) )
1986 intentStates.append( nodeStates )
1987 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
1988 main.log.info( dict( out ) )
1989
1990 if intentsResults and not consistentIntents:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001991 for i in range( len( main.activeNodes ) ):
1992 node = str( main.activeNodes[i] + 1 )
1993 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001994 main.log.warn( json.dumps(
1995 json.loads( ONOSIntents[ i ] ),
1996 sort_keys=True,
1997 indent=4,
1998 separators=( ',', ': ' ) ) )
1999 elif intentsResults and consistentIntents:
2000 intentCheck = main.TRUE
2001
2002 # NOTE: Store has no durability, so intents are lost across system
2003 # restarts
2004 main.step( "Compare current intents with intents before the failure" )
2005 # NOTE: this requires case 5 to pass for intentState to be set.
2006 # maybe we should stop the test if that fails?
2007 sameIntents = main.FALSE
2008 if intentState and intentState == ONOSIntents[ 0 ]:
2009 sameIntents = main.TRUE
2010 main.log.info( "Intents are consistent with before failure" )
2011 # TODO: possibly the states have changed? we may need to figure out
2012 # what the acceptable states are
2013 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2014 sameIntents = main.TRUE
2015 try:
2016 before = json.loads( intentState )
2017 after = json.loads( ONOSIntents[ 0 ] )
2018 for intent in before:
2019 if intent not in after:
2020 sameIntents = main.FALSE
2021 main.log.debug( "Intent is not currently in ONOS " +
2022 "(at least in the same form):" )
2023 main.log.debug( json.dumps( intent ) )
2024 except ( ValueError, TypeError ):
2025 main.log.exception( "Exception printing intents" )
2026 main.log.debug( repr( ONOSIntents[0] ) )
2027 main.log.debug( repr( intentState ) )
2028 if sameIntents == main.FALSE:
2029 try:
2030 main.log.debug( "ONOS intents before: " )
2031 main.log.debug( json.dumps( json.loads( intentState ),
2032 sort_keys=True, indent=4,
2033 separators=( ',', ': ' ) ) )
2034 main.log.debug( "Current ONOS intents: " )
2035 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2036 sort_keys=True, indent=4,
2037 separators=( ',', ': ' ) ) )
2038 except ( ValueError, TypeError ):
2039 main.log.exception( "Exception printing intents" )
2040 main.log.debug( repr( ONOSIntents[0] ) )
2041 main.log.debug( repr( intentState ) )
2042 utilities.assert_equals(
2043 expect=main.TRUE,
2044 actual=sameIntents,
2045 onpass="Intents are consistent with before failure",
2046 onfail="The Intents changed during failure" )
2047 intentCheck = intentCheck and sameIntents
2048
2049 main.step( "Get the OF Table entries and compare to before " +
2050 "component failure" )
2051 FlowTables = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002052 for i in range( 28 ):
2053 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002054 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2055 FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
Jon Hall5cf14d52015-07-16 12:15:19 -07002056 if FlowTables == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002057 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2058
Jon Hall5cf14d52015-07-16 12:15:19 -07002059 utilities.assert_equals(
2060 expect=main.TRUE,
2061 actual=FlowTables,
2062 onpass="No changes were found in the flow tables",
2063 onfail="Changes were found in the flow tables" )
2064
2065 main.Mininet2.pingLongKill()
2066 '''
2067 main.step( "Check the continuous pings to ensure that no packets " +
2068 "were dropped during component failure" )
2069 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2070 main.params[ 'TESTONIP' ] )
2071 LossInPings = main.FALSE
2072 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2073 for i in range( 8, 18 ):
2074 main.log.info(
2075 "Checking for a loss in pings along flow from s" +
2076 str( i ) )
2077 LossInPings = main.Mininet2.checkForLoss(
2078 "/tmp/ping.h" +
2079 str( i ) ) or LossInPings
2080 if LossInPings == main.TRUE:
2081 main.log.info( "Loss in ping detected" )
2082 elif LossInPings == main.ERROR:
2083 main.log.info( "There are multiple mininet process running" )
2084 elif LossInPings == main.FALSE:
2085 main.log.info( "No Loss in the pings" )
2086 main.log.info( "No loss of dataplane connectivity" )
2087 utilities.assert_equals(
2088 expect=main.FALSE,
2089 actual=LossInPings,
2090 onpass="No Loss of connectivity",
2091 onfail="Loss of dataplane connectivity detected" )
2092 '''
2093
2094 main.step( "Leadership Election is still functional" )
2095 # Test of LeadershipElection
2096 leaderList = []
Jon Hall5cf14d52015-07-16 12:15:19 -07002097
Jon Hall3b489db2015-10-05 14:38:37 -07002098 restarted = []
2099 for i in main.kill:
2100 restarted.append( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07002101 leaderResult = main.TRUE
Jon Hall3b489db2015-10-05 14:38:37 -07002102
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002103 for i in main.activeNodes:
2104 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002105 leaderN = cli.electionTestLeader()
2106 leaderList.append( leaderN )
2107 if leaderN == main.FALSE:
2108 # error in response
2109 main.log.error( "Something is wrong with " +
2110 "electionTestLeader function, check the" +
2111 " error logs" )
2112 leaderResult = main.FALSE
2113 elif leaderN is None:
2114 main.log.error( cli.name +
2115 " shows no leader for the election-app was" +
2116 " elected after the old one died" )
2117 leaderResult = main.FALSE
2118 elif leaderN in restarted:
2119 main.log.error( cli.name + " shows " + str( leaderN ) +
2120 " as leader for the election-app, but it " +
2121 "was restarted" )
2122 leaderResult = main.FALSE
2123 if len( set( leaderList ) ) != 1:
2124 leaderResult = main.FALSE
2125 main.log.error(
2126 "Inconsistent view of leader for the election test app" )
2127 # TODO: print the list
2128 utilities.assert_equals(
2129 expect=main.TRUE,
2130 actual=leaderResult,
2131 onpass="Leadership election passed",
2132 onfail="Something went wrong with Leadership election" )
2133
2134 def CASE8( self, main ):
2135 """
2136 Compare topo
2137 """
2138 import json
2139 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002140 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002141 assert main, "main not defined"
2142 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002143 assert main.CLIs, "main.CLIs not defined"
2144 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002145
2146 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002147 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002148 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002149 topoResult = main.FALSE
2150 elapsed = 0
2151 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002152 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002153 startTime = time.time()
2154 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002155 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002156 devicesResults = main.TRUE
2157 linksResults = main.TRUE
2158 hostsResults = main.TRUE
2159 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002160 count += 1
2161 cliStart = time.time()
2162 devices = []
2163 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002164 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002165 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002166 name="devices-" + str( i ),
2167 args=[ ] )
2168 threads.append( t )
2169 t.start()
2170
2171 for t in threads:
2172 t.join()
2173 devices.append( t.result )
2174 hosts = []
2175 ipResult = main.TRUE
2176 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002177 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002178 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07002179 name="hosts-" + str( i ),
2180 args=[ ] )
2181 threads.append( t )
2182 t.start()
2183
2184 for t in threads:
2185 t.join()
2186 try:
2187 hosts.append( json.loads( t.result ) )
2188 except ( ValueError, TypeError ):
2189 main.log.exception( "Error parsing hosts results" )
2190 main.log.error( repr( t.result ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002191 hosts.append( [] )
Jon Hall5cf14d52015-07-16 12:15:19 -07002192 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002193 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002194 for host in hosts[ controller ]:
2195 if host is None or host.get( 'ipAddresses', [] ) == []:
2196 main.log.error(
2197 "DEBUG:Error with host ipAddresses on controller" +
2198 controllerStr + ": " + str( host ) )
2199 ipResult = main.FALSE
2200 ports = []
2201 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002202 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002203 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002204 name="ports-" + str( i ),
2205 args=[ ] )
2206 threads.append( t )
2207 t.start()
2208
2209 for t in threads:
2210 t.join()
2211 ports.append( t.result )
2212 links = []
2213 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002214 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002215 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002216 name="links-" + str( i ),
2217 args=[ ] )
2218 threads.append( t )
2219 t.start()
2220
2221 for t in threads:
2222 t.join()
2223 links.append( t.result )
2224 clusters = []
2225 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002226 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002227 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002228 name="clusters-" + str( i ),
2229 args=[ ] )
2230 threads.append( t )
2231 t.start()
2232
2233 for t in threads:
2234 t.join()
2235 clusters.append( t.result )
2236
2237 elapsed = time.time() - startTime
2238 cliTime = time.time() - cliStart
2239 print "Elapsed time: " + str( elapsed )
2240 print "CLI time: " + str( cliTime )
2241
2242 mnSwitches = main.Mininet1.getSwitches()
2243 mnLinks = main.Mininet1.getLinks()
2244 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002245 for controller in range( len( main.activeNodes ) ):
2246 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002247 if devices[ controller ] and ports[ controller ] and\
2248 "Error" not in devices[ controller ] and\
2249 "Error" not in ports[ controller ]:
2250
2251 currentDevicesResult = main.Mininet1.compareSwitches(
2252 mnSwitches,
2253 json.loads( devices[ controller ] ),
2254 json.loads( ports[ controller ] ) )
2255 else:
2256 currentDevicesResult = main.FALSE
2257 utilities.assert_equals( expect=main.TRUE,
2258 actual=currentDevicesResult,
2259 onpass="ONOS" + controllerStr +
2260 " Switches view is correct",
2261 onfail="ONOS" + controllerStr +
2262 " Switches view is incorrect" )
2263
2264 if links[ controller ] and "Error" not in links[ controller ]:
2265 currentLinksResult = main.Mininet1.compareLinks(
2266 mnSwitches, mnLinks,
2267 json.loads( links[ controller ] ) )
2268 else:
2269 currentLinksResult = main.FALSE
2270 utilities.assert_equals( expect=main.TRUE,
2271 actual=currentLinksResult,
2272 onpass="ONOS" + controllerStr +
2273 " links view is correct",
2274 onfail="ONOS" + controllerStr +
2275 " links view is incorrect" )
2276
2277 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2278 currentHostsResult = main.Mininet1.compareHosts(
2279 mnHosts,
2280 hosts[ controller ] )
2281 else:
2282 currentHostsResult = main.FALSE
2283 utilities.assert_equals( expect=main.TRUE,
2284 actual=currentHostsResult,
2285 onpass="ONOS" + controllerStr +
2286 " hosts exist in Mininet",
2287 onfail="ONOS" + controllerStr +
2288 " hosts don't match Mininet" )
2289 # CHECKING HOST ATTACHMENT POINTS
2290 hostAttachment = True
2291 zeroHosts = False
2292 # FIXME: topo-HA/obelisk specific mappings:
2293 # key is mac and value is dpid
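# e.g. host 8 -> mac 00:00:00:00:00:08 expected on device 0000000000003008, port 1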
2294 mappings = {}
2295 for i in range( 1, 29 ): # hosts 1 through 28
2296 # set up correct variables:
2297 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2298 if i == 1:
2299 deviceId = "1000".zfill(16)
2300 elif i == 2:
2301 deviceId = "2000".zfill(16)
2302 elif i == 3:
2303 deviceId = "3000".zfill(16)
2304 elif i == 4:
2305 deviceId = "3004".zfill(16)
2306 elif i == 5:
2307 deviceId = "5000".zfill(16)
2308 elif i == 6:
2309 deviceId = "6000".zfill(16)
2310 elif i == 7:
2311 deviceId = "6007".zfill(16)
2312 elif i >= 8 and i <= 17:
2313 dpid = '3' + str( i ).zfill( 3 )
2314 deviceId = dpid.zfill(16)
2315 elif i >= 18 and i <= 27:
2316 dpid = '6' + str( i ).zfill( 3 )
2317 deviceId = dpid.zfill(16)
2318 elif i == 28:
2319 deviceId = "2800".zfill(16)
2320 mappings[ macId ] = deviceId
2321 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2322 if hosts[ controller ] == []:
2323 main.log.warn( "There are no hosts discovered" )
2324 zeroHosts = True
2325 else:
2326 for host in hosts[ controller ]:
2327 mac = None
2328 location = None
2329 device = None
2330 port = None
2331 try:
2332 mac = host.get( 'mac' )
2333 assert mac, "mac field could not be found for this host object"
2334
2335 location = host.get( 'location' )
2336 assert location, "location field could not be found for this host object"
2337
2338 # Trim the protocol identifier off deviceId
2339 device = str( location.get( 'elementId' ) ).split(':')[1]
2340 assert device, "elementId field could not be found for this host location object"
2341
2342 port = location.get( 'port' )
2343 assert port, "port field could not be found for this host location object"
2344
2345 # Now check if this matches where they should be
2346 if mac and device and port:
2347 if str( port ) != "1":
2348 main.log.error( "The attachment port is incorrect for " +
2349 "host " + str( mac ) +
2350 ". Expected: 1 Actual: " + str( port) )
2351 hostAttachment = False
2352 if device != mappings[ str( mac ) ]:
2353 main.log.error( "The attachment device is incorrect for " +
2354 "host " + str( mac ) +
2355 ". Expected: " + mappings[ str( mac ) ] +
2356 " Actual: " + device )
2357 hostAttachment = False
2358 else:
2359 hostAttachment = False
2360 except AssertionError:
2361 main.log.exception( "Json object not as expected" )
2362 main.log.error( repr( host ) )
2363 hostAttachment = False
2364 else:
2365 main.log.error( "No hosts json output or \"Error\"" +
2366 " in output. hosts = " +
2367 repr( hosts[ controller ] ) )
2368 if zeroHosts is False:
2369 hostAttachment = True
2370
2371 # END CHECKING HOST ATTACHMENT POINTS
2372 devicesResults = devicesResults and currentDevicesResult
2373 linksResults = linksResults and currentLinksResult
2374 hostsResults = hostsResults and currentHostsResult
2375 hostAttachmentResults = hostAttachmentResults and\
2376 hostAttachment
Jon Halle9b1fa32015-12-08 15:32:21 -08002377 topoResult = devicesResults and linksResults and\
2378 hostsResults and hostAttachmentResults
2379 utilities.assert_equals( expect=True,
2380 actual=topoResult,
2381 onpass="ONOS topology matches Mininet",
2382 onfail="ONOS topology don't match Mininet" )
2383 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002384
2385 # Compare json objects for hosts and dataplane clusters
2386
2387 # hosts
2388 main.step( "Hosts view is consistent across all ONOS nodes" )
2389 consistentHostsResult = main.TRUE
2390 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002391 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002392 if "Error" not in hosts[ controller ]:
2393 if hosts[ controller ] == hosts[ 0 ]:
2394 continue
2395 else: # hosts not consistent
2396 main.log.error( "hosts from ONOS" + controllerStr +
2397 " is inconsistent with ONOS1" )
2398 main.log.warn( repr( hosts[ controller ] ) )
2399 consistentHostsResult = main.FALSE
2400
2401 else:
2402 main.log.error( "Error in getting ONOS hosts from ONOS" +
2403 controllerStr )
2404 consistentHostsResult = main.FALSE
2405 main.log.warn( "ONOS" + controllerStr +
2406 " hosts response: " +
2407 repr( hosts[ controller ] ) )
2408 utilities.assert_equals(
2409 expect=main.TRUE,
2410 actual=consistentHostsResult,
2411 onpass="Hosts view is consistent across all ONOS nodes",
2412 onfail="ONOS nodes have different views of hosts" )
2413
2414 main.step( "Hosts information is correct" )
2415 hostsResults = hostsResults and ipResult
2416 utilities.assert_equals(
2417 expect=main.TRUE,
2418 actual=hostsResults,
2419 onpass="Host information is correct",
2420 onfail="Host information is incorrect" )
2421
2422 main.step( "Host attachment points to the network" )
2423 utilities.assert_equals(
2424 expect=True,
2425 actual=hostAttachmentResults,
2426 onpass="Hosts are correctly attached to the network",
2427 onfail="ONOS did not correctly attach hosts to the network" )
2428
2429 # Strongly connected clusters of devices
2430 main.step( "Clusters view is consistent across all ONOS nodes" )
2431 consistentClustersResult = main.TRUE
2432 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002433 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002434 if "Error" not in clusters[ controller ]:
2435 if clusters[ controller ] == clusters[ 0 ]:
2436 continue
2437 else: # clusters not consistent
2438 main.log.error( "clusters from ONOS" +
2439 controllerStr +
2440 " is inconsistent with ONOS1" )
2441 consistentClustersResult = main.FALSE
2442
2443 else:
2444 main.log.error( "Error in getting dataplane clusters " +
2445 "from ONOS" + controllerStr )
2446 consistentClustersResult = main.FALSE
2447 main.log.warn( "ONOS" + controllerStr +
2448 " clusters response: " +
2449 repr( clusters[ controller ] ) )
2450 utilities.assert_equals(
2451 expect=main.TRUE,
2452 actual=consistentClustersResult,
2453 onpass="Clusters view is consistent across all ONOS nodes",
2454 onfail="ONOS nodes have different views of clusters" )
2455
2456 main.step( "There is only one SCC" )
2457 # there should always only be one cluster
2458 try:
2459 numClusters = len( json.loads( clusters[ 0 ] ) )
2460 except ( ValueError, TypeError ):
2461 main.log.exception( "Error parsing clusters[0]: " +
2462 repr( clusters[0] ) )
2463 clusterResults = main.FALSE
numClusters = "ERROR"
2464 if numClusters == 1:
2465 clusterResults = main.TRUE
2466 utilities.assert_equals(
2467 expect=1,
2468 actual=numClusters,
2469 onpass="ONOS shows 1 SCC",
2470 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2471
2472 topoResult = ( devicesResults and linksResults
2473 and hostsResults and consistentHostsResult
2474 and consistentClustersResult and clusterResults
2475 and ipResult and hostAttachmentResults )
2476
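# Require that the topology converged within 2 polling attempts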
2477 topoResult = topoResult and int( count <= 2 )
2478 note = "note it takes about " + str( int( cliTime ) ) + \
2479 " seconds for the test to make all the cli calls to fetch " +\
2480 "the topology from each ONOS instance"
2481 main.log.info(
2482 "Very crass estimate for topology discovery/convergence( " +
2483 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2484 str( count ) + " tries" )
2485
2486 main.step( "Device information is correct" )
2487 utilities.assert_equals(
2488 expect=main.TRUE,
2489 actual=devicesResults,
2490 onpass="Device information is correct",
2491 onfail="Device information is incorrect" )
2492
2493 main.step( "Links are correct" )
2494 utilities.assert_equals(
2495 expect=main.TRUE,
2496 actual=linksResults,
2497 onpass="Link are correct",
2498 onfail="Links are incorrect" )
2499
2500 # FIXME: move this to an ONOS state case
2501 main.step( "Checking ONOS nodes" )
2502 nodesOutput = []
2503 nodeResults = main.TRUE
2504 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002505 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002506 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002507 name="nodes-" + str( i ),
2508 args=[ ] )
2509 threads.append( t )
2510 t.start()
2511
2512 for t in threads:
2513 t.join()
2514 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002515 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002516 ips.sort()
Jon Hall5cf14d52015-07-16 12:15:19 -07002517 for i in nodesOutput:
2518 try:
2519 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002520 activeIps = []
2521 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002522 for node in current:
Jon Halle9b1fa32015-12-08 15:32:21 -08002523 if node['state'] == 'ACTIVE':
2524 activeIps.append( node['ip'] )
2525 activeIps.sort()
2526 if ips == activeIps:
2527 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002528 except ( ValueError, TypeError ):
2529 main.log.error( "Error parsing nodes output" )
2530 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002531 currentResult = main.FALSE
2532 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002533 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2534 onpass="Nodes check successful",
2535 onfail="Nodes check NOT successful" )
2536
2537 def CASE9( self, main ):
2538 """
2539 Link s3-s28 down
2540 """
2541 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002542 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002543 assert main, "main not defined"
2544 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002545 assert main.CLIs, "main.CLIs not defined"
2546 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002547 # NOTE: You should probably run a topology check after this
2548
2549 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2550
2551 description = "Turn off a link to ensure that Link Discovery " +\
2552 "is working properly"
2553 main.case( description )
2554
2555 main.step( "Kill Link between s3 and s28" )
2556 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2557 main.log.info( "Waiting " + str( linkSleep ) +
2558 " seconds for link down to be discovered" )
2559 time.sleep( linkSleep )
2560 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2561 onpass="Link down successful",
2562 onfail="Failed to bring link down" )
2563 # TODO do some sort of check here
2564
2565 def CASE10( self, main ):
2566 """
2567 Link s3-s28 up
2568 """
2569 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002570 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002571 assert main, "main not defined"
2572 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002573 assert main.CLIs, "main.CLIs not defined"
2574 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002575 # NOTE: You should probably run a topology check after this
2576
2577 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2578
2579 description = "Restore a link to ensure that Link Discovery is " + \
2580 "working properly"
2581 main.case( description )
2582
2583 main.step( "Bring link between s3 and s28 back up" )
2584 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2585 main.log.info( "Waiting " + str( linkSleep ) +
2586 " seconds for link up to be discovered" )
2587 time.sleep( linkSleep )
2588 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2589 onpass="Link up successful",
2590 onfail="Failed to bring link up" )
2591 # TODO do some sort of check here
2592
2593 def CASE11( self, main ):
2594 """
2595 Switch Down
2596 """
2597 # NOTE: You should probably run a topology check after this
2598 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002599 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002600 assert main, "main not defined"
2601 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002602 assert main.CLIs, "main.CLIs not defined"
2603 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002604
2605 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2606
2607 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002608 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002609 main.case( description )
2610 switch = main.params[ 'kill' ][ 'switch' ]
2611 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2612
2613 # TODO: Make this switch parameterizable
2614 main.step( "Kill " + switch )
2615 main.log.info( "Deleting " + switch )
2616 main.Mininet1.delSwitch( switch )
2617 main.log.info( "Waiting " + str( switchSleep ) +
2618 " seconds for switch down to be discovered" )
2619 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002620 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002621 # Peek at the deleted switch
2622 main.log.warn( str( device ) )
2623 result = main.FALSE
2624 if device and device[ 'available' ] is False:
2625 result = main.TRUE
2626 utilities.assert_equals( expect=main.TRUE, actual=result,
2627 onpass="Kill switch successful",
2628 onfail="Failed to kill switch?" )
2629
2630 def CASE12( self, main ):
2631 """
2632 Switch Up
2633 """
2634 # NOTE: You should probably run a topology check after this
2635 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002636 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002637 assert main, "main not defined"
2638 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002639 assert main.CLIs, "main.CLIs not defined"
2640 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002641 assert ONOS1Port, "ONOS1Port not defined"
2642 assert ONOS2Port, "ONOS2Port not defined"
2643 assert ONOS3Port, "ONOS3Port not defined"
2644 assert ONOS4Port, "ONOS4Port not defined"
2645 assert ONOS5Port, "ONOS5Port not defined"
2646 assert ONOS6Port, "ONOS6Port not defined"
2647 assert ONOS7Port, "ONOS7Port not defined"
2648
2649 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2650 switch = main.params[ 'kill' ][ 'switch' ]
2651 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2652 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002653 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002654 description = "Adding a switch to ensure it is discovered correctly"
2655 main.case( description )
2656
2657 main.step( "Add back " + switch )
2658 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2659 for peer in links:
2660 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002661 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002662 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2663 main.log.info( "Waiting " + str( switchSleep ) +
2664 " seconds for switch up to be discovered" )
2665 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002666 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002667 # Peek at the deleted switch
2668 main.log.warn( str( device ) )
2669 result = main.FALSE
2670 if device and device[ 'available' ]:
2671 result = main.TRUE
2672 utilities.assert_equals( expect=main.TRUE, actual=result,
2673 onpass="add switch successful",
2674 onfail="Failed to add switch?" )
2675
2676 def CASE13( self, main ):
2677 """
2678 Clean up
2679 """
2680 import os
2681 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002682 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002683 assert main, "main not defined"
2684 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002685 assert main.CLIs, "main.CLIs not defined"
2686 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002687
2688 # printing colors to terminal
2689 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2690 'blue': '\033[94m', 'green': '\033[92m',
2691 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2692 main.case( "Test Cleanup" )
2693 main.step( "Killing tcpdumps" )
2694 main.Mininet2.stopTcpdump()
2695
2696 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002697 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002698 main.step( "Copying MN pcap and ONOS log files to test station" )
2699 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2700 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002701 # NOTE: MN Pcap file is being saved to logdir.
2702 # We scp this file as MN and TestON aren't necessarily the same vm
2703
2704 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002705 # TODO: Load these from params
2706 # NOTE: must end in /
2707 logFolder = "/opt/onos/log/"
2708 logFiles = [ "karaf.log", "karaf.log.1" ]
2709 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002710 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002711 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002712 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002713 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2714 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002715 # std*.log's
2716 # NOTE: must end in /
2717 logFolder = "/opt/onos/var/"
2718 logFiles = [ "stderr.log", "stdout.log" ]
2719 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002720 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002721 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002722 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002723 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2724 logFolder + f, dstName )
2725 else:
2726 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002727
2728 main.step( "Stopping Mininet" )
2729 mnResult = main.Mininet1.stopNet()
2730 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2731 onpass="Mininet stopped",
2732 onfail="MN cleanup NOT successful" )
2733
2734 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002735 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002736 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2737 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002738
2739 try:
2740 timerLog = open( main.logdir + "/Timers.csv", 'w')
2741 # Overwrite the timer log with the latest labels and data, then close
2742 labels = "Gossip Intents, Restart"
2743 data = str( gossipTime ) + ", " + str( main.restartTime )
2744 timerLog.write( labels + "\n" + data )
2745 timerLog.close()
2746 except NameError, e:
2747 main.log.exception(e)
2748
2749 def CASE14( self, main ):
2750 """
2751 start election app on all onos nodes
2752 """
Jon Halle1a3b752015-07-22 13:02:46 -07002753 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002754 assert main, "main not defined"
2755 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002756 assert main.CLIs, "main.CLIs not defined"
2757 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002758
2759 main.case( "Start Leadership Election app" )
2760 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002761 onosCli = main.CLIs[ main.activeNodes[0] ]
2762 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002763 utilities.assert_equals(
2764 expect=main.TRUE,
2765 actual=appResult,
2766 onpass="Election app installed",
2767 onfail="Something went wrong with installing Leadership election" )
2768
2769 main.step( "Run for election on each node" )
2770 leaderResult = main.TRUE
2771 leaders = []
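# Each active node registers as a candidate for the election topic, then
# every node is queried for its view of the leader, which should be the IP
# of one of the ONOS nodes.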
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002772 for i in main.activeNodes:
2773 main.CLIs[i].electionTestRun()
2774 for i in main.activeNodes:
2775 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002776 leader = cli.electionTestLeader()
2777 if leader is None or leader == main.FALSE:
2778 main.log.error( cli.name + ": Leader for the election app " +
2779 "should be an ONOS node, instead got '" +
2780 str( leader ) + "'" )
2781 leaderResult = main.FALSE
2782 leaders.append( leader )
2783 utilities.assert_equals(
2784 expect=main.TRUE,
2785 actual=leaderResult,
2786 onpass="Successfully ran for leadership",
2787 onfail="Failed to run for leadership" )
2788
2789 main.step( "Check that each node shows the same leader" )
2790 sameLeader = main.TRUE
2791 if len( set( leaders ) ) != 1:
2792 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002793 main.log.error( "Results of electionTestLeader in order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002794 str( leaders ) )
2795 utilities.assert_equals(
2796 expect=main.TRUE,
2797 actual=sameLeader,
2798 onpass="Leadership is consistent for the election topic",
2799 onfail="Nodes have different leaders" )
2800
2801 def CASE15( self, main ):
2802 """
2803 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002804 15.1 Run election on each node
2805 15.2 Check that each node has the same leaders and candidates
2806 15.3 Find current leader and withdraw
2807 15.4 Check that a new node was elected leader
2808 15.5 Check that the new leader was the candidate of the old leader
2809 15.6 Run for election on old leader
2810 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2811 15.8 Make sure that the old leader was added to the candidate list
2812
2813 old and new variable prefixes refer to data from before vs. after the
2814 withdrawal, and later to before the withdrawal vs. after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002815 """
2816 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002817 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002818 assert main, "main not defined"
2819 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002820 assert main.CLIs, "main.CLIs not defined"
2821 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002822
Jon Hall5cf14d52015-07-16 12:15:19 -07002823 description = "Check that Leadership Election is still functional"
2824 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002825 # NOTE: Need to re-run since being a candidate is not persistent
2826 # TODO: add check for "Command not found:" in the driver, this
2827 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002828
acsmars71adceb2015-08-31 15:09:26 -07002829 oldLeaders = [] # leaders by node before withdrawal from candidates
2830 newLeaders = [] # leaders by node after withdrawal from candidates
2831 oldAllCandidates = [] # list of lists of each node's candidates before
2832 newAllCandidates = [] # list of lists of each node's candidates after
2833 oldCandidates = [] # list of candidates from node 0 before withdrawal
2834 newCandidates = [] # list of candidates from node 0 after withdrawal
2835 oldLeader = '' # the old leader from oldLeaders, None if not same
2836 newLeader = '' # the new leader from newLeaders, None if not same
2837 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2838 expectNoLeader = False # True when there is only one leader
2839 if main.numCtrls == 1:
2840 expectNoLeader = True
2841
2842 main.step( "Run for election on each node" )
2843 electionResult = main.TRUE
2844
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002845 for i in main.activeNodes: # run test election on each node
2846 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002847 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002848 utilities.assert_equals(
2849 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002850 actual=electionResult,
2851 onpass="All nodes successfully ran for leadership",
2852 onfail="At least one node failed to run for leadership" )
2853
acsmars3a72bde2015-09-02 14:16:22 -07002854 if electionResult == main.FALSE:
2855 main.log.error(
2856 "Skipping Test Case because Election Test App isn't loaded" )
2857 main.skipCase()
2858
acsmars71adceb2015-08-31 15:09:26 -07002859 main.step( "Check that each node shows the same leader and candidates" )
2860 sameResult = main.TRUE
2861 failMessage = "Nodes have different leaders"
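# specificLeaderCandidate returns a list for the given topic; element 0 is
# taken as that node's view of the leader and the whole list as its
# candidate view.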
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002862 for i in main.activeNodes:
2863 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002864 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2865 oldAllCandidates.append( node )
2866 oldLeaders.append( node[ 0 ] )
2867 oldCandidates = oldAllCandidates[ 0 ]
2868
2869 # Check that each node has the same leader. Defines oldLeader
2870 if len( set( oldLeaders ) ) != 1:
2871 sameResult = main.FALSE
2872 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2873 oldLeader = None
2874 else:
2875 oldLeader = oldLeaders[ 0 ]
2876
2877 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002878 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002879 for candidates in oldAllCandidates:
2880 if set( candidates ) != set( oldCandidates ):
2881 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002882 candidateDiscrepancy = True
2883
2884 if candidateDiscrepancy:
2885 failMessage += " and candidates"
2886
acsmars71adceb2015-08-31 15:09:26 -07002887 utilities.assert_equals(
2888 expect=main.TRUE,
2889 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002890 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002891 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002892
2893 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002894 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002895 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002896 if oldLeader is None:
2897 main.log.error( "Leadership isn't consistent." )
2898 withdrawResult = main.FALSE
2899 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002900 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002901 if oldLeader == main.nodes[ i ].ip_address:
2902 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002903 break
2904 else: # for/else: runs only if the loop did not break (no matching node)
2905 main.log.error( "Leader election, could not find current leader" )
2906 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002907 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002908 utilities.assert_equals(
2909 expect=main.TRUE,
2910 actual=withdrawResult,
2911 onpass="Node was withdrawn from election",
2912 onfail="Node was not withdrawn from election" )
2913
acsmars71adceb2015-08-31 15:09:26 -07002914 main.step( "Check that a new node was elected leader" )
2915
Jon Hall5cf14d52015-07-16 12:15:19 -07002916 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002917 newLeaderResult = main.TRUE
2918 failMessage = "Nodes have different leaders"
2919
2920 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002921 for i in main.activeNodes:
2922 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002923 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2924 # elections might not have finished yet
2925 if node[ 0 ] == 'none' and not expectNoLeader:
2926 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2927 "sure elections are complete." )
2928 time.sleep(5)
2929 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2930 # election still isn't done or there is a problem
2931 if node[ 0 ] == 'none':
2932 main.log.error( "No leader was elected on at least 1 node" )
2933 newLeaderResult = main.FALSE
2934 newAllCandidates.append( node )
2935 newLeaders.append( node[ 0 ] )
2936 newCandidates = newAllCandidates[ 0 ]
2937
2938 # Check that each node has the same leader. Defines newLeader
2939 if len( set( newLeaders ) ) != 1:
2940 newLeaderResult = main.FALSE
2941 main.log.error( "Nodes have different leaders: " +
2942 str( newLeaders ) )
2943 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002944 else:
acsmars71adceb2015-08-31 15:09:26 -07002945 newLeader = newLeaders[ 0 ]
2946
2947 # Check that each node's candidate list is the same
2948 for candidates in newAllCandidates:
2949 if set( candidates ) != set( newCandidates ):
2950 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002951 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002952
2953 # Check that the new leader is not the older leader, which was withdrawn
2954 if newLeader == oldLeader:
2955 newLeaderResult = main.FALSE
2956 main.log.error( "All nodes still see old leader: " + oldLeader +
2957 " as the current leader" )
2958
Jon Hall5cf14d52015-07-16 12:15:19 -07002959 utilities.assert_equals(
2960 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002961 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002962 onpass="Leadership election passed",
2963 onfail="Something went wrong with Leadership election" )
2964
acsmars71adceb2015-08-31 15:09:26 -07002965 main.step( "Check that the new leader was the candidate of the old leader" )
2966 # candidates[ 2 ] should become the top candidate after withdrawal
2967 correctCandidateResult = main.TRUE
2968 if expectNoLeader:
2969 if newLeader == 'none':
2970 main.log.info( "No leader expected. None found. Pass" )
2971 correctCandidateResult = main.TRUE
2972 else:
2973 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2974 correctCandidateResult = main.FALSE
2975 elif newLeader != oldCandidates[ 2 ]:
2976 correctCandidateResult = main.FALSE
2977 main.log.error( "Candidate " + newLeader + " was elected. " +
2978 oldCandidates[ 2 ] + " should have had priority." )
2979
2980 utilities.assert_equals(
2981 expect=main.TRUE,
2982 actual=correctCandidateResult,
2983 onpass="Correct Candidate Elected",
2984 onfail="Incorrect Candidate Elected" )
2985
Jon Hall5cf14d52015-07-16 12:15:19 -07002986 main.step( "Run for election on old leader (just so everyone " +
2987 "is in the hat)" )
acsmars71adceb2015-08-31 15:09:26 -07002988 if oldLeaderCLI is not None:
2989 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002990 else:
acsmars71adceb2015-08-31 15:09:26 -07002991 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002992 runResult = main.FALSE
2993 utilities.assert_equals(
2994 expect=main.TRUE,
2995 actual=runResult,
2996 onpass="App re-ran for election",
2997 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07002998 main.step(
2999 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003000 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07003001 positionResult = main.TRUE
3002 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
3003
3004 # Reset and reuse the new candidate and leaders lists
3005 newAllCandidates = []
3006 newCandidates = []
3007 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003008 for i in main.activeNodes:
3009 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003010 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3011 if oldLeader not in node: # election might not have finished yet
3012 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3013 "be sure elections are complete" )
3014 time.sleep(5)
3015 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3016 if oldLeader not in node: # election still isn't done, errors
3017 main.log.error(
3018 "Old leader was not elected on at least one node" )
3019 positionResult = main.FALSE
3020 newAllCandidates.append( node )
3021 newLeaders.append( node[ 0 ] )
3022 newCandidates = newAllCandidates[ 0 ]
3023
3024 # Check that each node has the same leader. Defines newLeader
3025 if len( set( newLeaders ) ) != 1:
3026 positionResult = main.FALSE
3027 main.log.error( "Nodes have different leaders: " +
3028 str( newLeaders ) )
3029 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003030 else:
acsmars71adceb2015-08-31 15:09:26 -07003031 newLeader = newLeaders[ 0 ]
3032
3033 # Check that each node's candidate list is the same
3034 for candidates in newAllCandidates:
3035 if set( candidates ) != set( newCandidates ):
3036 positionResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003037 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003038
3039 # Check that the re-elected node is last on the candidate List
3040 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003041 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003042 str( newCandidates ) )
3043 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003044
3045 utilities.assert_equals(
3046 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003047 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003048 onpass="Old leader successfully re-ran for election",
3049 onfail="Something went wrong with Leadership election after " +
3050 "the old leader re-ran for election" )
3051
3052 def CASE16( self, main ):
3053 """
3054 Install Distributed Primitives app
3055 """
3056 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003057 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003058 assert main, "main not defined"
3059 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003060 assert main.CLIs, "main.CLIs not defined"
3061 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003062
3063 # Variables for the distributed primitives tests
3064 global pCounterName
3065 global iCounterName
3066 global pCounterValue
3067 global iCounterValue
3068 global onosSet
3069 global onosSetName
3070 pCounterName = "TestON-Partitions"
3071 iCounterName = "TestON-inMemory"
3072 pCounterValue = 0
3073 iCounterValue = 0
3074 onosSet = set([])
3075 onosSetName = "TestON-set"
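# pCounter* track the expected value of the partitioned counter, iCounter*
# the in-memory counter, and onosSet mirrors the expected contents of the
# distributed set so CASE17 can verify each primitive against local state.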
3076
3077 description = "Install Primitives app"
3078 main.case( description )
3079 main.step( "Install Primitives app" )
3080 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003081 node = main.activeNodes[0]
3082 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003083 utilities.assert_equals( expect=main.TRUE,
3084 actual=appResults,
3085 onpass="Primitives app activated",
3086 onfail="Primitives app not activated" )
3087 time.sleep( 5 ) # To allow all nodes to activate
3088
3089 def CASE17( self, main ):
3090 """
3091 Check for basic functionality with distributed primitives
3092 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003093 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003094 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003095 assert main, "main not defined"
3096 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003097 assert main.CLIs, "main.CLIs not defined"
3098 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003099 assert pCounterName, "pCounterName not defined"
3100 assert iCounterName, "iCounterName not defined"
3101 assert onosSetName, "onosSetName not defined"
3102 # NOTE: assert fails if value is 0/None/Empty/False
3103 try:
3104 pCounterValue
3105 except NameError:
3106 main.log.error( "pCounterValue not defined, setting to 0" )
3107 pCounterValue = 0
3108 try:
3109 iCounterValue
3110 except NameError:
3111 main.log.error( "iCounterValue not defined, setting to 0" )
3112 iCounterValue = 0
3113 try:
3114 onosSet
3115 except NameError:
3116 main.log.error( "onosSet not defined, setting to empty Set" )
3117 onosSet = set([])
3118 # Variables for the distributed primitives tests. These are local only
3119 addValue = "a"
3120 addAllValue = "a b c d e f"
3121 retainValue = "c d e f"
3122
3123 description = "Check for basic functionality with distributed " +\
3124 "primitives"
3125 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003126 main.caseExplanation = "Test the methods of the distributed " +\
3127 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003128 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003129 # Partitioned counters
3130 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003131 pCounters = []
3132 threads = []
3133 addedPValues = []
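# Every active node increments the shared counter concurrently;
# counterTestAddAndGet returns the post-increment value, so each expected
# value in addedPValues should appear somewhere in pCounters (order is not
# guaranteed).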
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003134 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003135 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3136 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003137 args=[ pCounterName ] )
3138 pCounterValue += 1
3139 addedPValues.append( pCounterValue )
3140 threads.append( t )
3141 t.start()
3142
3143 for t in threads:
3144 t.join()
3145 pCounters.append( t.result )
3146 # Check that counter incremented numController times
3147 pCounterResults = True
3148 for i in addedPValues:
3149 tmpResult = i in pCounters
3150 pCounterResults = pCounterResults and tmpResult
3151 if not tmpResult:
3152 main.log.error( str( i ) + " is not in partitioned "
3153 "counter incremented results" )
3154 utilities.assert_equals( expect=True,
3155 actual=pCounterResults,
3156 onpass="Default counter incremented",
3157 onfail="Error incrementing default" +
3158 " counter" )
3159
Jon Halle1a3b752015-07-22 13:02:46 -07003160 main.step( "Get then Increment a default counter on each node" )
3161 pCounters = []
3162 threads = []
3163 addedPValues = []
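# counterTestGetAndAdd returns the pre-increment value, so the expected
# value is recorded before pCounterValue is bumped.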
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003164 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003165 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3166 name="counterGetAndAdd-" + str( i ),
3167 args=[ pCounterName ] )
3168 addedPValues.append( pCounterValue )
3169 pCounterValue += 1
3170 threads.append( t )
3171 t.start()
3172
3173 for t in threads:
3174 t.join()
3175 pCounters.append( t.result )
3176 # Check that counter incremented numController times
3177 pCounterResults = True
3178 for i in addedPValues:
3179 tmpResult = i in pCounters
3180 pCounterResults = pCounterResults and tmpResult
3181 if not tmpResult:
3182 main.log.error( str( i ) + " is not in partitioned "
3183 "counter incremented results" )
3184 utilities.assert_equals( expect=True,
3185 actual=pCounterResults,
3186 onpass="Default counter incremented",
3187 onfail="Error incrementing default" +
3188 " counter" )
3189
3190 main.step( "Counters we added have the correct values" )
3191 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3192 utilities.assert_equals( expect=main.TRUE,
3193 actual=incrementCheck,
3194 onpass="Added counters are correct",
3195 onfail="Added counters are incorrect" )
3196
3197 main.step( "Add -8 to then get a default counter on each node" )
3198 pCounters = []
3199 threads = []
3200 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003201 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003202 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3203 name="counterIncrement-" + str( i ),
3204 args=[ pCounterName ],
3205 kwargs={ "delta": -8 } )
3206 pCounterValue += -8
3207 addedPValues.append( pCounterValue )
3208 threads.append( t )
3209 t.start()
3210
3211 for t in threads:
3212 t.join()
3213 pCounters.append( t.result )
3214 # Check that counter incremented numController times
3215 pCounterResults = True
3216 for i in addedPValues:
3217 tmpResult = i in pCounters
3218 pCounterResults = pCounterResults and tmpResult
3219 if not tmpResult:
3220 main.log.error( str( i ) + " is not in partitioned "
3221 "counter incremented results" )
3222 utilities.assert_equals( expect=True,
3223 actual=pCounterResults,
3224 onpass="Default counter incremented",
3225 onfail="Error incrementing default" +
3226 " counter" )
3227
3228 main.step( "Add 5 to then get a default counter on each node" )
3229 pCounters = []
3230 threads = []
3231 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003232 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003233 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3234 name="counterIncrement-" + str( i ),
3235 args=[ pCounterName ],
3236 kwargs={ "delta": 5 } )
3237 pCounterValue += 5
3238 addedPValues.append( pCounterValue )
3239 threads.append( t )
3240 t.start()
3241
3242 for t in threads:
3243 t.join()
3244 pCounters.append( t.result )
3245 # Check that counter incremented numController times
3246 pCounterResults = True
3247 for i in addedPValues:
3248 tmpResult = i in pCounters
3249 pCounterResults = pCounterResults and tmpResult
3250 if not tmpResult:
3251 main.log.error( str( i ) + " is not in partitioned "
3252 "counter incremented results" )
3253 utilities.assert_equals( expect=True,
3254 actual=pCounterResults,
3255 onpass="Default counter incremented",
3256 onfail="Error incrementing default" +
3257 " counter" )
3258
3259 main.step( "Get then add 5 to a default counter on each node" )
3260 pCounters = []
3261 threads = []
3262 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003263 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003264 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3265 name="counterIncrement-" + str( i ),
3266 args=[ pCounterName ],
3267 kwargs={ "delta": 5 } )
3268 addedPValues.append( pCounterValue )
3269 pCounterValue += 5
3270 threads.append( t )
3271 t.start()
3272
3273 for t in threads:
3274 t.join()
3275 pCounters.append( t.result )
3276 # Check that counter incremented numController times
3277 pCounterResults = True
3278 for i in addedPValues:
3279 tmpResult = i in pCounters
3280 pCounterResults = pCounterResults and tmpResult
3281 if not tmpResult:
3282 main.log.error( str( i ) + " is not in partitioned "
3283 "counter incremented results" )
3284 utilities.assert_equals( expect=True,
3285 actual=pCounterResults,
3286 onpass="Default counter incremented",
3287 onfail="Error incrementing default" +
3288 " counter" )
3289
3290 main.step( "Counters we added have the correct values" )
3291 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3292 utilities.assert_equals( expect=main.TRUE,
3293 actual=incrementCheck,
3294 onpass="Added counters are correct",
3295 onfail="Added counters are incorrect" )
3296
3297 # In-Memory counters
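# The same add-and-get / get-and-add checks are repeated for the in-memory
# counter by passing inMemory=True to the CLI helpers.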
3298 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003299 iCounters = []
3300 addedIValues = []
3301 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003302 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003303 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003304 name="icounterIncrement-" + str( i ),
3305 args=[ iCounterName ],
3306 kwargs={ "inMemory": True } )
3307 iCounterValue += 1
3308 addedIValues.append( iCounterValue )
3309 threads.append( t )
3310 t.start()
3311
3312 for t in threads:
3313 t.join()
3314 iCounters.append( t.result )
3315 # Check that counter incremented numController times
3316 iCounterResults = True
3317 for i in addedIValues:
3318 tmpResult = i in iCounters
3319 iCounterResults = iCounterResults and tmpResult
3320 if not tmpResult:
3321 main.log.error( str( i ) + " is not in the in-memory "
3322 "counter incremented results" )
3323 utilities.assert_equals( expect=True,
3324 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003325 onpass="In-memory counter incremented",
3326 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003327 " counter" )
3328
Jon Halle1a3b752015-07-22 13:02:46 -07003329 main.step( "Get then Increment an in-memory counter on each node" )
3330 iCounters = []
3331 threads = []
3332 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003333 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003334 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3335 name="counterGetAndAdd-" + str( i ),
3336 args=[ iCounterName ],
3337 kwargs={ "inMemory": True } )
3338 addedIValues.append( iCounterValue )
3339 iCounterValue += 1
3340 threads.append( t )
3341 t.start()
3342
3343 for t in threads:
3344 t.join()
3345 iCounters.append( t.result )
3346 # Check that counter incremented numController times
3347 iCounterResults = True
3348 for i in addedIValues:
3349 tmpResult = i in iCounters
3350 iCounterResults = iCounterResults and tmpResult
3351 if not tmpResult:
3352 main.log.error( str( i ) + " is not in in-memory "
3353 "counter incremented results" )
3354 utilities.assert_equals( expect=True,
3355 actual=iCounterResults,
3356 onpass="In-memory counter incremented",
3357 onfail="Error incrementing in-memory" +
3358 " counter" )
3359
3360 main.step( "Counters we added have the correct values" )
3361 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3362 utilities.assert_equals( expect=main.TRUE,
3363 actual=incrementCheck,
3364 onpass="Added counters are correct",
3365 onfail="Added counters are incorrect" )
3366
3367 main.step( "Add -8 to then get an in-memory counter on each node" )
3368 iCounters = []
3369 threads = []
3370 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003371 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003372 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3373 name="counterIncrement-" + str( i ),
3374 args=[ iCounterName ],
3375 kwargs={ "delta": -8, "inMemory": True } )
3376 iCounterValue += -8
3377 addedIValues.append( iCounterValue )
3378 threads.append( t )
3379 t.start()
3380
3381 for t in threads:
3382 t.join()
3383 iCounters.append( t.result )
3384 # Check that counter incremented numController times
3385 iCounterResults = True
3386 for i in addedIValues:
3387 tmpResult = i in iCounters
3388 iCounterResults = iCounterResults and tmpResult
3389 if not tmpResult:
3390 main.log.error( str( i ) + " is not in in-memory "
3391 "counter incremented results" )
3392 utilities.assert_equals( expect=True,
3393 actual=pCounterResults,
3394 onpass="In-memory counter incremented",
3395 onfail="Error incrementing in-memory" +
3396 " counter" )
3397
3398 main.step( "Add 5 to then get an in-memory counter on each node" )
3399 iCounters = []
3400 threads = []
3401 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003402 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003403 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3404 name="counterIncrement-" + str( i ),
3405 args=[ iCounterName ],
3406 kwargs={ "delta": 5, "inMemory": True } )
3407 iCounterValue += 5
3408 addedIValues.append( iCounterValue )
3409 threads.append( t )
3410 t.start()
3411
3412 for t in threads:
3413 t.join()
3414 iCounters.append( t.result )
3415 # Check that counter incremented numController times
3416 iCounterResults = True
3417 for i in addedIValues:
3418 tmpResult = i in iCounters
3419 iCounterResults = iCounterResults and tmpResult
3420 if not tmpResult:
3421 main.log.error( str( i ) + " is not in in-memory "
3422 "counter incremented results" )
3423 utilities.assert_equals( expect=True,
3424 actual=iCounterResults,
3425 onpass="In-memory counter incremented",
3426 onfail="Error incrementing in-memory" +
3427 " counter" )
3428
3429 main.step( "Get then add 5 to an in-memory counter on each node" )
3430 iCounters = []
3431 threads = []
3432 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003433 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003434 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3435 name="counterIncrement-" + str( i ),
3436 args=[ iCounterName ],
3437 kwargs={ "delta": 5, "inMemory": True } )
3438 addedIValues.append( iCounterValue )
3439 iCounterValue += 5
3440 threads.append( t )
3441 t.start()
3442
3443 for t in threads:
3444 t.join()
3445 iCounters.append( t.result )
3446 # Check that counter incremented numController times
3447 iCounterResults = True
3448 for i in addedIValues:
3449 tmpResult = i in iCounters
3450 iCounterResults = iCounterResults and tmpResult
3451 if not tmpResult:
3452 main.log.error( str( i ) + " is not in in-memory "
3453 "counter incremented results" )
3454 utilities.assert_equals( expect=True,
3455 actual=iCounterResults,
3456 onpass="In-memory counter incremented",
3457 onfail="Error incrementing in-memory" +
3458 " counter" )
3459
3460 main.step( "Counters we added have the correct values" )
3461 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3462 utilities.assert_equals( expect=main.TRUE,
3463 actual=incrementCheck,
3464 onpass="Added counters are correct",
3465 onfail="Added counters are incorrect" )
3466
Jon Hall5cf14d52015-07-16 12:15:19 -07003467 main.step( "Check counters are consistent across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003468 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003469 utilities.assert_equals( expect=main.TRUE,
3470 actual=consistentCounterResults,
3471 onpass="ONOS counters are consistent " +
3472 "across nodes",
3473 onfail="ONOS Counters are inconsistent " +
3474 "across nodes" )
3475
3476 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003477 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3478 incrementCheck = incrementCheck and \
3479 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003480 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003481 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003482 onpass="Added counters are correct",
3483 onfail="Added counters are incorrect" )
3484 # DISTRIBUTED SETS
3485 main.step( "Distributed Set get" )
3486 size = len( onosSet )
3487 getResponses = []
3488 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003489 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003490 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003491 name="setTestGet-" + str( i ),
3492 args=[ onosSetName ] )
3493 threads.append( t )
3494 t.start()
3495 for t in threads:
3496 t.join()
3497 getResponses.append( t.result )
3498
3499 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003500 for i in range( len( main.activeNodes ) ):
3501 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003502 if isinstance( getResponses[ i ], list):
3503 current = set( getResponses[ i ] )
3504 if len( current ) == len( getResponses[ i ] ):
3505 # no repeats
3506 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003507 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003508 " has incorrect view" +
3509 " of set " + onosSetName + ":\n" +
3510 str( getResponses[ i ] ) )
3511 main.log.debug( "Expected: " + str( onosSet ) )
3512 main.log.debug( "Actual: " + str( current ) )
3513 getResults = main.FALSE
3514 else:
3515 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003516 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003517 " has repeat elements in" +
3518 " set " + onosSetName + ":\n" +
3519 str( getResponses[ i ] ) )
3520 getResults = main.FALSE
3521 elif getResponses[ i ] == main.ERROR:
3522 getResults = main.FALSE
3523 utilities.assert_equals( expect=main.TRUE,
3524 actual=getResults,
3525 onpass="Set elements are correct",
3526 onfail="Set elements are incorrect" )
3527
3528 main.step( "Distributed Set size" )
3529 sizeResponses = []
3530 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003531 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003532 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003533 name="setTestSize-" + str( i ),
3534 args=[ onosSetName ] )
3535 threads.append( t )
3536 t.start()
3537 for t in threads:
3538 t.join()
3539 sizeResponses.append( t.result )
3540
3541 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003542 for i in range( len( main.activeNodes ) ):
3543 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003544 if size != sizeResponses[ i ]:
3545 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003546 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003547 " expected a size of " + str( size ) +
3548 " for set " + onosSetName +
3549 " but got " + str( sizeResponses[ i ] ) )
3550 utilities.assert_equals( expect=main.TRUE,
3551 actual=sizeResults,
3552 onpass="Set sizes are correct",
3553 onfail="Set sizes are incorrect" )
3554
3555 main.step( "Distributed Set add()" )
3556 onosSet.add( addValue )
3557 addResponses = []
3558 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003559 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003560 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003561 name="setTestAdd-" + str( i ),
3562 args=[ onosSetName, addValue ] )
3563 threads.append( t )
3564 t.start()
3565 for t in threads:
3566 t.join()
3567 addResponses.append( t.result )
3568
3569 # main.TRUE = successfully changed the set
3570 # main.FALSE = action resulted in no change in set
3571 # main.ERROR - Some error in executing the function
3572 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003573 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003574 if addResponses[ i ] == main.TRUE:
3575 # All is well
3576 pass
3577 elif addResponses[ i ] == main.FALSE:
3578 # Already in set, probably fine
3579 pass
3580 elif addResponses[ i ] == main.ERROR:
3581 # Error in execution
3582 addResults = main.FALSE
3583 else:
3584 # unexpected result
3585 addResults = main.FALSE
3586 if addResults != main.TRUE:
3587 main.log.error( "Error executing set add" )
3588
3589 # Check if set is still correct
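# NOTE: this get/size verification is repeated after every set mutation
# below; every node's view and reported size must match the locally
# tracked onosSet.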
3590 size = len( onosSet )
3591 getResponses = []
3592 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003593 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003594 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003595 name="setTestGet-" + str( i ),
3596 args=[ onosSetName ] )
3597 threads.append( t )
3598 t.start()
3599 for t in threads:
3600 t.join()
3601 getResponses.append( t.result )
3602 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003603 for i in range( len( main.activeNodes ) ):
3604 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003605 if isinstance( getResponses[ i ], list):
3606 current = set( getResponses[ i ] )
3607 if len( current ) == len( getResponses[ i ] ):
3608 # no repeats
3609 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003610 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003611 " of set " + onosSetName + ":\n" +
3612 str( getResponses[ i ] ) )
3613 main.log.debug( "Expected: " + str( onosSet ) )
3614 main.log.debug( "Actual: " + str( current ) )
3615 getResults = main.FALSE
3616 else:
3617 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003618 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003619 " set " + onosSetName + ":\n" +
3620 str( getResponses[ i ] ) )
3621 getResults = main.FALSE
3622 elif getResponses[ i ] == main.ERROR:
3623 getResults = main.FALSE
3624 sizeResponses = []
3625 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003626 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003627 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003628 name="setTestSize-" + str( i ),
3629 args=[ onosSetName ] )
3630 threads.append( t )
3631 t.start()
3632 for t in threads:
3633 t.join()
3634 sizeResponses.append( t.result )
3635 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003636 for i in range( len( main.activeNodes ) ):
3637 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003638 if size != sizeResponses[ i ]:
3639 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003640 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003641 " expected a size of " + str( size ) +
3642 " for set " + onosSetName +
3643 " but got " + str( sizeResponses[ i ] ) )
3644 addResults = addResults and getResults and sizeResults
3645 utilities.assert_equals( expect=main.TRUE,
3646 actual=addResults,
3647 onpass="Set add correct",
3648 onfail="Set add was incorrect" )
3649
3650 main.step( "Distributed Set addAll()" )
3651 onosSet.update( addAllValue.split() )
3652 addResponses = []
3653 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003654 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003655 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003656 name="setTestAddAll-" + str( i ),
3657 args=[ onosSetName, addAllValue ] )
3658 threads.append( t )
3659 t.start()
3660 for t in threads:
3661 t.join()
3662 addResponses.append( t.result )
3663
3664 # main.TRUE = successfully changed the set
3665 # main.FALSE = action resulted in no change in set
3666 # main.ERROR - Some error in executing the function
3667 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003668 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003669 if addResponses[ i ] == main.TRUE:
3670 # All is well
3671 pass
3672 elif addResponses[ i ] == main.FALSE:
3673 # Already in set, probably fine
3674 pass
3675 elif addResponses[ i ] == main.ERROR:
3676 # Error in execution
3677 addAllResults = main.FALSE
3678 else:
3679 # unexpected result
3680 addAllResults = main.FALSE
3681 if addAllResults != main.TRUE:
3682 main.log.error( "Error executing set addAll" )
3683
3684 # Check if set is still correct
3685 size = len( onosSet )
3686 getResponses = []
3687 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003688 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003689 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003690 name="setTestGet-" + str( i ),
3691 args=[ onosSetName ] )
3692 threads.append( t )
3693 t.start()
3694 for t in threads:
3695 t.join()
3696 getResponses.append( t.result )
3697 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003698 for i in range( len( main.activeNodes ) ):
3699 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003700 if isinstance( getResponses[ i ], list):
3701 current = set( getResponses[ i ] )
3702 if len( current ) == len( getResponses[ i ] ):
3703 # no repeats
3704 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003705 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003706 " has incorrect view" +
3707 " of set " + onosSetName + ":\n" +
3708 str( getResponses[ i ] ) )
3709 main.log.debug( "Expected: " + str( onosSet ) )
3710 main.log.debug( "Actual: " + str( current ) )
3711 getResults = main.FALSE
3712 else:
3713 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003714 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003715 " has repeat elements in" +
3716 " set " + onosSetName + ":\n" +
3717 str( getResponses[ i ] ) )
3718 getResults = main.FALSE
3719 elif getResponses[ i ] == main.ERROR:
3720 getResults = main.FALSE
3721 sizeResponses = []
3722 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003723 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003724 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003725 name="setTestSize-" + str( i ),
3726 args=[ onosSetName ] )
3727 threads.append( t )
3728 t.start()
3729 for t in threads:
3730 t.join()
3731 sizeResponses.append( t.result )
3732 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003733 for i in range( len( main.activeNodes ) ):
3734 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003735 if size != sizeResponses[ i ]:
3736 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003737 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003738 " expected a size of " + str( size ) +
3739 " for set " + onosSetName +
3740 " but got " + str( sizeResponses[ i ] ) )
3741 addAllResults = addAllResults and getResults and sizeResults
3742 utilities.assert_equals( expect=main.TRUE,
3743 actual=addAllResults,
3744 onpass="Set addAll correct",
3745 onfail="Set addAll was incorrect" )
3746
3747 main.step( "Distributed Set contains()" )
3748 containsResponses = []
3749 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003750 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003751 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003752 name="setContains-" + str( i ),
3753 args=[ onosSetName ],
3754 kwargs={ "values": addValue } )
3755 threads.append( t )
3756 t.start()
3757 for t in threads:
3758 t.join()
3759 # NOTE: result is a tuple; index 1 holds the boolean contains result
3760 containsResponses.append( t.result )
3761
3762 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003763 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003764 if containsResponses[ i ] == main.ERROR:
3765 containsResults = main.FALSE
3766 else:
3767 containsResults = containsResults and\
3768 containsResponses[ i ][ 1 ]
3769 utilities.assert_equals( expect=main.TRUE,
3770 actual=containsResults,
3771 onpass="Set contains is functional",
3772 onfail="Set contains failed" )
3773
3774 main.step( "Distributed Set containsAll()" )
3775 containsAllResponses = []
3776 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003777 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003778 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003779 name="setContainsAll-" + str( i ),
3780 args=[ onosSetName ],
3781 kwargs={ "values": addAllValue } )
3782 threads.append( t )
3783 t.start()
3784 for t in threads:
3785 t.join()
3786 # NOTE: result is a tuple; index 1 holds the boolean containsAll result
3787 containsAllResponses.append( t.result )
3788
3789 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003790 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003791 if containsAllResponses[ i ] == main.ERROR:
3792 containsAllResults = main.FALSE
3793 else:
3794 containsAllResults = containsAllResults and\
3795 containsAllResponses[ i ][ 1 ]
3796 utilities.assert_equals( expect=main.TRUE,
3797 actual=containsAllResults,
3798 onpass="Set containsAll is functional",
3799 onfail="Set containsAll failed" )
3800
3801 main.step( "Distributed Set remove()" )
3802 onosSet.remove( addValue )
3803 removeResponses = []
3804 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003805 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003806 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003807 name="setTestRemove-" + str( i ),
3808 args=[ onosSetName, addValue ] )
3809 threads.append( t )
3810 t.start()
3811 for t in threads:
3812 t.join()
3813 removeResponses.append( t.result )
3814
3815 # main.TRUE = successfully changed the set
3816 # main.FALSE = action resulted in no change in set
3817 # main.ERROR - Some error in executing the function
3818 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003819 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003820 if removeResponses[ i ] == main.TRUE:
3821 # All is well
3822 pass
3823 elif removeResponses[ i ] == main.FALSE:
3824 # not in set, probably fine
3825 pass
3826 elif removeResponses[ i ] == main.ERROR:
3827 # Error in execution
3828 removeResults = main.FALSE
3829 else:
3830 # unexpected result
3831 removeResults = main.FALSE
3832 if removeResults != main.TRUE:
3833 main.log.error( "Error executing set remove" )
3834
3835 # Check if set is still correct
3836 size = len( onosSet )
3837 getResponses = []
3838 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003839 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003840 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003841 name="setTestGet-" + str( i ),
3842 args=[ onosSetName ] )
3843 threads.append( t )
3844 t.start()
3845 for t in threads:
3846 t.join()
3847 getResponses.append( t.result )
3848 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003849 for i in range( len( main.activeNodes ) ):
3850 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003851 if isinstance( getResponses[ i ], list):
3852 current = set( getResponses[ i ] )
3853 if len( current ) == len( getResponses[ i ] ):
3854 # no repeats
3855 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003856 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003857 " has incorrect view" +
3858 " of set " + onosSetName + ":\n" +
3859 str( getResponses[ i ] ) )
3860 main.log.debug( "Expected: " + str( onosSet ) )
3861 main.log.debug( "Actual: " + str( current ) )
3862 getResults = main.FALSE
3863 else:
3864 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003865 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003866 " has repeat elements in" +
3867 " set " + onosSetName + ":\n" +
3868 str( getResponses[ i ] ) )
3869 getResults = main.FALSE
3870 elif getResponses[ i ] == main.ERROR:
3871 getResults = main.FALSE
3872 sizeResponses = []
3873 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003874 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003875 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003876 name="setTestSize-" + str( i ),
3877 args=[ onosSetName ] )
3878 threads.append( t )
3879 t.start()
3880 for t in threads:
3881 t.join()
3882 sizeResponses.append( t.result )
3883 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003884 for i in range( len( main.activeNodes ) ):
3885 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003886 if size != sizeResponses[ i ]:
3887 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003888 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003889 " expected a size of " + str( size ) +
3890 " for set " + onosSetName +
3891 " but got " + str( sizeResponses[ i ] ) )
3892 removeResults = removeResults and getResults and sizeResults
3893 utilities.assert_equals( expect=main.TRUE,
3894 actual=removeResults,
3895 onpass="Set remove correct",
3896 onfail="Set remove was incorrect" )
3897
3898 main.step( "Distributed Set removeAll()" )
3899 onosSet.difference_update( addAllValue.split() )
3900 removeAllResponses = []
3901 threads = []
3902 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003903 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003904 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003905 name="setTestRemoveAll-" + str( i ),
3906 args=[ onosSetName, addAllValue ] )
3907 threads.append( t )
3908 t.start()
3909 for t in threads:
3910 t.join()
3911 removeAllResponses.append( t.result )
3912 except Exception, e:
3913 main.log.exception(e)
3914
3915 # main.TRUE = successfully changed the set
3916 # main.FALSE = action resulted in no change in set
3917 # main.ERROR - Some error in executing the function
3918 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003919 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003920 if removeAllResponses[ i ] == main.TRUE:
3921 # All is well
3922 pass
3923 elif removeAllResponses[ i ] == main.FALSE:
3924 # not in set, probably fine
3925 pass
3926 elif removeAllResponses[ i ] == main.ERROR:
3927 # Error in execution
3928 removeAllResults = main.FALSE
3929 else:
3930 # unexpected result
3931 removeAllResults = main.FALSE
3932 if removeAllResults != main.TRUE:
3933 main.log.error( "Error executing set removeAll" )
3934
3935 # Check if set is still correct
3936 size = len( onosSet )
3937 getResponses = []
3938 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003939 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003940 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003941 name="setTestGet-" + str( i ),
3942 args=[ onosSetName ] )
3943 threads.append( t )
3944 t.start()
3945 for t in threads:
3946 t.join()
3947 getResponses.append( t.result )
3948 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003949 for i in range( len( main.activeNodes ) ):
3950 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003951 if isinstance( getResponses[ i ], list):
3952 current = set( getResponses[ i ] )
3953 if len( current ) == len( getResponses[ i ] ):
3954 # no repeats
3955 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003956 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003957 " has incorrect view" +
3958 " of set " + onosSetName + ":\n" +
3959 str( getResponses[ i ] ) )
3960 main.log.debug( "Expected: " + str( onosSet ) )
3961 main.log.debug( "Actual: " + str( current ) )
3962 getResults = main.FALSE
3963 else:
3964 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003965 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003966 " has repeat elements in" +
3967 " set " + onosSetName + ":\n" +
3968 str( getResponses[ i ] ) )
3969 getResults = main.FALSE
3970 elif getResponses[ i ] == main.ERROR:
3971 getResults = main.FALSE
3972 sizeResponses = []
3973 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003974 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003975 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003976 name="setTestSize-" + str( i ),
3977 args=[ onosSetName ] )
3978 threads.append( t )
3979 t.start()
3980 for t in threads:
3981 t.join()
3982 sizeResponses.append( t.result )
3983 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003984 for i in range( len( main.activeNodes ) ):
3985 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003986 if size != sizeResponses[ i ]:
3987 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003988 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003989 " expected a size of " + str( size ) +
3990 " for set " + onosSetName +
3991 " but got " + str( sizeResponses[ i ] ) )
3992 removeAllResults = removeAllResults and getResults and sizeResults
3993 utilities.assert_equals( expect=main.TRUE,
3994 actual=removeAllResults,
3995 onpass="Set removeAll correct",
3996 onfail="Set removeAll was incorrect" )
3997
3998 main.step( "Distributed Set addAll()" )
3999 onosSet.update( addAllValue.split() )
4000 addResponses = []
4001 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004002 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004003 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004004 name="setTestAddAll-" + str( i ),
4005 args=[ onosSetName, addAllValue ] )
4006 threads.append( t )
4007 t.start()
4008 for t in threads:
4009 t.join()
4010 addResponses.append( t.result )
4011
4012 # main.TRUE = successfully changed the set
4013 # main.FALSE = action resulted in no change in set
4014 # main.ERROR - Some error in executing the function
4015 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004016 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004017 if addResponses[ i ] == main.TRUE:
4018 # All is well
4019 pass
4020 elif addResponses[ i ] == main.FALSE:
4021 # Already in set, probably fine
4022 pass
4023 elif addResponses[ i ] == main.ERROR:
4024 # Error in execution
4025 addAllResults = main.FALSE
4026 else:
4027 # unexpected result
4028 addAllResults = main.FALSE
4029 if addAllResults != main.TRUE:
4030 main.log.error( "Error executing set addAll" )
4031
4032 # Check if set is still correct
4033 size = len( onosSet )
4034 getResponses = []
4035 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004036 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004037 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004038 name="setTestGet-" + str( i ),
4039 args=[ onosSetName ] )
4040 threads.append( t )
4041 t.start()
4042 for t in threads:
4043 t.join()
4044 getResponses.append( t.result )
4045 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004046 for i in range( len( main.activeNodes ) ):
4047 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004048 if isinstance( getResponses[ i ], list):
4049 current = set( getResponses[ i ] )
4050 if len( current ) == len( getResponses[ i ] ):
4051 # no repeats
4052 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004053 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004054 " has incorrect view" +
4055 " of set " + onosSetName + ":\n" +
4056 str( getResponses[ i ] ) )
4057 main.log.debug( "Expected: " + str( onosSet ) )
4058 main.log.debug( "Actual: " + str( current ) )
4059 getResults = main.FALSE
4060 else:
4061 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004062 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004063 " has repeat elements in" +
4064 " set " + onosSetName + ":\n" +
4065 str( getResponses[ i ] ) )
4066 getResults = main.FALSE
4067 elif getResponses[ i ] == main.ERROR:
4068 getResults = main.FALSE
4069 sizeResponses = []
4070 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004071 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004072 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004073 name="setTestSize-" + str( i ),
4074 args=[ onosSetName ] )
4075 threads.append( t )
4076 t.start()
4077 for t in threads:
4078 t.join()
4079 sizeResponses.append( t.result )
4080 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004081 for i in range( len( main.activeNodes ) ):
4082 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004083 if size != sizeResponses[ i ]:
4084 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004085 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004086 " expected a size of " + str( size ) +
4087 " for set " + onosSetName +
4088 " but got " + str( sizeResponses[ i ] ) )
4089 addAllResults = addAllResults and getResults and sizeResults
4090 utilities.assert_equals( expect=main.TRUE,
4091 actual=addAllResults,
4092 onpass="Set addAll correct",
4093 onfail="Set addAll was incorrect" )
4094
4095 main.step( "Distributed Set clear()" )
4096 onosSet.clear()
4097 clearResponses = []
4098 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004099 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004100 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004101 name="setTestClear-" + str( i ),
4102 args=[ onosSetName, " "], # value doesn't matter when clear=True
4103 kwargs={ "clear": True } )
4104 threads.append( t )
4105 t.start()
4106 for t in threads:
4107 t.join()
4108 clearResponses.append( t.result )
4109
4110 # main.TRUE = successfully changed the set
4111 # main.FALSE = action resulted in no change in set
4112 # main.ERROR = some error in executing the function
4113 clearResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004114 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004115 if clearResponses[ i ] == main.TRUE:
4116 # All is well
4117 pass
4118 elif clearResponses[ i ] == main.FALSE:
4119 # Set was already empty, probably fine
4120 pass
4121 elif clearResponses[ i ] == main.ERROR:
4122 # Error in execution
4123 clearResults = main.FALSE
4124 else:
4125 # unexpected result
4126 clearResults = main.FALSE
4127 if clearResults != main.TRUE:
4128 main.log.error( "Error executing set clear" )
4129
4130 # Check if set is still correct
4131 size = len( onosSet )
4132 getResponses = []
4133 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004134 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004135 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004136 name="setTestGet-" + str( i ),
4137 args=[ onosSetName ] )
4138 threads.append( t )
4139 t.start()
4140 for t in threads:
4141 t.join()
4142 getResponses.append( t.result )
4143 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004144 for i in range( len( main.activeNodes ) ):
4145 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004146 if isinstance( getResponses[ i ], list):
4147 current = set( getResponses[ i ] )
4148 if len( current ) == len( getResponses[ i ] ):
4149 # no repeats
4150 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004151 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004152 " has incorrect view" +
4153 " of set " + onosSetName + ":\n" +
4154 str( getResponses[ i ] ) )
4155 main.log.debug( "Expected: " + str( onosSet ) )
4156 main.log.debug( "Actual: " + str( current ) )
4157 getResults = main.FALSE
4158 else:
4159 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004160 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004161 " has repeat elements in" +
4162 " set " + onosSetName + ":\n" +
4163 str( getResponses[ i ] ) )
4164 getResults = main.FALSE
4165 elif getResponses[ i ] == main.ERROR:
4166 getResults = main.FALSE
4167 sizeResponses = []
4168 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004169 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004170 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004171 name="setTestSize-" + str( i ),
4172 args=[ onosSetName ] )
4173 threads.append( t )
4174 t.start()
4175 for t in threads:
4176 t.join()
4177 sizeResponses.append( t.result )
4178 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004179 for i in range( len( main.activeNodes ) ):
4180 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004181 if size != sizeResponses[ i ]:
4182 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004183 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004184 " expected a size of " + str( size ) +
4185 " for set " + onosSetName +
4186 " but got " + str( sizeResponses[ i ] ) )
4187 clearResults = clearResults and getResults and sizeResults
4188 utilities.assert_equals( expect=main.TRUE,
4189 actual=clearResults,
4190 onpass="Set clear correct",
4191 onfail="Set clear was incorrect" )
4192
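        # Re-populate the set with addAll() after the clear above so the
        # remaining steps have elements to operate on; the checks mirror the
        # earlier addAll verification.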
4193 main.step( "Distributed Set addAll()" )
4194 onosSet.update( addAllValue.split() )
4195 addResponses = []
4196 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004197 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004198 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004199 name="setTestAddAll-" + str( i ),
4200 args=[ onosSetName, addAllValue ] )
4201 threads.append( t )
4202 t.start()
4203 for t in threads:
4204 t.join()
4205 addResponses.append( t.result )
4206
4207 # main.TRUE = successfully changed the set
4208 # main.FALSE = action resulted in no change in set
4209 # main.ERROR = some error in executing the function
4210 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004211 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004212 if addResponses[ i ] == main.TRUE:
4213 # All is well
4214 pass
4215 elif addResponses[ i ] == main.FALSE:
4216 # Already in set, probably fine
4217 pass
4218 elif addResponses[ i ] == main.ERROR:
4219 # Error in execution
4220 addAllResults = main.FALSE
4221 else:
4222 # unexpected result
4223 addAllResults = main.FALSE
4224 if addAllResults != main.TRUE:
4225 main.log.error( "Error executing set addAll" )
4226
4227 # Check if set is still correct
4228 size = len( onosSet )
4229 getResponses = []
4230 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004231 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004232 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004233 name="setTestGet-" + str( i ),
4234 args=[ onosSetName ] )
4235 threads.append( t )
4236 t.start()
4237 for t in threads:
4238 t.join()
4239 getResponses.append( t.result )
4240 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004241 for i in range( len( main.activeNodes ) ):
4242 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004243 if isinstance( getResponses[ i ], list):
4244 current = set( getResponses[ i ] )
4245 if len( current ) == len( getResponses[ i ] ):
4246 # no repeats
4247 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004248 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004249 " has incorrect view" +
4250 " of set " + onosSetName + ":\n" +
4251 str( getResponses[ i ] ) )
4252 main.log.debug( "Expected: " + str( onosSet ) )
4253 main.log.debug( "Actual: " + str( current ) )
4254 getResults = main.FALSE
4255 else:
4256 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004257 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004258 " has repeat elements in" +
4259 " set " + onosSetName + ":\n" +
4260 str( getResponses[ i ] ) )
4261 getResults = main.FALSE
4262 elif getResponses[ i ] == main.ERROR:
4263 getResults = main.FALSE
4264 sizeResponses = []
4265 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004266 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004267 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004268 name="setTestSize-" + str( i ),
4269 args=[ onosSetName ] )
4270 threads.append( t )
4271 t.start()
4272 for t in threads:
4273 t.join()
4274 sizeResponses.append( t.result )
4275 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004276 for i in range( len( main.activeNodes ) ):
4277 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004278 if size != sizeResponses[ i ]:
4279 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004280 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004281 " expected a size of " + str( size ) +
4282 " for set " + onosSetName +
4283 " but got " + str( sizeResponses[ i ] ) )
4284 addAllResults = addAllResults and getResults and sizeResults
4285 utilities.assert_equals( expect=main.TRUE,
4286 actual=addAllResults,
4287 onpass="Set addAll correct",
4288 onfail="Set addAll was incorrect" )
4289
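        # retain() keeps only the supplied elements (a set intersection); the
        # expected result is mirrored locally with intersection_update() and
        # then verified on every active node.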
4290 main.step( "Distributed Set retain()" )
4291 onosSet.intersection_update( retainValue.split() )
4292 retainResponses = []
4293 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004294 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004295 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004296 name="setTestRetain-" + str( i ),
4297 args=[ onosSetName, retainValue ],
4298 kwargs={ "retain": True } )
4299 threads.append( t )
4300 t.start()
4301 for t in threads:
4302 t.join()
4303 retainResponses.append( t.result )
4304
4305 # main.TRUE = successfully changed the set
4306 # main.FALSE = action resulted in no change in set
4307 # main.ERROR = some error in executing the function
4308 retainResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004309 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004310 if retainResponses[ i ] == main.TRUE:
4311 # All is well
4312 pass
4313 elif retainResponses[ i ] == main.FALSE:
4314 # Retain caused no change, probably fine
4315 pass
4316 elif retainResponses[ i ] == main.ERROR:
4317 # Error in execution
4318 retainResults = main.FALSE
4319 else:
4320 # unexpected result
4321 retainResults = main.FALSE
4322 if retainResults != main.TRUE:
4323 main.log.error( "Error executing set retain" )
4324
4325 # Check if set is still correct
4326 size = len( onosSet )
4327 getResponses = []
4328 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004329 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004330 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004331 name="setTestGet-" + str( i ),
4332 args=[ onosSetName ] )
4333 threads.append( t )
4334 t.start()
4335 for t in threads:
4336 t.join()
4337 getResponses.append( t.result )
4338 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004339 for i in range( len( main.activeNodes ) ):
4340 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004341 if isinstance( getResponses[ i ], list):
4342 current = set( getResponses[ i ] )
4343 if len( current ) == len( getResponses[ i ] ):
4344 # no repeats
4345 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004346 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004347 " has incorrect view" +
4348 " of set " + onosSetName + ":\n" +
4349 str( getResponses[ i ] ) )
4350 main.log.debug( "Expected: " + str( onosSet ) )
4351 main.log.debug( "Actual: " + str( current ) )
4352 getResults = main.FALSE
4353 else:
4354 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004355 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004356 " has repeat elements in" +
4357 " set " + onosSetName + ":\n" +
4358 str( getResponses[ i ] ) )
4359 getResults = main.FALSE
4360 elif getResponses[ i ] == main.ERROR:
4361 getResults = main.FALSE
4362 sizeResponses = []
4363 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004364 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004365 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004366 name="setTestSize-" + str( i ),
4367 args=[ onosSetName ] )
4368 threads.append( t )
4369 t.start()
4370 for t in threads:
4371 t.join()
4372 sizeResponses.append( t.result )
4373 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004374 for i in range( len( main.activeNodes ) ):
4375 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004376 if size != sizeResponses[ i ]:
4377 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004378 main.log.error( "ONOS" + node + " expected a size of " +
Jon Hall5cf14d52015-07-16 12:15:19 -07004379 str( size ) + " for set " + onosSetName +
4380 " but got " + str( sizeResponses[ i ] ) )
4381 retainResults = retainResults and getResults and sizeResults
4382 utilities.assert_equals( expect=main.TRUE,
4383 actual=retainResults,
4384 onpass="Set retain correct",
4385 onfail="Set retain was incorrect" )
4386
Jon Hall2a5002c2015-08-21 16:49:11 -07004387 # Transactional maps
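        # Pattern for the transactional map checks below: a single active node
        # is expected to write numKeys entries ( Key1..KeyN, all set to
        # tMapValue ), then every active node reads each key back and must
        # return the same value.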
4388 main.step( "Partitioned Transactional maps put" )
4389 tMapValue = "Testing"
4390 numKeys = 100
4391 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004392 node = main.activeNodes[0]
4393 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall2a5002c2015-08-21 16:49:11 -07004394 if len( putResponses ) == numKeys:
4395 for i in putResponses:
4396 if putResponses[ i ][ 'value' ] != tMapValue:
4397 putResult = False
4398 else:
4399 putResult = False
4400 if not putResult:
4401 main.log.debug( "Put response values: " + str( putResponses ) )
4402 utilities.assert_equals( expect=True,
4403 actual=putResult,
4404 onpass="Partitioned Transactional Map put successful",
4405 onfail="Partitioned Transactional Map put values are incorrect" )
4406
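        # Read every key back from every active node; each response must match
        # the value written by the single writer above.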
4407 main.step( "Partitioned Transactional maps get" )
4408 getCheck = True
4409 for n in range( 1, numKeys + 1 ):
4410 getResponses = []
4411 threads = []
4412 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004413 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004414 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4415 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004416 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004417 threads.append( t )
4418 t.start()
4419 for t in threads:
4420 t.join()
4421 getResponses.append( t.result )
4422 for response in getResponses:
4423 if response != tMapValue:
4424 valueCheck = False
4425 if not valueCheck:
4426 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4427 main.log.warn( getResponses )
4428 getCheck = getCheck and valueCheck
4429 utilities.assert_equals( expect=True,
4430 actual=getCheck,
4431 onpass="Partitioned Transactional Map get values were correct",
4432 onfail="Partitioned Transactional Map values incorrect" )
4433
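        # Repeat the same put/get checks for the in-memory map variant,
        # selected with inMemory=True ( presumably a non-persisted map, as
        # opposed to the partitioned one above ).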
4434 main.step( "In-memory Transactional maps put" )
4435 tMapValue = "Testing"
4436 numKeys = 100
4437 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004438 node = main.activeNodes[0]
4439 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
Jon Hall2a5002c2015-08-21 16:49:11 -07004440 if len( putResponses ) == numKeys:
4441 for i in putResponses:
4442 if putResponses[ i ][ 'value' ] != tMapValue:
4443 putResult = False
4444 else:
4445 putResult = False
4446 if not putResult:
4447 main.log.debug( "Put response values: " + str( putResponses ) )
4448 utilities.assert_equals( expect=True,
4449 actual=putResult,
4450 onpass="In-Memory Transactional Map put successful",
4451 onfail="In-Memory Transactional Map put values are incorrect" )
4452
4453 main.step( "In-Memory Transactional maps get" )
4454 getCheck = True
4455 for n in range( 1, numKeys + 1 ):
4456 getResponses = []
4457 threads = []
4458 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004459 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004460 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4461 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004462 args=[ "Key" + str( n ) ],
Jon Hall2a5002c2015-08-21 16:49:11 -07004463 kwargs={ "inMemory": True } )
4464 threads.append( t )
4465 t.start()
4466 for t in threads:
4467 t.join()
4468 getResponses.append( t.result )
4469 for response in getResponses:
4470 if response != tMapValue:
4471 valueCheck = False
4472 if not valueCheck:
4473 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4474 main.log.warn( getResponses )
4475 getCheck = getCheck and valueCheck
4476 utilities.assert_equals( expect=True,
4477 actual=getCheck,
4478 onpass="In-Memory Transactional Map get values were correct",
4479 onfail="In-Memory Transactional Map values incorrect" )