"""
Description: This test is to determine if ONOS can handle
             a minority of its nodes restarting

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: Start election app on all ONOS nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""


class HAkillNodes:

    def __init__( self ):
        self.default = ''

33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hall3b489db2015-10-05 14:38:37 -070052 import pexpect
Jon Hall5cf14d52015-07-16 12:15:19 -070053 main.log.info( "ONOS HA test: Restart minority of ONOS nodes - " +
54 "initialization" )
55 main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS " +\
                               "CLI sessions."
59 # TODO: save all the timers and output them for plotting
60
61 # load some variables from the params file
62 PULLCODE = False
63 if main.params[ 'Git' ] == 'True':
64 PULLCODE = True
65 gitBranch = main.params[ 'branch' ]
66 cellName = main.params[ 'ENV' ][ 'cellName' ]
67
Jon Halle1a3b752015-07-22 13:02:46 -070068 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070069 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070070 if main.ONOSbench.maxNodes < main.numCtrls:
71 main.numCtrls = int( main.ONOSbench.maxNodes )
72 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070073 global ONOS1Port
74 global ONOS2Port
75 global ONOS3Port
76 global ONOS4Port
77 global ONOS5Port
78 global ONOS6Port
79 global ONOS7Port
80
81 # FIXME: just get controller port from params?
82 # TODO: do we really need all these?
83 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
84 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
85 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
86 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
87 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
88 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
89 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
90
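        # NOTE: The lookups above assume a .params file for this test with
        # roughly the following shape; the values shown here are illustrative
        # only, not taken from the real file:
        #   <PARAMS>
        #       <Git>False</Git>
        #       <branch>master</branch>
        #       <num_controllers>7</num_controllers>
        #       <ENV>
        #           <cellName>HA</cellName>
        #           <appString>drivers,openflow</appString>
        #       </ENV>
        #       <CTRL>
        #           <port1>6653</port1>
        #           ...
        #           <port7>6653</port7>
        #       </CTRL>
        #   </PARAMS>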
Jon Halle1a3b752015-07-22 13:02:46 -070091 try:
92 fileName = "Counters"
93 # TODO: Maybe make a library folder somewhere?
94 path = main.params[ 'imports' ][ 'path' ]
95 main.Counters = imp.load_source( fileName,
96 path + fileName + ".py" )
97 except Exception as e:
98 main.log.exception( e )
99 main.cleanup()
100 main.exit()
101
102 main.CLIs = []
103 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700104 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700105 for i in range( 1, main.numCtrls + 1 ):
106 try:
107 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
108 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
109 ipList.append( main.nodes[ -1 ].ip_address )
110 except AttributeError:
111 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700112
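        # NOTE: The ONOScli<i> and ONOS<i> handles used above are expected to
        # be components defined in this test's .topo file; the loop stops at
        # the first missing component, so the effective cluster size is
        # min( num_controllers, components actually defined ).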
113 main.step( "Create cell file" )
114 cellAppString = main.params[ 'ENV' ][ 'appString' ]
115 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
116 main.Mininet1.ip_address,
117 cellAppString, ipList )
118 main.step( "Applying cell variable to environment" )
119 cellResult = main.ONOSbench.setCell( cellName )
120 verifyResult = main.ONOSbench.verifyCell()
121
        # FIXME: this is a short-term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()
125
126 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700127 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700128 main.ONOSbench.onosUninstall( node.ip_address )
129
130 # Make sure ONOS is DEAD
131 main.log.info( "Killing any ONOS processes" )
132 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700133 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700134 killed = main.ONOSbench.onosKill( node.ip_address )
135 killResults = killResults and killed
136
137 cleanInstallResult = main.TRUE
138 gitPullResult = main.TRUE
139
140 main.step( "Starting Mininet" )
141 # scp topo file to mininet
142 # TODO: move to params?
143 topoName = "obelisk.py"
144 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700145 main.ONOSbench.scp( main.Mininet1,
146 filePath + topoName,
147 main.Mininet1.home,
148 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700149 mnResult = main.Mininet1.startNet( )
150 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
151 onpass="Mininet Started",
152 onfail="Error starting Mininet" )
153
154 main.step( "Git checkout and pull " + gitBranch )
155 if PULLCODE:
156 main.ONOSbench.gitCheckout( gitBranch )
157 gitPullResult = main.ONOSbench.gitPull()
158 # values of 1 or 3 are good
159 utilities.assert_lesser( expect=0, actual=gitPullResult,
160 onpass="Git pull successful",
161 onfail="Git pull failed" )
162 main.ONOSbench.getVersion( report=True )
163
164 main.step( "Using mvn clean install" )
165 cleanInstallResult = main.TRUE
166 if PULLCODE and gitPullResult == main.TRUE:
167 cleanInstallResult = main.ONOSbench.cleanInstall()
168 else:
169 main.log.warn( "Did not pull new code so skipping mvn " +
170 "clean install" )
171 utilities.assert_equals( expect=main.TRUE,
172 actual=cleanInstallResult,
173 onpass="MCI successful",
174 onfail="MCI failed" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700175
176 main.step( "Make sure ONOS service doesn't automatically respawn" )
177 handle = main.ONOSbench.handle
178 handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
179 handle.expect( "\$" ) # $ from the command
180 handle.expect( "\$" ) # $ from the prompt
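        # Commenting out the "respawn" stanza keeps the init system from
        # automatically restarting karaf, so nodes killed later in this test
        # stay down until the recovery case brings them back. The edit is
        # reverted at the end of this case with a git checkout of onos.conf.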
181
Jon Hall5cf14d52015-07-16 12:15:19 -0700182 # GRAPHS
183 # NOTE: important params here:
184 # job = name of Jenkins job
185 # Plot Name = Plot-HA, only can be used if multiple plots
186 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700187 job = "HAkillNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700188 plotName = "Plot-HA"
189 graphs = '<ac:structured-macro ac:name="html">\n'
190 graphs += '<ac:plain-text-body><![CDATA[\n'
191 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
192 '/plot/' + plotName + '/getPlot?index=0' +\
193 '&width=500&height=300"' +\
194 'noborder="0" width="500" height="300" scrolling="yes" ' +\
195 'seamless="seamless"></iframe>\n'
196 graphs += ']]></ac:plain-text-body>\n'
197 graphs += '</ac:structured-macro>\n'
198 main.log.wiki(graphs)
199
200 main.step( "Creating ONOS package" )
        # copy gen-partitions file to ONOS
202 # NOTE: this assumes TestON and ONOS are on the same machine
203 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
204 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
205 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
206 main.ONOSbench.ip_address,
207 srcFile,
208 dstDir,
209 pwd=main.ONOSbench.pwd,
210 direction="from" )
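        # Assumption: onos-package picks up tools/test/bin/onos-gen-partitions
        # when building the package, so shipping the test's own copy here
        # controls the partition layout of the installed cluster; the stock
        # script is restored with 'git checkout' right after the install step.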
Jon Hall5cf14d52015-07-16 12:15:19 -0700211 packageResult = main.ONOSbench.onosPackage()
212 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
213 onpass="ONOS package successful",
214 onfail="ONOS package failed" )
215
216 main.step( "Installing ONOS package" )
217 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700218 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700219 tmpResult = main.ONOSbench.onosInstall( options="-f",
220 node=node.ip_address )
221 onosInstallResult = onosInstallResult and tmpResult
222 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
223 onpass="ONOS install successful",
224 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700225 # clean up gen-partitions file
226 try:
227 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
228 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
229 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
230 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
231 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
232 str( main.ONOSbench.handle.before ) )
233 except ( pexpect.TIMEOUT, pexpect.EOF ):
234 main.log.exception( "ONOSbench: pexpect exception found:" +
235 main.ONOSbench.handle.before )
236 main.cleanup()
237 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700238
239 main.step( "Checking if ONOS is up yet" )
240 for i in range( 2 ):
241 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700242 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700243 started = main.ONOSbench.isup( node.ip_address )
244 if not started:
245 main.log.error( node.name + " didn't start!" )
246 main.ONOSbench.onosStop( node.ip_address )
247 main.ONOSbench.onosStart( node.ip_address )
248 onosIsupResult = onosIsupResult and started
249 if onosIsupResult == main.TRUE:
250 break
251 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
252 onpass="ONOS startup successful",
253 onfail="ONOS startup failed" )
254
        main.step( "Starting ONOS CLI sessions" )
256 cliResults = main.TRUE
257 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700258 for i in range( main.numCtrls ):
259 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700260 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700261 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700262 threads.append( t )
263 t.start()
264
265 for t in threads:
266 t.join()
267 cliResults = cliResults and t.result
268 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
269 onpass="ONOS cli startup successful",
270 onfail="ONOS cli startup failed" )
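        # This create/start/join pattern, with each thread's return value
        # collected from t.result, is how the rest of this test fans the same
        # CLI call out to every node in parallel.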
271
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700272 # Create a list of active nodes for use when some nodes are stopped
273 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
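        # Later cases index main.CLIs/main.nodes through this list, so the
        # failure case can drop a node here to exclude it from all
        # subsequent checks.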
274
Jon Hall5cf14d52015-07-16 12:15:19 -0700275 if main.params[ 'tcpdump' ].lower() == "true":
276 main.step( "Start Packet Capture MN" )
277 main.Mininet2.startTcpdump(
278 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
279 + "-MN.pcap",
280 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
281 port=main.params[ 'MNtcpdump' ][ 'port' ] )
282
283 main.step( "App Ids check" )
284 appCheck = main.TRUE
285 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700286 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700287 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700288 name="appToIDCheck-" + str( i ),
289 args=[] )
290 threads.append( t )
291 t.start()
292
293 for t in threads:
294 t.join()
295 appCheck = appCheck and t.result
296 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700297 node = main.activeNodes[0]
298 main.log.warn( main.CLIs[node].apps() )
299 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700300 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
301 onpass="App Ids seem to be correct",
302 onfail="Something is wrong with app Ids" )
303
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700304 main.step( "Clean up ONOS service changes" )
305 handle.sendline( "git checkout -- tools/package/init/onos.conf" )
306 handle.expect( "\$" )
307
Jon Hall5cf14d52015-07-16 12:15:19 -0700308 if cliResults == main.FALSE:
309 main.log.error( "Failed to start ONOS, stopping test" )
310 main.cleanup()
311 main.exit()
312
313 def CASE2( self, main ):
314 """
315 Assign devices to controllers
316 """
317 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700318 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700319 assert main, "main not defined"
320 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700321 assert main.CLIs, "main.CLIs not defined"
322 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700323 assert ONOS1Port, "ONOS1Port not defined"
324 assert ONOS2Port, "ONOS2Port not defined"
325 assert ONOS3Port, "ONOS3Port not defined"
326 assert ONOS4Port, "ONOS4Port not defined"
327 assert ONOS5Port, "ONOS5Port not defined"
328 assert ONOS6Port, "ONOS6Port not defined"
329 assert ONOS7Port, "ONOS7Port not defined"
330
331 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700332 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700333 "and check that an ONOS node becomes the " +\
334 "master of the device."
335 main.step( "Assign switches to controllers" )
336
337 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700338 for i in range( main.numCtrls ):
339 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700340 swList = []
341 for i in range( 1, 29 ):
342 swList.append( "s" + str( i ) )
343 main.Mininet1.assignSwController( sw=swList, ip=ipList )
344
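        # getSwController is expected to return the ovs-vsctl style controller
        # list for each switch (entries like "tcp:<controller ip>:<port>"),
        # which is what the regex check below looks for.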
345 mastershipCheck = main.TRUE
346 for i in range( 1, 29 ):
347 response = main.Mininet1.getSwController( "s" + str( i ) )
348 try:
349 main.log.info( str( response ) )
350 except Exception:
351 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700352 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700353 if re.search( "tcp:" + node.ip_address, response ):
354 mastershipCheck = mastershipCheck and main.TRUE
355 else:
356 main.log.error( "Error, node " + node.ip_address + " is " +
357 "not in the list of controllers s" +
358 str( i ) + " is connecting to." )
359 mastershipCheck = main.FALSE
360 utilities.assert_equals(
361 expect=main.TRUE,
362 actual=mastershipCheck,
363 onpass="Switch mastership assigned correctly",
364 onfail="Switches not assigned correctly to controllers" )
365
366 def CASE21( self, main ):
367 """
368 Assign mastership to controllers
369 """
Jon Hall5cf14d52015-07-16 12:15:19 -0700370 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700371 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700372 assert main, "main not defined"
373 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700374 assert main.CLIs, "main.CLIs not defined"
375 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700376 assert ONOS1Port, "ONOS1Port not defined"
377 assert ONOS2Port, "ONOS2Port not defined"
378 assert ONOS3Port, "ONOS3Port not defined"
379 assert ONOS4Port, "ONOS4Port not defined"
380 assert ONOS5Port, "ONOS5Port not defined"
381 assert ONOS6Port, "ONOS6Port not defined"
382 assert ONOS7Port, "ONOS7Port not defined"
383
384 main.case( "Assigning Controller roles for switches" )
Jon Hall783bbf92015-07-23 14:33:19 -0700385 main.caseExplanation = "Check that ONOS is connected to each " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700386 "device. Then manually assign" +\
387 " mastership to specific ONOS nodes using" +\
388 " 'device-role'"
389 main.step( "Assign mastership of switches to specific controllers" )
390 # Manually assign mastership to the controller we want
391 roleCall = main.TRUE
392
393 ipList = [ ]
394 deviceList = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700395 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700396 try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
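            # Intended layout (verifiable from the elif chain below):
            #   s1, s28   -> ONOS1      s2, s3  -> ONOS2
            #   s5, s6    -> ONOS3      s4      -> ONOS4
            #   s8 - s17  -> ONOS5      s7      -> ONOS6
            #   s18 - s27 -> ONOS7
            # With fewer than 7 controllers the modulo wraps these onto the
            # nodes that do exist.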
400 for i in range( 1, 29 ): # switches 1 through 28
401 # set up correct variables:
402 if i == 1:
403 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700404 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700405 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700406 elif i == 2:
Jon Halle1a3b752015-07-22 13:02:46 -0700407 c = 1 % main.numCtrls
408 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700409 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700410 elif i == 3:
Jon Halle1a3b752015-07-22 13:02:46 -0700411 c = 1 % main.numCtrls
412 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700413 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700414 elif i == 4:
Jon Halle1a3b752015-07-22 13:02:46 -0700415 c = 3 % main.numCtrls
416 ip = main.nodes[ c ].ip_address # ONOS4
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700417 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700418 elif i == 5:
Jon Halle1a3b752015-07-22 13:02:46 -0700419 c = 2 % main.numCtrls
420 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700421 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700422 elif i == 6:
Jon Halle1a3b752015-07-22 13:02:46 -0700423 c = 2 % main.numCtrls
424 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700425 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700426 elif i == 7:
Jon Halle1a3b752015-07-22 13:02:46 -0700427 c = 5 % main.numCtrls
428 ip = main.nodes[ c ].ip_address # ONOS6
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700429 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700430 elif i >= 8 and i <= 17:
Jon Halle1a3b752015-07-22 13:02:46 -0700431 c = 4 % main.numCtrls
432 ip = main.nodes[ c ].ip_address # ONOS5
Jon Hall5cf14d52015-07-16 12:15:19 -0700433 dpid = '3' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700434 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700435 elif i >= 18 and i <= 27:
Jon Halle1a3b752015-07-22 13:02:46 -0700436 c = 6 % main.numCtrls
437 ip = main.nodes[ c ].ip_address # ONOS7
Jon Hall5cf14d52015-07-16 12:15:19 -0700438 dpid = '6' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700439 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700440 elif i == 28:
441 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700442 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700443 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700444 else:
445 main.log.error( "You didn't write an else statement for " +
446 "switch s" + str( i ) )
447 roleCall = main.FALSE
448 # Assign switch
449 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
450 # TODO: make this controller dynamic
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700451 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
Jon Hall5cf14d52015-07-16 12:15:19 -0700452 ipList.append( ip )
453 deviceList.append( deviceId )
454 except ( AttributeError, AssertionError ):
455 main.log.exception( "Something is wrong with ONOS device view" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700456 main.log.info( onosCli.devices() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700457 utilities.assert_equals(
458 expect=main.TRUE,
459 actual=roleCall,
460 onpass="Re-assigned switch mastership to designated controller",
461 onfail="Something wrong with deviceRole calls" )
462
463 main.step( "Check mastership was correctly assigned" )
464 roleCheck = main.TRUE
465 # NOTE: This is due to the fact that device mastership change is not
466 # atomic and is actually a multi step process
467 time.sleep( 5 )
468 for i in range( len( ipList ) ):
469 ip = ipList[i]
470 deviceId = deviceList[i]
471 # Check assignment
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700472 master = onosCli.getRole( deviceId ).get( 'master' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700473 if ip in master:
474 roleCheck = roleCheck and main.TRUE
475 else:
476 roleCheck = roleCheck and main.FALSE
477 main.log.error( "Error, controller " + ip + " is not" +
478 " master " + "of device " +
479 str( deviceId ) + ". Master is " +
480 repr( master ) + "." )
481 utilities.assert_equals(
482 expect=main.TRUE,
483 actual=roleCheck,
484 onpass="Switches were successfully reassigned to designated " +
485 "controller",
486 onfail="Switches were not successfully reassigned" )
487
488 def CASE3( self, main ):
489 """
490 Assign intents
491 """
492 import time
493 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700494 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700495 assert main, "main not defined"
496 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700497 assert main.CLIs, "main.CLIs not defined"
498 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700499 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700500 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700501 "assign predetermined host-to-host intents." +\
502 " After installation, check that the intent" +\
503 " is distributed to all nodes and the state" +\
504 " is INSTALLED"
505
506 # install onos-app-fwd
507 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700508 onosCli = main.CLIs[ main.activeNodes[0] ]
509 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700510 utilities.assert_equals( expect=main.TRUE, actual=installResults,
511 onpass="Install fwd successful",
512 onfail="Install fwd failed" )
513
514 main.step( "Check app ids" )
515 appCheck = main.TRUE
516 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700517 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700518 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700519 name="appToIDCheck-" + str( i ),
520 args=[] )
521 threads.append( t )
522 t.start()
523
524 for t in threads:
525 t.join()
526 appCheck = appCheck and t.result
527 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700528 main.log.warn( onosCli.apps() )
529 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700530 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
531 onpass="App Ids seem to be correct",
532 onfail="Something is wrong with app Ids" )
533
        main.step( "Discovering Hosts (via pingall for now)" )
535 # FIXME: Once we have a host discovery mechanism, use that instead
536 # REACTIVE FWD test
537 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700538 passMsg = "Reactive Pingall test passed"
539 time1 = time.time()
540 pingResult = main.Mininet1.pingall()
541 time2 = time.time()
542 if not pingResult:
543 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700544 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700545 passMsg += " on the second try"
546 utilities.assert_equals(
547 expect=main.TRUE,
548 actual=pingResult,
549 onpass= passMsg,
550 onfail="Reactive Pingall failed, " +
551 "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %.2f seconds" %
                       ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700554 # timeout for fwd flows
555 time.sleep( 11 )
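        # Assumption: the reactive forwarding flow timeout defaults to about
        # 10 seconds, so sleeping 11 seconds lets the pingall's reactive flows
        # age out before the fwd app is deactivated and host intents are added.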
556 # uninstall onos-app-fwd
557 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700558 node = main.activeNodes[0]
559 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700560 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
561 onpass="Uninstall fwd successful",
562 onfail="Uninstall fwd failed" )
563
564 main.step( "Check app ids" )
565 threads = []
566 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700567 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700568 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700569 name="appToIDCheck-" + str( i ),
570 args=[] )
571 threads.append( t )
572 t.start()
573
574 for t in threads:
575 t.join()
576 appCheck2 = appCheck2 and t.result
577 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700578 node = main.activeNodes[0]
579 main.log.warn( main.CLIs[node].apps() )
580 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700581 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
582 onpass="App Ids seem to be correct",
583 onfail="Something is wrong with app Ids" )
584
585 main.step( "Add host intents via cli" )
586 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700587 # TODO: move the host numbers to params
588 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700589 intentAddResult = True
590 hostResult = main.TRUE
591 for i in range( 8, 18 ):
592 main.log.info( "Adding host intent between h" + str( i ) +
593 " and h" + str( i + 10 ) )
594 host1 = "00:00:00:00:00:" + \
595 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
596 host2 = "00:00:00:00:00:" + \
597 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
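            # e.g. i = 8 pairs h8 (00:00:00:00:00:08) with h18
            # (00:00:00:00:00:12)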
598 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700599 host1Dict = onosCli.getHost( host1 )
600 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700601 host1Id = None
602 host2Id = None
603 if host1Dict and host2Dict:
604 host1Id = host1Dict.get( 'id', None )
605 host2Id = host2Dict.get( 'id', None )
606 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700607 nodeNum = ( i % len( main.activeNodes ) )
608 node = main.activeNodes[nodeNum]
609 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700610 if tmpId:
611 main.log.info( "Added intent with id: " + tmpId )
612 intentIds.append( tmpId )
613 else:
614 main.log.error( "addHostIntent returned: " +
615 repr( tmpId ) )
616 else:
617 main.log.error( "Error, getHost() failed for h" + str( i ) +
618 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700619 node = main.activeNodes[0]
620 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700621 main.log.warn( "Hosts output: " )
622 try:
623 main.log.warn( json.dumps( json.loads( hosts ),
624 sort_keys=True,
625 indent=4,
626 separators=( ',', ': ' ) ) )
627 except ( ValueError, TypeError ):
628 main.log.warn( repr( hosts ) )
629 hostResult = main.FALSE
630 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
631 onpass="Found a host id for each host",
632 onfail="Error looking up host ids" )
633
634 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700635 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700636 main.log.info( "Submitted intents: " + str( intentIds ) )
637 main.log.info( "Intents in ONOS: " + str( onosIds ) )
638 for intent in intentIds:
639 if intent in onosIds:
640 pass # intent submitted is in onos
641 else:
642 intentAddResult = False
643 if intentAddResult:
644 intentStop = time.time()
645 else:
646 intentStop = None
647 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700648 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700649 intentStates = []
650 installedCheck = True
651 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
652 count = 0
653 try:
654 for intent in json.loads( intents ):
655 state = intent.get( 'state', None )
656 if "INSTALLED" not in state:
657 installedCheck = False
658 intentId = intent.get( 'id', None )
659 intentStates.append( ( intentId, state ) )
660 except ( ValueError, TypeError ):
661 main.log.exception( "Error parsing intents" )
662 # add submitted intents not in the store
663 tmplist = [ i for i, s in intentStates ]
664 missingIntents = False
665 for i in intentIds:
666 if i not in tmplist:
667 intentStates.append( ( i, " - " ) )
668 missingIntents = True
669 intentStates.sort()
670 for i, s in intentStates:
671 count += 1
672 main.log.info( "%-6s%-15s%-15s" %
673 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700674 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700675 try:
676 missing = False
677 if leaders:
678 parsedLeaders = json.loads( leaders )
679 main.log.warn( json.dumps( parsedLeaders,
680 sort_keys=True,
681 indent=4,
682 separators=( ',', ': ' ) ) )
683 # check for all intent partitions
684 topics = []
685 for i in range( 14 ):
686 topics.append( "intent-partition-" + str( i ) )
687 main.log.debug( topics )
688 ONOStopics = [ j['topic'] for j in parsedLeaders ]
689 for topic in topics:
690 if topic not in ONOStopics:
691 main.log.error( "Error: " + topic +
692 " not in leaders" )
693 missing = True
694 else:
695 main.log.error( "leaders() returned None" )
696 except ( ValueError, TypeError ):
697 main.log.exception( "Error parsing leaders" )
698 main.log.error( repr( leaders ) )
699 # Check all nodes
700 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700701 for i in main.activeNodes:
702 response = main.CLIs[i].leaders( jsonFormat=False)
703 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700704 str( response ) )
705
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700706 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700707 try:
708 if partitions :
709 parsedPartitions = json.loads( partitions )
710 main.log.warn( json.dumps( parsedPartitions,
711 sort_keys=True,
712 indent=4,
713 separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
715 # TODO check for consistency among nodes
716 else:
717 main.log.error( "partitions() returned None" )
718 except ( ValueError, TypeError ):
719 main.log.exception( "Error parsing partitions" )
720 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700721 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700722 try:
723 if pendingMap :
724 parsedPending = json.loads( pendingMap )
725 main.log.warn( json.dumps( parsedPending,
726 sort_keys=True,
727 indent=4,
728 separators=( ',', ': ' ) ) )
729 # TODO check something here?
730 else:
731 main.log.error( "pendingMap() returned None" )
732 except ( ValueError, TypeError ):
733 main.log.exception( "Error parsing pending map" )
734 main.log.error( repr( pendingMap ) )
735
736 intentAddResult = bool( intentAddResult and not missingIntents and
737 installedCheck )
738 if not intentAddResult:
739 main.log.error( "Error in pushing host intents to ONOS" )
740
741 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700742 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700743 correct = True
744 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700745 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700746 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700747 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700748 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700749 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700750 str( sorted( onosIds ) ) )
751 if sorted( ids ) != sorted( intentIds ):
752 main.log.warn( "Set of intent IDs doesn't match" )
753 correct = False
754 break
755 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700756 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700757 for intent in intents:
758 if intent[ 'state' ] != "INSTALLED":
759 main.log.warn( "Intent " + intent[ 'id' ] +
760 " is " + intent[ 'state' ] )
761 correct = False
762 break
763 if correct:
764 break
765 else:
766 time.sleep(1)
767 if not intentStop:
768 intentStop = time.time()
769 global gossipTime
770 gossipTime = intentStop - intentStart
771 main.log.info( "It took about " + str( gossipTime ) +
772 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700773 gossipPeriod = int( main.params['timers']['gossip'] )
774 maxGossipTime = gossipPeriod * len( main.activeNodes )
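        # The allowed convergence time scales with cluster size: e.g. a 5
        # second gossip period and 5 active nodes would allow up to 25 seconds
        # for intents to appear everywhere. The actual period comes from
        # main.params['timers']['gossip'].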
Jon Hall5cf14d52015-07-16 12:15:19 -0700775 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700776 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700777 onpass="ECM anti-entropy for intents worked within " +
778 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700779 onfail="Intent ECM anti-entropy took too long. " +
780 "Expected time:{}, Actual time:{}".format( maxGossipTime,
781 gossipTime ) )
782 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700783 intentAddResult = True
784
785 if not intentAddResult or "key" in pendingMap:
786 import time
787 installedCheck = True
788 main.log.info( "Sleeping 60 seconds to see if intents are found" )
789 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700790 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700791 main.log.info( "Submitted intents: " + str( intentIds ) )
792 main.log.info( "Intents in ONOS: " + str( onosIds ) )
793 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700794 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700795 intentStates = []
796 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
797 count = 0
798 try:
799 for intent in json.loads( intents ):
800 # Iter through intents of a node
801 state = intent.get( 'state', None )
802 if "INSTALLED" not in state:
803 installedCheck = False
804 intentId = intent.get( 'id', None )
805 intentStates.append( ( intentId, state ) )
806 except ( ValueError, TypeError ):
807 main.log.exception( "Error parsing intents" )
808 # add submitted intents not in the store
809 tmplist = [ i for i, s in intentStates ]
810 for i in intentIds:
811 if i not in tmplist:
812 intentStates.append( ( i, " - " ) )
813 intentStates.sort()
814 for i, s in intentStates:
815 count += 1
816 main.log.info( "%-6s%-15s%-15s" %
817 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700818 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700819 try:
820 missing = False
821 if leaders:
822 parsedLeaders = json.loads( leaders )
823 main.log.warn( json.dumps( parsedLeaders,
824 sort_keys=True,
825 indent=4,
826 separators=( ',', ': ' ) ) )
827 # check for all intent partitions
828 # check for election
829 topics = []
830 for i in range( 14 ):
831 topics.append( "intent-partition-" + str( i ) )
832 # FIXME: this should only be after we start the app
833 topics.append( "org.onosproject.election" )
834 main.log.debug( topics )
835 ONOStopics = [ j['topic'] for j in parsedLeaders ]
836 for topic in topics:
837 if topic not in ONOStopics:
838 main.log.error( "Error: " + topic +
839 " not in leaders" )
840 missing = True
841 else:
842 main.log.error( "leaders() returned None" )
843 except ( ValueError, TypeError ):
844 main.log.exception( "Error parsing leaders" )
845 main.log.error( repr( leaders ) )
846 # Check all nodes
847 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700848 for i in main.activeNodes:
849 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700850 response = node.leaders( jsonFormat=False)
851 main.log.warn( str( node.name ) + " leaders output: \n" +
852 str( response ) )
853
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700854 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700855 try:
856 if partitions :
857 parsedPartitions = json.loads( partitions )
858 main.log.warn( json.dumps( parsedPartitions,
859 sort_keys=True,
860 indent=4,
861 separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
863 # TODO check for consistency among nodes
864 else:
865 main.log.error( "partitions() returned None" )
866 except ( ValueError, TypeError ):
867 main.log.exception( "Error parsing partitions" )
868 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700869 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700870 try:
871 if pendingMap :
872 parsedPending = json.loads( pendingMap )
873 main.log.warn( json.dumps( parsedPending,
874 sort_keys=True,
875 indent=4,
876 separators=( ',', ': ' ) ) )
877 # TODO check something here?
878 else:
879 main.log.error( "pendingMap() returned None" )
880 except ( ValueError, TypeError ):
881 main.log.exception( "Error parsing pending map" )
882 main.log.error( repr( pendingMap ) )
883
884 def CASE4( self, main ):
885 """
886 Ping across added host intents
887 """
888 import json
889 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700890 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700891 assert main, "main not defined"
892 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700893 assert main.CLIs, "main.CLIs not defined"
894 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700895 main.case( "Verify connectivity by sending traffic across Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700896 main.caseExplanation = "Ping across added host intents to check " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700897 "functionality and check the state of " +\
898 "the intent"
899 main.step( "Ping across added host intents" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700900 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700901 PingResult = main.TRUE
902 for i in range( 8, 18 ):
903 ping = main.Mininet1.pingHost( src="h" + str( i ),
904 target="h" + str( i + 10 ) )
905 PingResult = PingResult and ping
906 if ping == main.FALSE:
907 main.log.warn( "Ping failed between h" + str( i ) +
908 " and h" + str( i + 10 ) )
909 elif ping == main.TRUE:
910 main.log.info( "Ping test passed!" )
911 # Don't set PingResult or you'd override failures
912 if PingResult == main.FALSE:
913 main.log.error(
914 "Intents have not been installed correctly, pings failed." )
915 # TODO: pretty print
916 main.log.warn( "ONOS1 intents: " )
917 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700918 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700919 main.log.warn( json.dumps( json.loads( tmpIntents ),
920 sort_keys=True,
921 indent=4,
922 separators=( ',', ': ' ) ) )
923 except ( ValueError, TypeError ):
924 main.log.warn( repr( tmpIntents ) )
925 utilities.assert_equals(
926 expect=main.TRUE,
927 actual=PingResult,
928 onpass="Intents have been installed correctly and pings work",
929 onfail="Intents have not been installed correctly, pings failed." )
930
931 main.step( "Check Intent state" )
932 installedCheck = False
933 loopCount = 0
934 while not installedCheck and loopCount < 40:
935 installedCheck = True
936 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700937 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700938 intentStates = []
939 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
940 count = 0
941 # Iter through intents of a node
942 try:
943 for intent in json.loads( intents ):
944 state = intent.get( 'state', None )
945 if "INSTALLED" not in state:
946 installedCheck = False
947 intentId = intent.get( 'id', None )
948 intentStates.append( ( intentId, state ) )
949 except ( ValueError, TypeError ):
950 main.log.exception( "Error parsing intents." )
951 # Print states
952 intentStates.sort()
953 for i, s in intentStates:
954 count += 1
955 main.log.info( "%-6s%-15s%-15s" %
956 ( str( count ), str( i ), str( s ) ) )
957 if not installedCheck:
958 time.sleep( 1 )
959 loopCount += 1
960 utilities.assert_equals( expect=True, actual=installedCheck,
961 onpass="Intents are all INSTALLED",
962 onfail="Intents are not all in " +
963 "INSTALLED state" )
964
965 main.step( "Check leadership of topics" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700966 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700967 topicCheck = main.TRUE
968 try:
969 if leaders:
970 parsedLeaders = json.loads( leaders )
971 main.log.warn( json.dumps( parsedLeaders,
972 sort_keys=True,
973 indent=4,
974 separators=( ',', ': ' ) ) )
975 # check for all intent partitions
976 # check for election
977 # TODO: Look at Devices as topics now that it uses this system
978 topics = []
979 for i in range( 14 ):
980 topics.append( "intent-partition-" + str( i ) )
981 # FIXME: this should only be after we start the app
982 # FIXME: topics.append( "org.onosproject.election" )
983 # Print leaders output
984 main.log.debug( topics )
985 ONOStopics = [ j['topic'] for j in parsedLeaders ]
986 for topic in topics:
987 if topic not in ONOStopics:
988 main.log.error( "Error: " + topic +
989 " not in leaders" )
990 topicCheck = main.FALSE
991 else:
992 main.log.error( "leaders() returned None" )
993 topicCheck = main.FALSE
994 except ( ValueError, TypeError ):
995 topicCheck = main.FALSE
996 main.log.exception( "Error parsing leaders" )
997 main.log.error( repr( leaders ) )
998 # TODO: Check for a leader of these topics
999 # Check all nodes
1000 if topicCheck:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001001 for i in main.activeNodes:
1002 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001003 response = node.leaders( jsonFormat=False)
1004 main.log.warn( str( node.name ) + " leaders output: \n" +
1005 str( response ) )
1006
1007 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1008 onpass="intent Partitions is in leaders",
1009 onfail="Some topics were lost " )
1010 # Print partitions
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001011 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001012 try:
1013 if partitions :
1014 parsedPartitions = json.loads( partitions )
1015 main.log.warn( json.dumps( parsedPartitions,
1016 sort_keys=True,
1017 indent=4,
1018 separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
1020 # TODO check for consistency among nodes
1021 else:
1022 main.log.error( "partitions() returned None" )
1023 except ( ValueError, TypeError ):
1024 main.log.exception( "Error parsing partitions" )
1025 main.log.error( repr( partitions ) )
1026 # Print Pending Map
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001027 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001028 try:
1029 if pendingMap :
1030 parsedPending = json.loads( pendingMap )
1031 main.log.warn( json.dumps( parsedPending,
1032 sort_keys=True,
1033 indent=4,
1034 separators=( ',', ': ' ) ) )
1035 # TODO check something here?
1036 else:
1037 main.log.error( "pendingMap() returned None" )
1038 except ( ValueError, TypeError ):
1039 main.log.exception( "Error parsing pending map" )
1040 main.log.error( repr( pendingMap ) )
1041
1042 if not installedCheck:
1043 main.log.info( "Waiting 60 seconds to see if the state of " +
1044 "intents change" )
1045 time.sleep( 60 )
1046 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001047 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001048 intentStates = []
1049 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1050 count = 0
1051 # Iter through intents of a node
1052 try:
1053 for intent in json.loads( intents ):
1054 state = intent.get( 'state', None )
1055 if "INSTALLED" not in state:
1056 installedCheck = False
1057 intentId = intent.get( 'id', None )
1058 intentStates.append( ( intentId, state ) )
1059 except ( ValueError, TypeError ):
1060 main.log.exception( "Error parsing intents." )
1061 intentStates.sort()
1062 for i, s in intentStates:
1063 count += 1
1064 main.log.info( "%-6s%-15s%-15s" %
1065 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001066 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001067 try:
1068 missing = False
1069 if leaders:
1070 parsedLeaders = json.loads( leaders )
1071 main.log.warn( json.dumps( parsedLeaders,
1072 sort_keys=True,
1073 indent=4,
1074 separators=( ',', ': ' ) ) )
1075 # check for all intent partitions
1076 # check for election
1077 topics = []
1078 for i in range( 14 ):
1079 topics.append( "intent-partition-" + str( i ) )
1080 # FIXME: this should only be after we start the app
1081 topics.append( "org.onosproject.election" )
1082 main.log.debug( topics )
1083 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1084 for topic in topics:
1085 if topic not in ONOStopics:
1086 main.log.error( "Error: " + topic +
1087 " not in leaders" )
1088 missing = True
1089 else:
1090 main.log.error( "leaders() returned None" )
1091 except ( ValueError, TypeError ):
1092 main.log.exception( "Error parsing leaders" )
1093 main.log.error( repr( leaders ) )
1094 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001095 for i in main.activeNodes:
1096 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001097 response = node.leaders( jsonFormat=False)
1098 main.log.warn( str( node.name ) + " leaders output: \n" +
1099 str( response ) )
1100
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001101 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001102 try:
1103 if partitions :
1104 parsedPartitions = json.loads( partitions )
1105 main.log.warn( json.dumps( parsedPartitions,
1106 sort_keys=True,
1107 indent=4,
1108 separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
1110 # TODO check for consistency among nodes
1111 else:
1112 main.log.error( "partitions() returned None" )
1113 except ( ValueError, TypeError ):
1114 main.log.exception( "Error parsing partitions" )
1115 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001116 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001117 try:
1118 if pendingMap :
1119 parsedPending = json.loads( pendingMap )
1120 main.log.warn( json.dumps( parsedPending,
1121 sort_keys=True,
1122 indent=4,
1123 separators=( ',', ': ' ) ) )
1124 # TODO check something here?
1125 else:
1126 main.log.error( "pendingMap() returned None" )
1127 except ( ValueError, TypeError ):
1128 main.log.exception( "Error parsing pending map" )
1129 main.log.error( repr( pendingMap ) )
1130 # Print flowrules
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001131 node = main.activeNodes[0]
1132 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001133 main.step( "Wait a minute then ping again" )
1134 # the wait is above
1135 PingResult = main.TRUE
1136 for i in range( 8, 18 ):
1137 ping = main.Mininet1.pingHost( src="h" + str( i ),
1138 target="h" + str( i + 10 ) )
1139 PingResult = PingResult and ping
1140 if ping == main.FALSE:
1141 main.log.warn( "Ping failed between h" + str( i ) +
1142 " and h" + str( i + 10 ) )
1143 elif ping == main.TRUE:
1144 main.log.info( "Ping test passed!" )
1145 # Don't set PingResult or you'd override failures
1146 if PingResult == main.FALSE:
1147 main.log.error(
1148 "Intents have not been installed correctly, pings failed." )
1149 # TODO: pretty print
1150 main.log.warn( "ONOS1 intents: " )
1151 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001152 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001153 main.log.warn( json.dumps( json.loads( tmpIntents ),
1154 sort_keys=True,
1155 indent=4,
1156 separators=( ',', ': ' ) ) )
1157 except ( ValueError, TypeError ):
1158 main.log.warn( repr( tmpIntents ) )
1159 utilities.assert_equals(
1160 expect=main.TRUE,
1161 actual=PingResult,
1162 onpass="Intents have been installed correctly and pings work",
1163 onfail="Intents have not been installed correctly, pings failed." )
1164
1165 def CASE5( self, main ):
1166 """
1167 Reading state of ONOS
1168 """
1169 import json
1170 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001171 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001172 assert main, "main not defined"
1173 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001174 assert main.CLIs, "main.CLIs not defined"
1175 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001176
1177 main.case( "Setting up and gathering data for current state" )
1178 # The general idea for this test case is to pull the state of
1179 # ( intents,flows, topology,... ) from each ONOS node
1180 # We can then compare them with each other and also with past states
1181
1182 main.step( "Check that each switch has a master" )
1183 global mastershipState
1184 mastershipState = '[]'
1185
1186 # Assert that each device has a master
1187 rolesNotNull = main.TRUE
1188 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001189 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001190 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001191 name="rolesNotNull-" + str( i ),
1192 args=[] )
1193 threads.append( t )
1194 t.start()
1195
1196 for t in threads:
1197 t.join()
1198 rolesNotNull = rolesNotNull and t.result
1199 utilities.assert_equals(
1200 expect=main.TRUE,
1201 actual=rolesNotNull,
1202 onpass="Each device has a master",
1203 onfail="Some devices don't have a master assigned" )
1204
1205 main.step( "Get the Mastership of each switch from each controller" )
1206 ONOSMastership = []
1207 mastershipCheck = main.FALSE
1208 consistentMastership = True
1209 rolesResults = True
1210 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001211 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001212 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001213 name="roles-" + str( i ),
1214 args=[] )
1215 threads.append( t )
1216 t.start()
1217
1218 for t in threads:
1219 t.join()
1220 ONOSMastership.append( t.result )
1221
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001222 for i in range( len( ONOSMastership ) ):
1223 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001224 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001225 main.log.error( "Error in getting ONOS" + node + " roles" )
1226 main.log.warn( "ONOS" + node + " mastership response: " +
1227 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001228 rolesResults = False
1229 utilities.assert_equals(
1230 expect=True,
1231 actual=rolesResults,
1232 onpass="No error in reading roles output",
1233 onfail="Error in reading roles from ONOS" )
1234
1235 main.step( "Check for consistency in roles from each controller" )
1236 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1237 main.log.info(
1238 "Switch roles are consistent across all ONOS nodes" )
1239 else:
1240 consistentMastership = False
1241 utilities.assert_equals(
1242 expect=True,
1243 actual=consistentMastership,
1244 onpass="Switch roles are consistent across all ONOS nodes",
1245 onfail="ONOS nodes have different views of switch roles" )
1246
1247 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001248 for i in range( len( main.activeNodes ) ):
1249 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001250 try:
1251 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001252 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001253 json.dumps(
1254 json.loads( ONOSMastership[ i ] ),
1255 sort_keys=True,
1256 indent=4,
1257 separators=( ',', ': ' ) ) )
1258 except ( ValueError, TypeError ):
1259 main.log.warn( repr( ONOSMastership[ i ] ) )
1260 elif rolesResults and consistentMastership:
1261 mastershipCheck = main.TRUE
1262 mastershipState = ONOSMastership[ 0 ]
1263
1264 main.step( "Get the intents from each controller" )
1265 global intentState
1266 intentState = []
1267 ONOSIntents = []
1268 intentCheck = main.FALSE
1269 consistentIntents = True
1270 intentsResults = True
1271 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001272 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001273 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001274 name="intents-" + str( i ),
1275 args=[],
1276 kwargs={ 'jsonFormat': True } )
1277 threads.append( t )
1278 t.start()
1279
1280 for t in threads:
1281 t.join()
1282 ONOSIntents.append( t.result )
1283
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001284 for i in range( len( ONOSIntents ) ):
1285 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001286 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001287 main.log.error( "Error in getting ONOS" + node + " intents" )
1288 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001289 repr( ONOSIntents[ i ] ) )
1290 intentsResults = False
1291 utilities.assert_equals(
1292 expect=True,
1293 actual=intentsResults,
1294 onpass="No error in reading intents output",
1295 onfail="Error in reading intents from ONOS" )
1296
1297 main.step( "Check for consistency in Intents from each controller" )
1298 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1299 main.log.info( "Intents are consistent across all ONOS " +
1300 "nodes" )
1301 else:
1302 consistentIntents = False
1303 main.log.error( "Intents not consistent" )
1304 utilities.assert_equals(
1305 expect=True,
1306 actual=consistentIntents,
1307 onpass="Intents are consistent across all ONOS nodes",
1308 onfail="ONOS nodes have different views of intents" )
1309
1310 if intentsResults:
1311 # Try to make it easy to figure out what is happening
1312 #
1313 # Intent ONOS1 ONOS2 ...
1314 # 0x01 INSTALLED INSTALLING
1315 # ... ... ...
1316 # ... ... ...
1317 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001318 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001319 title += " " * 10 + "ONOS" + str( n + 1 )
1320 main.log.warn( title )
1321 # get all intent keys in the cluster
1322 keys = []
1323 for nodeStr in ONOSIntents:
1324 node = json.loads( nodeStr )
1325 for intent in node:
1326 keys.append( intent.get( 'id' ) )
1327 keys = set( keys )
1328 for key in keys:
1329 row = "%-13s" % key
1330 for nodeStr in ONOSIntents:
1331 node = json.loads( nodeStr )
1332 for intent in node:
1333 if intent.get( 'id', "Error" ) == key:
1334 row += "%-15s" % intent.get( 'state' )
1335 main.log.warn( row )
1336 # End table view
1337
1338 if intentsResults and not consistentIntents:
1339 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001340 n = str( main.activeNodes[-1] + 1 )
1341 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001342 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1343 sort_keys=True,
1344 indent=4,
1345 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001346 for i in range( len( ONOSIntents ) ):
1347 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001348 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001349 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001350 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1351 sort_keys=True,
1352 indent=4,
1353 separators=( ',', ': ' ) ) )
1354 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001355 main.log.debug( "ONOS" + node + " intents match ONOS" +
1356 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001357 elif intentsResults and consistentIntents:
1358 intentCheck = main.TRUE
1359 intentState = ONOSIntents[ 0 ]
1360
1361 main.step( "Get the flows from each controller" )
1362 global flowState
1363 flowState = []
1364 ONOSFlows = []
1365 ONOSFlowsJson = []
1366 flowCheck = main.FALSE
1367 consistentFlows = True
1368 flowsResults = True
1369 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001370 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001371 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001372 name="flows-" + str( i ),
1373 args=[],
1374 kwargs={ 'jsonFormat': True } )
1375 threads.append( t )
1376 t.start()
1377
1378 # NOTE: Flows command can take some time to run
1379 time.sleep(30)
1380 for t in threads:
1381 t.join()
1382 result = t.result
1383 ONOSFlows.append( result )
1384
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001385 for i in range( len( ONOSFlows ) ):
1386 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001387 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1388 main.log.error( "Error in getting ONOS" + num + " flows" )
1389 main.log.warn( "ONOS" + num + " flows response: " +
1390 repr( ONOSFlows[ i ] ) )
1391 flowsResults = False
1392 ONOSFlowsJson.append( None )
1393 else:
1394 try:
1395 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1396 except ( ValueError, TypeError ):
1397 # FIXME: change this to log.error?
1398 main.log.exception( "Error in parsing ONOS" + num +
1399 " response as json." )
1400 main.log.error( repr( ONOSFlows[ i ] ) )
1401 ONOSFlowsJson.append( None )
1402 flowsResults = False
1403 utilities.assert_equals(
1404 expect=True,
1405 actual=flowsResults,
1406 onpass="No error in reading flows output",
1407 onfail="Error in reading flows from ONOS" )
1408
1409 main.step( "Check for consistency in Flows from each controller" )
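            # NOTE: only the number of flows on each node is compared below, not
            #       the flow contents, so this is a fairly coarse consistency check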
1410 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1411 if all( tmp ):
1412 main.log.info( "Flow count is consistent across all ONOS nodes" )
1413 else:
1414 consistentFlows = False
1415 utilities.assert_equals(
1416 expect=True,
1417 actual=consistentFlows,
1418 onpass="The flow count is consistent across all ONOS nodes",
1419 onfail="ONOS nodes have different flow counts" )
1420
1421 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001422 for i in range( len( ONOSFlows ) ):
1423 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001424 try:
1425 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001426 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001427 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1428 indent=4, separators=( ',', ': ' ) ) )
1429 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001430 main.log.warn( "ONOS" + node + " flows: " +
1431 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001432 elif flowsResults and consistentFlows:
1433 flowCheck = main.TRUE
1434 flowState = ONOSFlows[ 0 ]
1435
1436 main.step( "Get the OF Table entries" )
1437 global flows
1438 flows = []
1439 for i in range( 1, 29 ):
Jon Hallca7ac292015-11-11 09:28:12 -08001440 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3" ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001441 if flowCheck == main.FALSE:
1442 for table in flows:
1443 main.log.warn( table )
1444 # TODO: Compare switch flow tables with ONOS flow tables
1445
1446 main.step( "Start continuous pings" )
1447 main.Mininet2.pingLong(
1448 src=main.params[ 'PING' ][ 'source1' ],
1449 target=main.params[ 'PING' ][ 'target1' ],
1450 pingTime=500 )
1451 main.Mininet2.pingLong(
1452 src=main.params[ 'PING' ][ 'source2' ],
1453 target=main.params[ 'PING' ][ 'target2' ],
1454 pingTime=500 )
1455 main.Mininet2.pingLong(
1456 src=main.params[ 'PING' ][ 'source3' ],
1457 target=main.params[ 'PING' ][ 'target3' ],
1458 pingTime=500 )
1459 main.Mininet2.pingLong(
1460 src=main.params[ 'PING' ][ 'source4' ],
1461 target=main.params[ 'PING' ][ 'target4' ],
1462 pingTime=500 )
1463 main.Mininet2.pingLong(
1464 src=main.params[ 'PING' ][ 'source5' ],
1465 target=main.params[ 'PING' ][ 'target5' ],
1466 pingTime=500 )
1467 main.Mininet2.pingLong(
1468 src=main.params[ 'PING' ][ 'source6' ],
1469 target=main.params[ 'PING' ][ 'target6' ],
1470 pingTime=500 )
1471 main.Mininet2.pingLong(
1472 src=main.params[ 'PING' ][ 'source7' ],
1473 target=main.params[ 'PING' ][ 'target7' ],
1474 pingTime=500 )
1475 main.Mininet2.pingLong(
1476 src=main.params[ 'PING' ][ 'source8' ],
1477 target=main.params[ 'PING' ][ 'target8' ],
1478 pingTime=500 )
1479 main.Mininet2.pingLong(
1480 src=main.params[ 'PING' ][ 'source9' ],
1481 target=main.params[ 'PING' ][ 'target9' ],
1482 pingTime=500 )
1483 main.Mininet2.pingLong(
1484 src=main.params[ 'PING' ][ 'source10' ],
1485 target=main.params[ 'PING' ][ 'target10' ],
1486 pingTime=500 )
1487
1488 main.step( "Collecting topology information from ONOS" )
1489 devices = []
1490 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001491 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001492 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001493 name="devices-" + str( i ),
1494 args=[ ] )
1495 threads.append( t )
1496 t.start()
1497
1498 for t in threads:
1499 t.join()
1500 devices.append( t.result )
1501 hosts = []
1502 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001503 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001504 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001505 name="hosts-" + str( i ),
1506 args=[ ] )
1507 threads.append( t )
1508 t.start()
1509
1510 for t in threads:
1511 t.join()
1512 try:
1513 hosts.append( json.loads( t.result ) )
1514 except ( ValueError, TypeError ):
1515 # FIXME: better handling of this, print which node
1516 # Maybe use thread name?
1517 main.log.exception( "Error parsing json output of hosts" )
1518 # FIXME: should this be an empty json object instead?
1519 hosts.append( None )
1520
1521 ports = []
1522 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001523 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001524 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001525 name="ports-" + str( i ),
1526 args=[ ] )
1527 threads.append( t )
1528 t.start()
1529
1530 for t in threads:
1531 t.join()
1532 ports.append( t.result )
1533 links = []
1534 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001535 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001536 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001537 name="links-" + str( i ),
1538 args=[ ] )
1539 threads.append( t )
1540 t.start()
1541
1542 for t in threads:
1543 t.join()
1544 links.append( t.result )
1545 clusters = []
1546 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001547 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001548 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001549 name="clusters-" + str( i ),
1550 args=[ ] )
1551 threads.append( t )
1552 t.start()
1553
1554 for t in threads:
1555 t.join()
1556 clusters.append( t.result )
1557 # Compare json objects for hosts and dataplane clusters
1558
1559 # hosts
1560 main.step( "Host view is consistent across ONOS nodes" )
1561 consistentHostsResult = main.TRUE
1562 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001563 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001564 if "Error" not in hosts[ controller ]:
1565 if hosts[ controller ] == hosts[ 0 ]:
1566 continue
1567 else: # hosts not consistent
1568 main.log.error( "hosts from ONOS" +
1569 controllerStr +
1570 " is inconsistent with ONOS1" )
1571 main.log.warn( repr( hosts[ controller ] ) )
1572 consistentHostsResult = main.FALSE
1573
1574 else:
1575 main.log.error( "Error in getting ONOS hosts from ONOS" +
1576 controllerStr )
1577 consistentHostsResult = main.FALSE
1578 main.log.warn( "ONOS" + controllerStr +
1579 " hosts response: " +
1580 repr( hosts[ controller ] ) )
1581 utilities.assert_equals(
1582 expect=main.TRUE,
1583 actual=consistentHostsResult,
1584 onpass="Hosts view is consistent across all ONOS nodes",
1585 onfail="ONOS nodes have different views of hosts" )
1586
1587 main.step( "Each host has an IP address" )
1588 ipResult = main.TRUE
1589 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001590 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001591 for host in hosts[ controller ]:
1592 if not host.get( 'ipAddresses', [ ] ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001593 main.log.error( "Error with host ips on controller" +
Jon Hall5cf14d52015-07-16 12:15:19 -07001594 controllerStr + ": " + str( host ) )
1595 ipResult = main.FALSE
1596 utilities.assert_equals(
1597 expect=main.TRUE,
1598 actual=ipResult,
1599 onpass="The ips of the hosts aren't empty",
1600 onfail="The ip of at least one host is missing" )
1601
1602 # Strongly connected clusters of devices
1603 main.step( "Cluster view is consistent across ONOS nodes" )
1604 consistentClustersResult = main.TRUE
1605 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001606 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001607 if "Error" not in clusters[ controller ]:
1608 if clusters[ controller ] == clusters[ 0 ]:
1609 continue
1610 else: # clusters not consistent
1611 main.log.error( "clusters from ONOS" + controllerStr +
1612 " is inconsistent with ONOS1" )
1613 consistentClustersResult = main.FALSE
1614
1615 else:
1616 main.log.error( "Error in getting dataplane clusters " +
1617 "from ONOS" + controllerStr )
1618 consistentClustersResult = main.FALSE
1619 main.log.warn( "ONOS" + controllerStr +
1620 " clusters response: " +
1621 repr( clusters[ controller ] ) )
1622 utilities.assert_equals(
1623 expect=main.TRUE,
1624 actual=consistentClustersResult,
1625 onpass="Clusters view is consistent across all ONOS nodes",
1626 onfail="ONOS nodes have different views of clusters" )
1627 # there should always only be one cluster
1628 main.step( "Cluster view correct across ONOS nodes" )
1629 try:
1630 numClusters = len( json.loads( clusters[ 0 ] ) )
1631 except ( ValueError, TypeError ):
1632 main.log.exception( "Error parsing clusters[0]: " +
1633 repr( clusters[ 0 ] ) )
1634            clusterResults = main.FALSE
                numClusters = "ERROR"  # avoid a NameError in the check below
1635 if numClusters == 1:
1636 clusterResults = main.TRUE
1637 utilities.assert_equals(
1638 expect=1,
1639 actual=numClusters,
1640 onpass="ONOS shows 1 SCC",
1641 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1642
1643 main.step( "Comparing ONOS topology to MN" )
1644 devicesResults = main.TRUE
1645 linksResults = main.TRUE
1646 hostsResults = main.TRUE
1647 mnSwitches = main.Mininet1.getSwitches()
1648 mnLinks = main.Mininet1.getLinks()
1649 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001650        for controller in range( len( main.activeNodes ) ):
1651 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001652 if devices[ controller ] and ports[ controller ] and\
1653 "Error" not in devices[ controller ] and\
1654 "Error" not in ports[ controller ]:
1655
1656 currentDevicesResult = main.Mininet1.compareSwitches(
1657 mnSwitches,
1658 json.loads( devices[ controller ] ),
1659 json.loads( ports[ controller ] ) )
1660 else:
1661 currentDevicesResult = main.FALSE
1662 utilities.assert_equals( expect=main.TRUE,
1663 actual=currentDevicesResult,
1664 onpass="ONOS" + controllerStr +
1665 " Switches view is correct",
1666 onfail="ONOS" + controllerStr +
1667 " Switches view is incorrect" )
1668 if links[ controller ] and "Error" not in links[ controller ]:
1669 currentLinksResult = main.Mininet1.compareLinks(
1670 mnSwitches, mnLinks,
1671 json.loads( links[ controller ] ) )
1672 else:
1673 currentLinksResult = main.FALSE
1674 utilities.assert_equals( expect=main.TRUE,
1675 actual=currentLinksResult,
1676 onpass="ONOS" + controllerStr +
1677 " links view is correct",
1678 onfail="ONOS" + controllerStr +
1679 " links view is incorrect" )
1680
1681 if hosts[ controller ] or "Error" not in hosts[ controller ]:
1682 currentHostsResult = main.Mininet1.compareHosts(
1683 mnHosts,
1684 hosts[ controller ] )
1685 else:
1686 currentHostsResult = main.FALSE
1687 utilities.assert_equals( expect=main.TRUE,
1688 actual=currentHostsResult,
1689 onpass="ONOS" + controllerStr +
1690 " hosts exist in Mininet",
1691 onfail="ONOS" + controllerStr +
1692 " hosts don't match Mininet" )
1693
1694 devicesResults = devicesResults and currentDevicesResult
1695 linksResults = linksResults and currentLinksResult
1696 hostsResults = hostsResults and currentHostsResult
1697
1698 main.step( "Device information is correct" )
1699 utilities.assert_equals(
1700 expect=main.TRUE,
1701 actual=devicesResults,
1702 onpass="Device information is correct",
1703 onfail="Device information is incorrect" )
1704
1705 main.step( "Links are correct" )
1706 utilities.assert_equals(
1707 expect=main.TRUE,
1708 actual=linksResults,
1709            onpass="Links are correct",
1710 onfail="Links are incorrect" )
1711
1712 main.step( "Hosts are correct" )
1713 utilities.assert_equals(
1714 expect=main.TRUE,
1715 actual=hostsResults,
1716 onpass="Hosts are correct",
1717 onfail="Hosts are incorrect" )
1718
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001719 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001720 """
1721        The Failure inducing case: kill a minority of the ONOS nodes.
1722 """
Jon Halle1a3b752015-07-22 13:02:46 -07001723 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001724 assert main, "main not defined"
1725 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001726 assert main.CLIs, "main.CLIs not defined"
1727 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001728 main.case( "Kill minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001729
1730 main.step( "Checking ONOS Logs for errors" )
1731 for node in main.nodes:
1732 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1733 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1734
Jon Hall3b489db2015-10-05 14:38:37 -07001735 n = len( main.nodes ) # Number of nodes
1736 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1737 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1738 if n > 3:
1739 main.kill.append( p - 1 )
1740 # NOTE: This only works for cluster sizes of 3,5, or 7.
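                # A sketch of the arithmetic ( illustration only, not used by the test ):
                #   n = 3  ->  kill = [ 0 ]             ( 1 of 3 nodes )
                #   n = 5  ->  p = 4, kill = [ 0, 3 ]   ( 2 of 5 nodes )
                #   n = 7  ->  p = 5, kill = [ 0, 4 ]   ( 2 of 7 nodes )
                # i.e. a strict minority of the cluster is killed in each case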
1741
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001742 main.step( "Kill " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001743 killResults = main.TRUE
1744 for i in main.kill:
1745 killResults = killResults and\
1746 main.ONOSbench.onosKill( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001747 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001748 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001749 onpass="ONOS nodes killed successfully",
1750 onfail="ONOS nodes NOT successfully killed" )
1751
1752 def CASE62( self, main ):
1753 """
1754        Bring the stopped nodes back up
1755 """
1756 import time
1757 assert main.numCtrls, "main.numCtrls not defined"
1758 assert main, "main not defined"
1759 assert utilities.assert_equals, "utilities.assert_equals not defined"
1760 assert main.CLIs, "main.CLIs not defined"
1761 assert main.nodes, "main.nodes not defined"
1762 assert main.kill, "main.kill not defined"
1763 main.case( "Restart minority of ONOS nodes" )
1764
1765 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1766 startResults = main.TRUE
1767 restartTime = time.time()
1768 for i in main.kill:
1769 startResults = startResults and\
1770 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1771 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1772 onpass="ONOS nodes started successfully",
1773 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001774
1775 main.step( "Checking if ONOS is up yet" )
1776 count = 0
1777 onosIsupResult = main.FALSE
1778 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001779 onosIsupResult = main.TRUE
1780 for i in main.kill:
1781 onosIsupResult = onosIsupResult and\
1782 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001783 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001784 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1785 onpass="ONOS restarted successfully",
1786 onfail="ONOS restart NOT successful" )
1787
Jon Halle1a3b752015-07-22 13:02:46 -07001788 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001789 cliResults = main.TRUE
1790 for i in main.kill:
1791 cliResults = cliResults and\
1792 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001793 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001794 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1795 onpass="ONOS cli restarted",
1796 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001797 main.activeNodes.sort()
1798 try:
1799 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1800 "List of active nodes has duplicates, this likely indicates something was run out of order"
1801 except AssertionError:
1802 main.log.exception( "" )
1803 main.cleanup()
1804 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001805
1806        # Grab the time of restart so we can check how long the gossip
1807 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001808 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001809 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001810 # TODO: MAke this configurable. Also, we are breaking the above timer
1811 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001812 node = main.activeNodes[0]
1813 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1814 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1815 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001816
1817 def CASE7( self, main ):
1818 """
1819 Check state after ONOS failure
1820 """
1821 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001822 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001823 assert main, "main not defined"
1824 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001825 assert main.CLIs, "main.CLIs not defined"
1826 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001827 try:
1828 main.kill
1829 except AttributeError:
1830 main.kill = []
1831
Jon Hall5cf14d52015-07-16 12:15:19 -07001832 main.case( "Running ONOS Constant State Tests" )
1833
1834 main.step( "Check that each switch has a master" )
1835 # Assert that each device has a master
1836 rolesNotNull = main.TRUE
1837 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001838 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001839 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001840 name="rolesNotNull-" + str( i ),
1841 args=[ ] )
1842 threads.append( t )
1843 t.start()
1844
1845 for t in threads:
1846 t.join()
1847 rolesNotNull = rolesNotNull and t.result
1848 utilities.assert_equals(
1849 expect=main.TRUE,
1850 actual=rolesNotNull,
1851 onpass="Each device has a master",
1852 onfail="Some devices don't have a master assigned" )
1853
1854 main.step( "Read device roles from ONOS" )
1855 ONOSMastership = []
1856 consistentMastership = True
1857 rolesResults = True
1858 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001859 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001860 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001861 name="roles-" + str( i ),
1862 args=[] )
1863 threads.append( t )
1864 t.start()
1865
1866 for t in threads:
1867 t.join()
1868 ONOSMastership.append( t.result )
1869
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001870 for i in range( len( ONOSMastership ) ):
1871 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001872 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001873 main.log.error( "Error in getting ONOS" + node + " roles" )
1874 main.log.warn( "ONOS" + node + " mastership response: " +
1875 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001876 rolesResults = False
1877 utilities.assert_equals(
1878 expect=True,
1879 actual=rolesResults,
1880 onpass="No error in reading roles output",
1881 onfail="Error in reading roles from ONOS" )
1882
1883 main.step( "Check for consistency in roles from each controller" )
1884 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1885 main.log.info(
1886 "Switch roles are consistent across all ONOS nodes" )
1887 else:
1888 consistentMastership = False
1889 utilities.assert_equals(
1890 expect=True,
1891 actual=consistentMastership,
1892 onpass="Switch roles are consistent across all ONOS nodes",
1893 onfail="ONOS nodes have different views of switch roles" )
1894
1895 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001896 for i in range( len( ONOSMastership ) ):
1897 node = str( main.activeNodes[i] + 1 )
1898 main.log.warn( "ONOS" + node + " roles: ",
1899 json.dumps( json.loads( ONOSMastership[ i ] ),
1900 sort_keys=True,
1901 indent=4,
1902 separators=( ',', ': ' ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001903
1904 # NOTE: we expect mastership to change on controller failure
Jon Hall5cf14d52015-07-16 12:15:19 -07001905
1906 main.step( "Get the intents and compare across all nodes" )
1907 ONOSIntents = []
1908 intentCheck = main.FALSE
1909 consistentIntents = True
1910 intentsResults = True
1911 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001912 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001913 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001914 name="intents-" + str( i ),
1915 args=[],
1916 kwargs={ 'jsonFormat': True } )
1917 threads.append( t )
1918 t.start()
1919
1920 for t in threads:
1921 t.join()
1922 ONOSIntents.append( t.result )
1923
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001924 for i in range( len( ONOSIntents) ):
1925 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001926 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001927 main.log.error( "Error in getting ONOS" + node + " intents" )
1928 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001929 repr( ONOSIntents[ i ] ) )
1930 intentsResults = False
1931 utilities.assert_equals(
1932 expect=True,
1933 actual=intentsResults,
1934 onpass="No error in reading intents output",
1935 onfail="Error in reading intents from ONOS" )
1936
1937 main.step( "Check for consistency in Intents from each controller" )
1938 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1939 main.log.info( "Intents are consistent across all ONOS " +
1940 "nodes" )
1941 else:
1942 consistentIntents = False
1943
1944 # Try to make it easy to figure out what is happening
1945 #
1946 # Intent ONOS1 ONOS2 ...
1947 # 0x01 INSTALLED INSTALLING
1948 # ... ... ...
1949 # ... ... ...
1950 title = " ID"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001951 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001952 title += " " * 10 + "ONOS" + str( n + 1 )
1953 main.log.warn( title )
1954 # get all intent keys in the cluster
1955 keys = []
1956 for nodeStr in ONOSIntents:
1957 node = json.loads( nodeStr )
1958 for intent in node:
1959 keys.append( intent.get( 'id' ) )
1960 keys = set( keys )
1961 for key in keys:
1962 row = "%-13s" % key
1963 for nodeStr in ONOSIntents:
1964 node = json.loads( nodeStr )
1965 for intent in node:
1966 if intent.get( 'id' ) == key:
1967 row += "%-15s" % intent.get( 'state' )
1968 main.log.warn( row )
1969 # End table view
1970
1971 utilities.assert_equals(
1972 expect=True,
1973 actual=consistentIntents,
1974 onpass="Intents are consistent across all ONOS nodes",
1975 onfail="ONOS nodes have different views of intents" )
1976 intentStates = []
1977 for node in ONOSIntents: # Iter through ONOS nodes
1978 nodeStates = []
1979 # Iter through intents of a node
1980 try:
1981 for intent in json.loads( node ):
1982 nodeStates.append( intent[ 'state' ] )
1983 except ( ValueError, TypeError ):
1984 main.log.exception( "Error in parsing intents" )
1985 main.log.error( repr( node ) )
1986 intentStates.append( nodeStates )
1987 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
1988 main.log.info( dict( out ) )
1989
1990 if intentsResults and not consistentIntents:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001991 for i in range( len( main.activeNodes ) ):
1992 node = str( main.activeNodes[i] + 1 )
1993 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001994 main.log.warn( json.dumps(
1995 json.loads( ONOSIntents[ i ] ),
1996 sort_keys=True,
1997 indent=4,
1998 separators=( ',', ': ' ) ) )
1999 elif intentsResults and consistentIntents:
2000 intentCheck = main.TRUE
2001
2002 # NOTE: Store has no durability, so intents are lost across system
2003 # restarts
2004 main.step( "Compare current intents with intents before the failure" )
2005 # NOTE: this requires case 5 to pass for intentState to be set.
2006 # maybe we should stop the test if that fails?
2007 sameIntents = main.FALSE
2008 if intentState and intentState == ONOSIntents[ 0 ]:
2009 sameIntents = main.TRUE
2010 main.log.info( "Intents are consistent with before failure" )
2011 # TODO: possibly the states have changed? we may need to figure out
2012 # what the acceptable states are
2013 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
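                # NOTE: intentState and ONOSIntents[ 0 ] are raw json strings here, so
                #       this length check is only a rough "same size" heuristic before
                #       the per-intent comparison below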
2014 sameIntents = main.TRUE
2015 try:
2016 before = json.loads( intentState )
2017 after = json.loads( ONOSIntents[ 0 ] )
2018 for intent in before:
2019 if intent not in after:
2020 sameIntents = main.FALSE
2021 main.log.debug( "Intent is not currently in ONOS " +
2022 "(at least in the same form):" )
2023 main.log.debug( json.dumps( intent ) )
2024 except ( ValueError, TypeError ):
2025 main.log.exception( "Exception printing intents" )
2026 main.log.debug( repr( ONOSIntents[0] ) )
2027 main.log.debug( repr( intentState ) )
2028 if sameIntents == main.FALSE:
2029 try:
2030 main.log.debug( "ONOS intents before: " )
2031 main.log.debug( json.dumps( json.loads( intentState ),
2032 sort_keys=True, indent=4,
2033 separators=( ',', ': ' ) ) )
2034 main.log.debug( "Current ONOS intents: " )
2035 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2036 sort_keys=True, indent=4,
2037 separators=( ',', ': ' ) ) )
2038 except ( ValueError, TypeError ):
2039 main.log.exception( "Exception printing intents" )
2040 main.log.debug( repr( ONOSIntents[0] ) )
2041 main.log.debug( repr( intentState ) )
2042 utilities.assert_equals(
2043 expect=main.TRUE,
2044 actual=sameIntents,
2045 onpass="Intents are consistent with before failure",
2046 onfail="The Intents changed during failure" )
2047 intentCheck = intentCheck and sameIntents
2048
2049 main.step( "Get the OF Table entries and compare to before " +
2050 "component failure" )
2051 FlowTables = main.TRUE
2052 flows2 = []
2053 for i in range( 28 ):
2054 main.log.info( "Checking flow table on s" + str( i + 1 ) )
Jon Hallca7ac292015-11-11 09:28:12 -08002055 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002056 flows2.append( tmpFlows )
Jon Hall9043c902015-07-30 14:23:44 -07002057 tempResult = main.Mininet1.flowComp(
Jon Hall5cf14d52015-07-16 12:15:19 -07002058 flow1=flows[ i ],
2059 flow2=tmpFlows )
2060 FlowTables = FlowTables and tempResult
2061 if FlowTables == main.FALSE:
2062 main.log.info( "Differences in flow table for switch: s" +
2063 str( i + 1 ) )
2064 utilities.assert_equals(
2065 expect=main.TRUE,
2066 actual=FlowTables,
2067 onpass="No changes were found in the flow tables",
2068 onfail="Changes were found in the flow tables" )
2069
2070 main.Mininet2.pingLongKill()
2071 '''
2072 main.step( "Check the continuous pings to ensure that no packets " +
2073 "were dropped during component failure" )
2074 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2075 main.params[ 'TESTONIP' ] )
2076 LossInPings = main.FALSE
2077 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2078 for i in range( 8, 18 ):
2079 main.log.info(
2080 "Checking for a loss in pings along flow from s" +
2081 str( i ) )
2082 LossInPings = main.Mininet2.checkForLoss(
2083 "/tmp/ping.h" +
2084 str( i ) ) or LossInPings
2085 if LossInPings == main.TRUE:
2086 main.log.info( "Loss in ping detected" )
2087 elif LossInPings == main.ERROR:
2088 main.log.info( "There are multiple mininet process running" )
2089 elif LossInPings == main.FALSE:
2090 main.log.info( "No Loss in the pings" )
2091 main.log.info( "No loss of dataplane connectivity" )
2092 utilities.assert_equals(
2093 expect=main.FALSE,
2094 actual=LossInPings,
2095 onpass="No Loss of connectivity",
2096 onfail="Loss of dataplane connectivity detected" )
2097 '''
2098
2099 main.step( "Leadership Election is still functional" )
2100 # Test of LeadershipElection
2101 leaderList = []
Jon Hall5cf14d52015-07-16 12:15:19 -07002102
Jon Hall3b489db2015-10-05 14:38:37 -07002103 restarted = []
2104 for i in main.kill:
2105 restarted.append( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07002106 leaderResult = main.TRUE
Jon Hall3b489db2015-10-05 14:38:37 -07002107
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002108 for i in main.activeNodes:
2109 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002110 leaderN = cli.electionTestLeader()
2111 leaderList.append( leaderN )
2112 if leaderN == main.FALSE:
2113 # error in response
2114 main.log.error( "Something is wrong with " +
2115 "electionTestLeader function, check the" +
2116 " error logs" )
2117 leaderResult = main.FALSE
2118 elif leaderN is None:
2119 main.log.error( cli.name +
2120 " shows no leader for the election-app was" +
2121 " elected after the old one died" )
2122 leaderResult = main.FALSE
2123 elif leaderN in restarted:
2124 main.log.error( cli.name + " shows " + str( leaderN ) +
2125 " as leader for the election-app, but it " +
2126 "was restarted" )
2127 leaderResult = main.FALSE
2128 if len( set( leaderList ) ) != 1:
2129 leaderResult = main.FALSE
2130 main.log.error(
2131 "Inconsistent view of leader for the election test app" )
2132 # TODO: print the list
2133 utilities.assert_equals(
2134 expect=main.TRUE,
2135 actual=leaderResult,
2136 onpass="Leadership election passed",
2137 onfail="Something went wrong with Leadership election" )
2138
2139 def CASE8( self, main ):
2140 """
2141 Compare topo
2142 """
2143 import json
2144 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002145 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002146 assert main, "main not defined"
2147 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002148 assert main.CLIs, "main.CLIs not defined"
2149 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002150
2151 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002152 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002153 " and ONOS"
2154
2155 main.step( "Comparing ONOS topology to MN" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002156 topoResult = main.FALSE
2157 elapsed = 0
2158 count = 0
2159 main.step( "Collecting topology information from ONOS" )
2160 startTime = time.time()
2161 # Give time for Gossip to work
2162 while topoResult == main.FALSE and elapsed < 60:
Jon Hall96091e62015-09-21 17:34:17 -07002163 devicesResults = main.TRUE
2164 linksResults = main.TRUE
2165 hostsResults = main.TRUE
2166 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002167 count += 1
2168 cliStart = time.time()
2169 devices = []
2170 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002171 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002172 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002173 name="devices-" + str( i ),
2174 args=[ ] )
2175 threads.append( t )
2176 t.start()
2177
2178 for t in threads:
2179 t.join()
2180 devices.append( t.result )
2181 hosts = []
2182 ipResult = main.TRUE
2183 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002184 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002185 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07002186 name="hosts-" + str( i ),
2187 args=[ ] )
2188 threads.append( t )
2189 t.start()
2190
2191 for t in threads:
2192 t.join()
2193 try:
2194 hosts.append( json.loads( t.result ) )
2195 except ( ValueError, TypeError ):
2196 main.log.exception( "Error parsing hosts results" )
2197 main.log.error( repr( t.result ) )
2198 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002199 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002200 for host in hosts[ controller ]:
2201 if host is None or host.get( 'ipAddresses', [] ) == []:
2202 main.log.error(
2203 "DEBUG:Error with host ipAddresses on controller" +
2204 controllerStr + ": " + str( host ) )
2205 ipResult = main.FALSE
2206 ports = []
2207 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002208 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002209 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002210 name="ports-" + str( i ),
2211 args=[ ] )
2212 threads.append( t )
2213 t.start()
2214
2215 for t in threads:
2216 t.join()
2217 ports.append( t.result )
2218 links = []
2219 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002220 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002221 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002222 name="links-" + str( i ),
2223 args=[ ] )
2224 threads.append( t )
2225 t.start()
2226
2227 for t in threads:
2228 t.join()
2229 links.append( t.result )
2230 clusters = []
2231 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002232 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002233 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002234 name="clusters-" + str( i ),
2235 args=[ ] )
2236 threads.append( t )
2237 t.start()
2238
2239 for t in threads:
2240 t.join()
2241 clusters.append( t.result )
2242
2243 elapsed = time.time() - startTime
2244 cliTime = time.time() - cliStart
2245 print "Elapsed time: " + str( elapsed )
2246 print "CLI time: " + str( cliTime )
2247
2248 mnSwitches = main.Mininet1.getSwitches()
2249 mnLinks = main.Mininet1.getLinks()
2250 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002251 for controller in range( len( main.activeNodes ) ):
2252 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002253 if devices[ controller ] and ports[ controller ] and\
2254 "Error" not in devices[ controller ] and\
2255 "Error" not in ports[ controller ]:
2256
2257 currentDevicesResult = main.Mininet1.compareSwitches(
2258 mnSwitches,
2259 json.loads( devices[ controller ] ),
2260 json.loads( ports[ controller ] ) )
2261 else:
2262 currentDevicesResult = main.FALSE
2263 utilities.assert_equals( expect=main.TRUE,
2264 actual=currentDevicesResult,
2265 onpass="ONOS" + controllerStr +
2266 " Switches view is correct",
2267 onfail="ONOS" + controllerStr +
2268 " Switches view is incorrect" )
2269
2270 if links[ controller ] and "Error" not in links[ controller ]:
2271 currentLinksResult = main.Mininet1.compareLinks(
2272 mnSwitches, mnLinks,
2273 json.loads( links[ controller ] ) )
2274 else:
2275 currentLinksResult = main.FALSE
2276 utilities.assert_equals( expect=main.TRUE,
2277 actual=currentLinksResult,
2278 onpass="ONOS" + controllerStr +
2279 " links view is correct",
2280 onfail="ONOS" + controllerStr +
2281 " links view is incorrect" )
2282
2283 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2284 currentHostsResult = main.Mininet1.compareHosts(
2285 mnHosts,
2286 hosts[ controller ] )
2287 else:
2288 currentHostsResult = main.FALSE
2289 utilities.assert_equals( expect=main.TRUE,
2290 actual=currentHostsResult,
2291 onpass="ONOS" + controllerStr +
2292 " hosts exist in Mininet",
2293 onfail="ONOS" + controllerStr +
2294 " hosts don't match Mininet" )
2295 # CHECKING HOST ATTACHMENT POINTS
2296 hostAttachment = True
2297 zeroHosts = False
2298 # FIXME: topo-HA/obelisk specific mappings:
2299 # key is mac and value is dpid
2300 mappings = {}
2301 for i in range( 1, 29 ): # hosts 1 through 28
2302 # set up correct variables:
2303 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2304 if i == 1:
2305 deviceId = "1000".zfill(16)
2306 elif i == 2:
2307 deviceId = "2000".zfill(16)
2308 elif i == 3:
2309 deviceId = "3000".zfill(16)
2310 elif i == 4:
2311 deviceId = "3004".zfill(16)
2312 elif i == 5:
2313 deviceId = "5000".zfill(16)
2314 elif i == 6:
2315 deviceId = "6000".zfill(16)
2316 elif i == 7:
2317 deviceId = "6007".zfill(16)
2318 elif i >= 8 and i <= 17:
2319 dpid = '3' + str( i ).zfill( 3 )
2320 deviceId = dpid.zfill(16)
2321 elif i >= 18 and i <= 27:
2322 dpid = '6' + str( i ).zfill( 3 )
2323 deviceId = dpid.zfill(16)
2324 elif i == 28:
2325 deviceId = "2800".zfill(16)
2326 mappings[ macId ] = deviceId
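                    # For example ( a sketch of the expected values ): host 8 should
                    # map mac "00:00:00:00:00:08" to deviceId "0000000000003008"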
2327 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2328 if hosts[ controller ] == []:
2329 main.log.warn( "There are no hosts discovered" )
2330 zeroHosts = True
2331 else:
2332 for host in hosts[ controller ]:
2333 mac = None
2334 location = None
2335 device = None
2336 port = None
2337 try:
2338 mac = host.get( 'mac' )
2339 assert mac, "mac field could not be found for this host object"
2340
2341 location = host.get( 'location' )
2342 assert location, "location field could not be found for this host object"
2343
2344 # Trim the protocol identifier off deviceId
2345 device = str( location.get( 'elementId' ) ).split(':')[1]
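                                # e.g. an elementId like "of:0000000000003008" ( assumed
                                # format ) would become "0000000000003008"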
2346 assert device, "elementId field could not be found for this host location object"
2347
2348 port = location.get( 'port' )
2349 assert port, "port field could not be found for this host location object"
2350
2351 # Now check if this matches where they should be
2352 if mac and device and port:
2353 if str( port ) != "1":
2354 main.log.error( "The attachment port is incorrect for " +
2355 "host " + str( mac ) +
2356 ". Expected: 1 Actual: " + str( port) )
2357 hostAttachment = False
2358 if device != mappings[ str( mac ) ]:
2359 main.log.error( "The attachment device is incorrect for " +
2360 "host " + str( mac ) +
2361 ". Expected: " + mappings[ str( mac ) ] +
2362 " Actual: " + device )
2363 hostAttachment = False
2364 else:
2365 hostAttachment = False
2366 except AssertionError:
2367 main.log.exception( "Json object not as expected" )
2368 main.log.error( repr( host ) )
2369 hostAttachment = False
2370 else:
2371 main.log.error( "No hosts json output or \"Error\"" +
2372 " in output. hosts = " +
2373 repr( hosts[ controller ] ) )
2374 if zeroHosts is False:
2375 hostAttachment = True
2376
2377 # END CHECKING HOST ATTACHMENT POINTS
2378 devicesResults = devicesResults and currentDevicesResult
2379 linksResults = linksResults and currentLinksResult
2380 hostsResults = hostsResults and currentHostsResult
2381 hostAttachmentResults = hostAttachmentResults and\
2382 hostAttachment
2383
2384 # Compare json objects for hosts and dataplane clusters
2385
2386 # hosts
2387 main.step( "Hosts view is consistent across all ONOS nodes" )
2388 consistentHostsResult = main.TRUE
2389 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002390 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002391 if "Error" not in hosts[ controller ]:
2392 if hosts[ controller ] == hosts[ 0 ]:
2393 continue
2394 else: # hosts not consistent
2395 main.log.error( "hosts from ONOS" + controllerStr +
2396 " is inconsistent with ONOS1" )
2397 main.log.warn( repr( hosts[ controller ] ) )
2398 consistentHostsResult = main.FALSE
2399
2400 else:
2401 main.log.error( "Error in getting ONOS hosts from ONOS" +
2402 controllerStr )
2403 consistentHostsResult = main.FALSE
2404 main.log.warn( "ONOS" + controllerStr +
2405 " hosts response: " +
2406 repr( hosts[ controller ] ) )
2407 utilities.assert_equals(
2408 expect=main.TRUE,
2409 actual=consistentHostsResult,
2410 onpass="Hosts view is consistent across all ONOS nodes",
2411 onfail="ONOS nodes have different views of hosts" )
2412
2413 main.step( "Hosts information is correct" )
2414 hostsResults = hostsResults and ipResult
2415 utilities.assert_equals(
2416 expect=main.TRUE,
2417 actual=hostsResults,
2418 onpass="Host information is correct",
2419 onfail="Host information is incorrect" )
2420
2421 main.step( "Host attachment points to the network" )
2422 utilities.assert_equals(
2423 expect=True,
2424 actual=hostAttachmentResults,
2425 onpass="Hosts are correctly attached to the network",
2426 onfail="ONOS did not correctly attach hosts to the network" )
2427
2428 # Strongly connected clusters of devices
2429 main.step( "Clusters view is consistent across all ONOS nodes" )
2430 consistentClustersResult = main.TRUE
2431 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002432 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002433 if "Error" not in clusters[ controller ]:
2434 if clusters[ controller ] == clusters[ 0 ]:
2435 continue
2436 else: # clusters not consistent
2437 main.log.error( "clusters from ONOS" +
2438 controllerStr +
2439 " is inconsistent with ONOS1" )
2440 consistentClustersResult = main.FALSE
2441
2442 else:
2443 main.log.error( "Error in getting dataplane clusters " +
2444 "from ONOS" + controllerStr )
2445 consistentClustersResult = main.FALSE
2446 main.log.warn( "ONOS" + controllerStr +
2447 " clusters response: " +
2448 repr( clusters[ controller ] ) )
2449 utilities.assert_equals(
2450 expect=main.TRUE,
2451 actual=consistentClustersResult,
2452 onpass="Clusters view is consistent across all ONOS nodes",
2453 onfail="ONOS nodes have different views of clusters" )
2454
2455 main.step( "There is only one SCC" )
2456 # there should always only be one cluster
2457 try:
2458 numClusters = len( json.loads( clusters[ 0 ] ) )
2459 except ( ValueError, TypeError ):
2460 main.log.exception( "Error parsing clusters[0]: " +
2461 repr( clusters[0] ) )
2462            clusterResults = main.FALSE
                numClusters = "ERROR"  # avoid a NameError in the check below
2463 if numClusters == 1:
2464 clusterResults = main.TRUE
2465 utilities.assert_equals(
2466 expect=1,
2467 actual=numClusters,
2468 onpass="ONOS shows 1 SCC",
2469 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2470
2471 topoResult = ( devicesResults and linksResults
2472 and hostsResults and consistentHostsResult
2473 and consistentClustersResult and clusterResults
2474 and ipResult and hostAttachmentResults )
2475
2476 topoResult = topoResult and int( count <= 2 )
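            # i.e. the topology is also required to converge within two polling attempts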
2477 note = "note it takes about " + str( int( cliTime ) ) + \
2478 " seconds for the test to make all the cli calls to fetch " +\
2479 "the topology from each ONOS instance"
2480 main.log.info(
2481 "Very crass estimate for topology discovery/convergence( " +
2482 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2483 str( count ) + " tries" )
2484
2485 main.step( "Device information is correct" )
2486 utilities.assert_equals(
2487 expect=main.TRUE,
2488 actual=devicesResults,
2489 onpass="Device information is correct",
2490 onfail="Device information is incorrect" )
2491
2492 main.step( "Links are correct" )
2493 utilities.assert_equals(
2494 expect=main.TRUE,
2495 actual=linksResults,
2496                                 onpass="Links are correct",
2497 onfail="Links are incorrect" )
2498
2499 # FIXME: move this to an ONOS state case
2500 main.step( "Checking ONOS nodes" )
2501 nodesOutput = []
2502 nodeResults = main.TRUE
2503 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002504 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002505 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002506 name="nodes-" + str( i ),
2507 args=[ ] )
2508 threads.append( t )
2509 t.start()
2510
2511 for t in threads:
2512 t.join()
2513 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002514 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002515 for i in nodesOutput:
2516 try:
2517 current = json.loads( i )
2518 for node in current:
2519 currentResult = main.FALSE
2520 if node['ip'] in ips: # node in nodes() output is in cell
2521 if node['state'] == 'ACTIVE':
2522 currentResult = main.TRUE
2523 else:
2524 main.log.error( "Error in ONOS node availability" )
2525 main.log.error(
2526 json.dumps( current,
2527 sort_keys=True,
2528 indent=4,
2529 separators=( ',', ': ' ) ) )
2530 break
2531 nodeResults = nodeResults and currentResult
2532 except ( ValueError, TypeError ):
2533 main.log.error( "Error parsing nodes output" )
2534 main.log.warn( repr( i ) )
2535 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2536 onpass="Nodes check successful",
2537 onfail="Nodes check NOT successful" )
2538
2539 def CASE9( self, main ):
2540 """
2541 Link s3-s28 down
2542 """
2543 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002544 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002545 assert main, "main not defined"
2546 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002547 assert main.CLIs, "main.CLIs not defined"
2548 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002549 # NOTE: You should probably run a topology check after this
2550
2551 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2552
2553 description = "Turn off a link to ensure that Link Discovery " +\
2554 "is working properly"
2555 main.case( description )
2556
2557 main.step( "Kill Link between s3 and s28" )
2558 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2559 main.log.info( "Waiting " + str( linkSleep ) +
2560 " seconds for link down to be discovered" )
2561 time.sleep( linkSleep )
2562 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2563 onpass="Link down successful",
2564 onfail="Failed to bring link down" )
2565 # TODO do some sort of check here
2566
2567 def CASE10( self, main ):
2568 """
2569 Link s3-s28 up
2570 """
2571 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002572 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002573 assert main, "main not defined"
2574 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002575 assert main.CLIs, "main.CLIs not defined"
2576 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002577 # NOTE: You should probably run a topology check after this
2578
2579 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2580
2581 description = "Restore a link to ensure that Link Discovery is " + \
2582 "working properly"
2583 main.case( description )
2584
2585 main.step( "Bring link between s3 and s28 back up" )
2586 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2587 main.log.info( "Waiting " + str( linkSleep ) +
2588 " seconds for link up to be discovered" )
2589 time.sleep( linkSleep )
2590 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2591 onpass="Link up successful",
2592 onfail="Failed to bring link up" )
2593 # TODO do some sort of check here
2594
2595 def CASE11( self, main ):
2596 """
2597 Switch Down
2598 """
2599 # NOTE: You should probably run a topology check after this
2600 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002601 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002602 assert main, "main not defined"
2603 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002604 assert main.CLIs, "main.CLIs not defined"
2605 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002606
2607 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2608
2609 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002610 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002611 main.case( description )
2612 switch = main.params[ 'kill' ][ 'switch' ]
2613 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2614
2615 # TODO: Make this switch parameterizable
2616 main.step( "Kill " + switch )
2617 main.log.info( "Deleting " + switch )
2618 main.Mininet1.delSwitch( switch )
2619 main.log.info( "Waiting " + str( switchSleep ) +
2620 " seconds for switch down to be discovered" )
2621 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002622 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002623 # Peek at the deleted switch
2624 main.log.warn( str( device ) )
2625 result = main.FALSE
2626 if device and device[ 'available' ] is False:
2627 result = main.TRUE
2628 utilities.assert_equals( expect=main.TRUE, actual=result,
2629 onpass="Kill switch successful",
2630 onfail="Failed to kill switch?" )
2631
2632 def CASE12( self, main ):
2633 """
2634 Switch Up
2635 """
2636 # NOTE: You should probably run a topology check after this
2637 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002638 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002639 assert main, "main not defined"
2640 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002641 assert main.CLIs, "main.CLIs not defined"
2642 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002643 assert ONOS1Port, "ONOS1Port not defined"
2644 assert ONOS2Port, "ONOS2Port not defined"
2645 assert ONOS3Port, "ONOS3Port not defined"
2646 assert ONOS4Port, "ONOS4Port not defined"
2647 assert ONOS5Port, "ONOS5Port not defined"
2648 assert ONOS6Port, "ONOS6Port not defined"
2649 assert ONOS7Port, "ONOS7Port not defined"
2650
2651 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2652 switch = main.params[ 'kill' ][ 'switch' ]
2653 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2654 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002655 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002656 description = "Adding a switch to ensure it is discovered correctly"
2657 main.case( description )
2658
2659 main.step( "Add back " + switch )
2660 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2661 for peer in links:
2662 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002663 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002664 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2665 main.log.info( "Waiting " + str( switchSleep ) +
2666 " seconds for switch up to be discovered" )
2667 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002668 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002669 # Peek at the deleted switch
2670 main.log.warn( str( device ) )
2671 result = main.FALSE
2672 if device and device[ 'available' ]:
2673 result = main.TRUE
2674 utilities.assert_equals( expect=main.TRUE, actual=result,
2675 onpass="add switch successful",
2676 onfail="Failed to add switch?" )
2677
2678 def CASE13( self, main ):
2679 """
2680 Clean up
2681 """
2682 import os
2683 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002684 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002685 assert main, "main not defined"
2686 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002687 assert main.CLIs, "main.CLIs not defined"
2688 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002689
2690 # printing colors to terminal
2691 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2692 'blue': '\033[94m', 'green': '\033[92m',
2693 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2694 main.case( "Test Cleanup" )
2695 main.step( "Killing tcpdumps" )
2696 main.Mininet2.stopTcpdump()
2697
2698 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002699 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002700 main.step( "Copying MN pcap and ONOS log files to test station" )
2701 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2702 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002703 # NOTE: MN Pcap file is being saved to logdir.
2704 # We scp this file as MN and TestON aren't necessarily the same vm
2705
2706 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002707 # TODO: Load these from params
2708 # NOTE: must end in /
2709 logFolder = "/opt/onos/log/"
2710 logFiles = [ "karaf.log", "karaf.log.1" ]
2711 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002712 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002713 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002714 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002715 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2716 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002717 # std*.log's
2718 # NOTE: must end in /
2719 logFolder = "/opt/onos/var/"
2720 logFiles = [ "stderr.log", "stdout.log" ]
2721 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002722 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002723 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002724 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002725 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2726 logFolder + f, dstName )
2727 else:
2728 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002729
2730 main.step( "Stopping Mininet" )
2731 mnResult = main.Mininet1.stopNet()
2732 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2733 onpass="Mininet stopped",
2734 onfail="MN cleanup NOT successful" )
2735
2736 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002737 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002738 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2739 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002740
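            # Save the timing data; gossipTime is presumably set earlier in the test
            # when intent gossip is timed, so the NameError handler below just logs
            # the problem if that case did not run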
2741 try:
2742 timerLog = open( main.logdir + "/Timers.csv", 'w')
2743            # Overwrite the file with the labels and timer values, then close
2744 labels = "Gossip Intents, Restart"
2745 data = str( gossipTime ) + ", " + str( main.restartTime )
2746 timerLog.write( labels + "\n" + data )
2747 timerLog.close()
2748 except NameError, e:
2749 main.log.exception(e)
2750
2751 def CASE14( self, main ):
2752 """
2753 start election app on all onos nodes
2754 """
Jon Halle1a3b752015-07-22 13:02:46 -07002755 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002756 assert main, "main not defined"
2757 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002758 assert main.CLIs, "main.CLIs not defined"
2759 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002760
2761        main.case( "Start Leadership Election app" )
2762 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002763 onosCli = main.CLIs[ main.activeNodes[0] ]
2764 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002765 utilities.assert_equals(
2766 expect=main.TRUE,
2767 actual=appResult,
2768 onpass="Election app installed",
2769 onfail="Something went wrong with installing Leadership election" )
2770
2771 main.step( "Run for election on each node" )
2772 leaderResult = main.TRUE
2773 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002774 for i in main.activeNodes:
2775 main.CLIs[i].electionTestRun()
2776 for i in main.activeNodes:
2777 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002778 leader = cli.electionTestLeader()
2779 if leader is None or leader == main.FALSE:
2780 main.log.error( cli.name + ": Leader for the election app " +
2781 "should be an ONOS node, instead got '" +
2782 str( leader ) + "'" )
2783 leaderResult = main.FALSE
2784 leaders.append( leader )
2785 utilities.assert_equals(
2786 expect=main.TRUE,
2787 actual=leaderResult,
2788 onpass="Successfully ran for leadership",
2789 onfail="Failed to run for leadership" )
2790
2791 main.step( "Check that each node shows the same leader" )
2792 sameLeader = main.TRUE
2793 if len( set( leaders ) ) != 1:
2794 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002795 main.log.error( "Results of electionTestLeader in order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002796 str( leaders ) )
2797 utilities.assert_equals(
2798 expect=main.TRUE,
2799 actual=sameLeader,
2800 onpass="Leadership is consistent for the election topic",
2801 onfail="Nodes have different leaders" )
2802
2803 def CASE15( self, main ):
2804 """
2805 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002806 15.1 Run election on each node
2807 15.2 Check that each node has the same leaders and candidates
2808 15.3 Find current leader and withdraw
2809 15.4 Check that a new node was elected leader
2810 15.5 Check that the new leader was the candidate of the old leader
2811 15.6 Run for election on old leader
2812 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2813 15.8 Make sure that the old leader was added to the candidate list
2814
2815 old and new variable prefixes refer to data from before vs. after
2816 withdrawal, and later to before withdrawal vs. after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002817 """
2818 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002819 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002820 assert main, "main not defined"
2821 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002822 assert main.CLIs, "main.CLIs not defined"
2823 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002824
Jon Hall5cf14d52015-07-16 12:15:19 -07002825 description = "Check that Leadership Election is still functional"
2826 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002827 # NOTE: Need to re-run since being a candidate is not persistent
2828 # TODO: add check for "Command not found:" in the driver, this
2829 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002830
acsmars71adceb2015-08-31 15:09:26 -07002831 oldLeaders = [] # leaders by node before withdrawal from candidates
2832 newLeaders = [] # leaders by node after withdrawal from candidates
2833 oldAllCandidates = [] # list of lists of each node's candidates before
2834 newAllCandidates = [] # list of lists of each node's candidates after
2835 oldCandidates = [] # list of candidates from node 0 before withdrawal
2836 newCandidates = [] # list of candidates from node 0 after withdrawal
2837 oldLeader = '' # the old leader from oldLeaders, None if not same
2838 newLeader = '' # the new leader from newLeaders, None if not same
2839 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2840 expectNoLeader = False # True when there is only one leader
2841 if main.numCtrls == 1:
2842 expectNoLeader = True
2843
2844 main.step( "Run for election on each node" )
2845 electionResult = main.TRUE
2846
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002847 for i in main.activeNodes: # run test election on each node
2848 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002849 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002850 utilities.assert_equals(
2851 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002852 actual=electionResult,
2853 onpass="All nodes successfully ran for leadership",
2854 onfail="At least one node failed to run for leadership" )
2855
acsmars3a72bde2015-09-02 14:16:22 -07002856 if electionResult == main.FALSE:
2857 main.log.error(
2858 "Skipping Test Case because Election Test App isn't loaded" )
2859 main.skipCase()
2860
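        # NOTE: specificLeaderCandidate() is assumed to return a list for the
        #       election topic whose first element is the current leader and
        #       whose remaining elements are the candidates in priority order;
        #       that is how node[ 0 ] and the candidate lists are used below.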
acsmars71adceb2015-08-31 15:09:26 -07002861 main.step( "Check that each node shows the same leader and candidates" )
2862 sameResult = main.TRUE
2863 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002864 for i in main.activeNodes:
2865 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002866 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2867 oldAllCandidates.append( node )
2868 oldLeaders.append( node[ 0 ] )
2869 oldCandidates = oldAllCandidates[ 0 ]
2870
2871 # Check that each node has the same leader. Defines oldLeader
2872 if len( set( oldLeaders ) ) != 1:
2873 sameResult = main.FALSE
2874 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2875 oldLeader = None
2876 else:
2877 oldLeader = oldLeaders[ 0 ]
2878
2879 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002880 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002881 for candidates in oldAllCandidates:
2882 if set( candidates ) != set( oldCandidates ):
2883 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002884 candidateDiscrepancy = True
2885
2886 if candidateDiscrepancy:
2887 failMessage += " and candidates"
2888
acsmars71adceb2015-08-31 15:09:26 -07002889 utilities.assert_equals(
2890 expect=main.TRUE,
2891 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002892 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002893 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002894
2895 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002896 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002897 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002898 if oldLeader is None:
2899 main.log.error( "Leadership isn't consistent." )
2900 withdrawResult = main.FALSE
2901 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002902 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002903 if oldLeader == main.nodes[ i ].ip_address:
2904 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002905 break
2906 else: # FOR/ELSE statement
2907 main.log.error( "Leader election, could not find current leader" )
2908 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002909 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002910 utilities.assert_equals(
2911 expect=main.TRUE,
2912 actual=withdrawResult,
2913 onpass="Node was withdrawn from election",
2914 onfail="Node was not withdrawn from election" )
2915
acsmars71adceb2015-08-31 15:09:26 -07002916 main.step( "Check that a new node was elected leader" )
2917
Jon Hall5cf14d52015-07-16 12:15:19 -07002918 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002919 newLeaderResult = main.TRUE
2920 failMessage = "Nodes have different leaders"
2921
2922 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002923 for i in main.activeNodes:
2924 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002925 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2926 # elections might no have finished yet
2927 if node[ 0 ] == 'none' and not expectNoLeader:
2928 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2929 "sure elections are complete." )
2930 time.sleep(5)
2931 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2932 # election still isn't done or there is a problem
2933 if node[ 0 ] == 'none':
2934 main.log.error( "No leader was elected on at least 1 node" )
2935 newLeaderResult = main.FALSE
2936 newAllCandidates.append( node )
2937 newLeaders.append( node[ 0 ] )
2938 newCandidates = newAllCandidates[ 0 ]
2939
2940 # Check that each node has the same leader. Defines newLeader
2941 if len( set( newLeaders ) ) != 1:
2942 newLeaderResult = main.FALSE
2943 main.log.error( "Nodes have different leaders: " +
2944 str( newLeaders ) )
2945 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002946 else:
acsmars71adceb2015-08-31 15:09:26 -07002947 newLeader = newLeaders[ 0 ]
2948
2949 # Check that each node's candidate list is the same
2950 for candidates in newAllCandidates:
2951 if set( candidates ) != set( newCandidates ):
2952 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002953 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002954
2955 # Check that the new leader is not the older leader, which was withdrawn
2956 if newLeader == oldLeader:
2957 newLeaderResult = main.FALSE
2958 main.log.error( "All nodes still see old leader: " + oldLeader +
2959 " as the current leader" )
2960
Jon Hall5cf14d52015-07-16 12:15:19 -07002961 utilities.assert_equals(
2962 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002963 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002964 onpass="Leadership election passed",
2965 onfail="Something went wrong with Leadership election" )
2966
acsmars71adceb2015-08-31 15:09:26 -07002967 main.step( "Check that the new leader was the candidate of the old leader" )
2968 # candidates[ 2 ] should become the top candidate after withdrawal
2969 correctCandidateResult = main.TRUE
2970 if expectNoLeader:
2971 if newLeader == 'none':
2972 main.log.info( "No leader expected. None found. Pass" )
2973 correctCandidateResult = main.TRUE
2974 else:
2975 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2976 correctCandidateResult = main.FALSE
2977 elif newLeader != oldCandidates[ 2 ]:
2978 correctCandidateResult = main.FALSE
2979 main.log.error( "Candidate " + newLeader + " was elected. " +
2980 oldCandidates[ 2 ] + " should have had priority." )
2981
2982 utilities.assert_equals(
2983 expect=main.TRUE,
2984 actual=correctCandidateResult,
2985 onpass="Correct Candidate Elected",
2986 onfail="Incorrect Candidate Elected" )
2987
Jon Hall5cf14d52015-07-16 12:15:19 -07002988 main.step( "Run for election on old leader( just so everyone " +
2989 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002990 if oldLeaderCLI is not None:
2991 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002992 else:
acsmars71adceb2015-08-31 15:09:26 -07002993 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002994 runResult = main.FALSE
2995 utilities.assert_equals(
2996 expect=main.TRUE,
2997 actual=runResult,
2998 onpass="App re-ran for election",
2999 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07003000 main.step(
3001 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003002 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07003003 positionResult = main.TRUE
3004 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
3005
3006 # Reset and reuse the new candidate and leaders lists
3007 newAllCandidates = []
3008 newCandidates = []
3009 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003010 for i in main.activeNodes:
3011 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003012 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3013 if oldLeader not in node: # election might no have finished yet
3014 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3015 "be sure elections are complete" )
3016 time.sleep(5)
3017 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3018 if oldLeader not in node: # election still isn't done, errors
3019 main.log.error(
3020 "Old leader was not elected on at least one node" )
3021 positionResult = main.FALSE
3022 newAllCandidates.append( node )
3023 newLeaders.append( node[ 0 ] )
3024 newCandidates = newAllCandidates[ 0 ]
3025
3026 # Check that each node has the same leader. Defines newLeader
3027 if len( set( newLeaders ) ) != 1:
3028 positionResult = main.FALSE
3029 main.log.error( "Nodes have different leaders: " +
3030 str( newLeaders ) )
3031 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003032 else:
acsmars71adceb2015-08-31 15:09:26 -07003033 newLeader = newLeaders[ 0 ]
3034
3035 # Check that each node's candidate list is the same
3036 for candidates in newAllCandidates:
3037 if set( candidates ) != set( newCandidates ):
3038 positionResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003039 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003040
3041 # Check that the re-elected node is last on the candidate List
3042 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003043 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003044 str( newCandidates ) )
3045 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003046
3047 utilities.assert_equals(
3048 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003049 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003050 onpass="Old leader successfully re-ran for election",
3051 onfail="Something went wrong with Leadership election after " +
3052 "the old leader re-ran for election" )
3053
3054 def CASE16( self, main ):
3055 """
3056 Install Distributed Primitives app
3057 """
3058 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003059 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003060 assert main, "main not defined"
3061 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003062 assert main.CLIs, "main.CLIs not defined"
3063 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003064
3065 # Variables for the distributed primitives tests
3066 global pCounterName
3067 global iCounterName
3068 global pCounterValue
3069 global iCounterValue
3070 global onosSet
3071 global onosSetName
3072 pCounterName = "TestON-Partitions"
3073 iCounterName = "TestON-inMemory"
3074 pCounterValue = 0
3075 iCounterValue = 0
3076 onosSet = set([])
3077 onosSetName = "TestON-set"
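        # These globals track the values TestON expects the distributed
        # primitives to hold, so CASE17 can compare ONOS state against them.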
3078
3079 description = "Install Primitives app"
3080 main.case( description )
3081 main.step( "Install Primitives app" )
3082 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003083 node = main.activeNodes[0]
3084 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003085 utilities.assert_equals( expect=main.TRUE,
3086 actual=appResults,
3087 onpass="Primitives app activated",
3088 onfail="Primitives app not activated" )
3089 time.sleep( 5 ) # To allow all nodes to activate
3090
3091 def CASE17( self, main ):
3092 """
3093 Check for basic functionality with distributed primitives
3094 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003095 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003096 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003097 assert main, "main not defined"
3098 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003099 assert main.CLIs, "main.CLIs not defined"
3100 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003101 assert pCounterName, "pCounterName not defined"
3102 assert iCounterName, "iCounterName not defined"
3103 assert onosSetName, "onosSetName not defined"
3104 # NOTE: assert fails if the value is 0/None/Empty/False, so the counter values are checked with try/except NameError instead
3105 try:
3106 pCounterValue
3107 except NameError:
3108 main.log.error( "pCounterValue not defined, setting to 0" )
3109 pCounterValue = 0
3110 try:
3111 iCounterValue
3112 except NameError:
3113 main.log.error( "iCounterValue not defined, setting to 0" )
3114 iCounterValue = 0
3115 try:
3116 onosSet
3117 except NameError:
3118 main.log.error( "onosSet not defined, setting to empty Set" )
3119 onosSet = set([])
3120 # Variables for the distributed primitives tests. These are local only
3121 addValue = "a"
3122 addAllValue = "a b c d e f"
3123 retainValue = "c d e f"
3124
3125 description = "Check for basic functionality with distributed " +\
3126 "primitives"
3127 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003128 main.caseExplanation = "Test the methods of the distributed " +\
3129 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003130 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003131 # Partitioned counters
3132 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003133 pCounters = []
3134 threads = []
3135 addedPValues = []
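        # Fan out one CLI call per active node, each in its own thread, then
        # join the threads and collect t.result so the responses can be
        # compared against the locally tracked counter value.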
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003136 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003137 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3138 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003139 args=[ pCounterName ] )
3140 pCounterValue += 1
3141 addedPValues.append( pCounterValue )
3142 threads.append( t )
3143 t.start()
3144
3145 for t in threads:
3146 t.join()
3147 pCounters.append( t.result )
3148 # Check that counter incremented numController times
3149 pCounterResults = True
3150 for i in addedPValues:
3151 tmpResult = i in pCounters
3152 pCounterResults = pCounterResults and tmpResult
3153 if not tmpResult:
3154 main.log.error( str( i ) + " is not in partitioned "
3155 "counter incremented results" )
3156 utilities.assert_equals( expect=True,
3157 actual=pCounterResults,
3158 onpass="Default counter incremented",
3159 onfail="Error incrementing default" +
3160 " counter" )
3161
Jon Halle1a3b752015-07-22 13:02:46 -07003162 main.step( "Get then Increment a default counter on each node" )
3163 pCounters = []
3164 threads = []
3165 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003166 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003167 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3168 name="counterGetAndAdd-" + str( i ),
3169 args=[ pCounterName ] )
3170 addedPValues.append( pCounterValue )
3171 pCounterValue += 1
3172 threads.append( t )
3173 t.start()
3174
3175 for t in threads:
3176 t.join()
3177 pCounters.append( t.result )
3178 # Check that counter incremented numController times
3179 pCounterResults = True
3180 for i in addedPValues:
3181 tmpResult = i in pCounters
3182 pCounterResults = pCounterResults and tmpResult
3183 if not tmpResult:
3184 main.log.error( str( i ) + " is not in partitioned "
3185 "counter incremented results" )
3186 utilities.assert_equals( expect=True,
3187 actual=pCounterResults,
3188 onpass="Default counter incremented",
3189 onfail="Error incrementing default" +
3190 " counter" )
3191
3192 main.step( "Counters we added have the correct values" )
3193 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3194 utilities.assert_equals( expect=main.TRUE,
3195 actual=incrementCheck,
3196 onpass="Added counters are correct",
3197 onfail="Added counters are incorrect" )
3198
3199 main.step( "Add -8 to then get a default counter on each node" )
3200 pCounters = []
3201 threads = []
3202 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003203 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003204 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3205 name="counterIncrement-" + str( i ),
3206 args=[ pCounterName ],
3207 kwargs={ "delta": -8 } )
3208 pCounterValue += -8
3209 addedPValues.append( pCounterValue )
3210 threads.append( t )
3211 t.start()
3212
3213 for t in threads:
3214 t.join()
3215 pCounters.append( t.result )
3216 # Check that counter incremented numController times
3217 pCounterResults = True
3218 for i in addedPValues:
3219 tmpResult = i in pCounters
3220 pCounterResults = pCounterResults and tmpResult
3221 if not tmpResult:
3222 main.log.error( str( i ) + " is not in partitioned "
3223 "counter incremented results" )
3224 utilities.assert_equals( expect=True,
3225 actual=pCounterResults,
3226 onpass="Default counter incremented",
3227 onfail="Error incrementing default" +
3228 " counter" )
3229
3230 main.step( "Add 5 to then get a default counter on each node" )
3231 pCounters = []
3232 threads = []
3233 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003234 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003235 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3236 name="counterIncrement-" + str( i ),
3237 args=[ pCounterName ],
3238 kwargs={ "delta": 5 } )
3239 pCounterValue += 5
3240 addedPValues.append( pCounterValue )
3241 threads.append( t )
3242 t.start()
3243
3244 for t in threads:
3245 t.join()
3246 pCounters.append( t.result )
3247 # Check that counter incremented numController times
3248 pCounterResults = True
3249 for i in addedPValues:
3250 tmpResult = i in pCounters
3251 pCounterResults = pCounterResults and tmpResult
3252 if not tmpResult:
3253 main.log.error( str( i ) + " is not in partitioned "
3254 "counter incremented results" )
3255 utilities.assert_equals( expect=True,
3256 actual=pCounterResults,
3257 onpass="Default counter incremented",
3258 onfail="Error incrementing default" +
3259 " counter" )
3260
3261 main.step( "Get then add 5 to a default counter on each node" )
3262 pCounters = []
3263 threads = []
3264 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003265 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003266 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3267 name="counterIncrement-" + str( i ),
3268 args=[ pCounterName ],
3269 kwargs={ "delta": 5 } )
3270 addedPValues.append( pCounterValue )
3271 pCounterValue += 5
3272 threads.append( t )
3273 t.start()
3274
3275 for t in threads:
3276 t.join()
3277 pCounters.append( t.result )
3278 # Check that counter incremented numController times
3279 pCounterResults = True
3280 for i in addedPValues:
3281 tmpResult = i in pCounters
3282 pCounterResults = pCounterResults and tmpResult
3283 if not tmpResult:
3284 main.log.error( str( i ) + " is not in partitioned "
3285 "counter incremented results" )
3286 utilities.assert_equals( expect=True,
3287 actual=pCounterResults,
3288 onpass="Default counter incremented",
3289 onfail="Error incrementing default" +
3290 " counter" )
3291
3292 main.step( "Counters we added have the correct values" )
3293 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3294 utilities.assert_equals( expect=main.TRUE,
3295 actual=incrementCheck,
3296 onpass="Added counters are correct",
3297 onfail="Added counters are incorrect" )
3298
3299 # In-Memory counters
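        # In-memory counter tests reuse the same wrappers with inMemory=True,
        # which is assumed to select ONOS's in-memory counter store rather
        # than the partitioned one; iCounterValue tracks the expected value.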
3300 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003301 iCounters = []
3302 addedIValues = []
3303 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003304 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003305 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003306 name="icounterIncrement-" + str( i ),
3307 args=[ iCounterName ],
3308 kwargs={ "inMemory": True } )
3309 iCounterValue += 1
3310 addedIValues.append( iCounterValue )
3311 threads.append( t )
3312 t.start()
3313
3314 for t in threads:
3315 t.join()
3316 iCounters.append( t.result )
3317 # Check that counter incremented numController times
3318 iCounterResults = True
3319 for i in addedIValues:
3320 tmpResult = i in iCounters
3321 iCounterResults = iCounterResults and tmpResult
3322 if not tmpResult:
3323 main.log.error( str( i ) + " is not in the in-memory "
3324 "counter incremented results" )
3325 utilities.assert_equals( expect=True,
3326 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003327 onpass="In-memory counter incremented",
3328 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003329 " counter" )
3330
Jon Halle1a3b752015-07-22 13:02:46 -07003331 main.step( "Get then Increment an in-memory counter on each node" )
3332 iCounters = []
3333 threads = []
3334 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003335 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003336 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3337 name="counterGetAndAdd-" + str( i ),
3338 args=[ iCounterName ],
3339 kwargs={ "inMemory": True } )
3340 addedIValues.append( iCounterValue )
3341 iCounterValue += 1
3342 threads.append( t )
3343 t.start()
3344
3345 for t in threads:
3346 t.join()
3347 iCounters.append( t.result )
3348 # Check that counter incremented numController times
3349 iCounterResults = True
3350 for i in addedIValues:
3351 tmpResult = i in iCounters
3352 iCounterResults = iCounterResults and tmpResult
3353 if not tmpResult:
3354 main.log.error( str( i ) + " is not in in-memory "
3355 "counter incremented results" )
3356 utilities.assert_equals( expect=True,
3357 actual=iCounterResults,
3358 onpass="In-memory counter incremented",
3359 onfail="Error incrementing in-memory" +
3360 " counter" )
3361
3362 main.step( "Counters we added have the correct values" )
3363 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3364 utilities.assert_equals( expect=main.TRUE,
3365 actual=incrementCheck,
3366 onpass="Added counters are correct",
3367 onfail="Added counters are incorrect" )
3368
3369 main.step( "Add -8 to then get an in-memory counter on each node" )
3370 iCounters = []
3371 threads = []
3372 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003373 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003374 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3375 name="counterIncrement-" + str( i ),
3376 args=[ iCounterName ],
3377 kwargs={ "delta": -8, "inMemory": True } )
3378 iCounterValue += -8
3379 addedIValues.append( iCounterValue )
3380 threads.append( t )
3381 t.start()
3382
3383 for t in threads:
3384 t.join()
3385 iCounters.append( t.result )
3386 # Check that counter incremented numController times
3387 iCounterResults = True
3388 for i in addedIValues:
3389 tmpResult = i in iCounters
3390 iCounterResults = iCounterResults and tmpResult
3391 if not tmpResult:
3392 main.log.error( str( i ) + " is not in in-memory "
3393 "counter incremented results" )
3394 utilities.assert_equals( expect=True,
3395 actual=iCounterResults,
3396 onpass="In-memory counter incremented",
3397 onfail="Error incrementing in-memory" +
3398 " counter" )
3399
3400 main.step( "Add 5 to then get an in-memory counter on each node" )
3401 iCounters = []
3402 threads = []
3403 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003404 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003405 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3406 name="counterIncrement-" + str( i ),
3407 args=[ iCounterName ],
3408 kwargs={ "delta": 5, "inMemory": True } )
3409 iCounterValue += 5
3410 addedIValues.append( iCounterValue )
3411 threads.append( t )
3412 t.start()
3413
3414 for t in threads:
3415 t.join()
3416 iCounters.append( t.result )
3417 # Check that counter incremented numController times
3418 iCounterResults = True
3419 for i in addedIValues:
3420 tmpResult = i in iCounters
3421 iCounterResults = iCounterResults and tmpResult
3422 if not tmpResult:
3423 main.log.error( str( i ) + " is not in in-memory "
3424 "counter incremented results" )
3425 utilities.assert_equals( expect=True,
3426 actual=iCounterResults,
3427 onpass="In-memory counter incremented",
3428 onfail="Error incrementing in-memory" +
3429 " counter" )
3430
3431 main.step( "Get then add 5 to an in-memory counter on each node" )
3432 iCounters = []
3433 threads = []
3434 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003435 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003436 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3437 name="counterIncrement-" + str( i ),
3438 args=[ iCounterName ],
3439 kwargs={ "delta": 5, "inMemory": True } )
3440 addedIValues.append( iCounterValue )
3441 iCounterValue += 5
3442 threads.append( t )
3443 t.start()
3444
3445 for t in threads:
3446 t.join()
3447 iCounters.append( t.result )
3448 # Check that counter incremented numController times
3449 iCounterResults = True
3450 for i in addedIValues:
3451 tmpResult = i in iCounters
3452 iCounterResults = iCounterResults and tmpResult
3453 if not tmpResult:
3454 main.log.error( str( i ) + " is not in in-memory "
3455 "counter incremented results" )
3456 utilities.assert_equals( expect=True,
3457 actual=iCounterResults,
3458 onpass="In-memory counter incremented",
3459 onfail="Error incrementing in-memory" +
3460 " counter" )
3461
3462 main.step( "Counters we added have the correct values" )
3463 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3464 utilities.assert_equals( expect=main.TRUE,
3465 actual=incrementCheck,
3466 onpass="Added counters are correct",
3467 onfail="Added counters are incorrect" )
3468
Jon Hall5cf14d52015-07-16 12:15:19 -07003469 main.step( "Check counters are consistent across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003470 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003471 utilities.assert_equals( expect=main.TRUE,
3472 actual=consistentCounterResults,
3473 onpass="ONOS counters are consistent " +
3474 "across nodes",
3475 onfail="ONOS Counters are inconsistent " +
3476 "across nodes" )
3477
3478 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003479 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3480 incrementCheck = incrementCheck and \
3481 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003482 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003483 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003484 onpass="Added counters are correct",
3485 onfail="Added counters are incorrect" )
3486 # DISTRIBUTED SETS
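        # The set tests below walk through get, size, add, addAll, contains,
        # containsAll, remove, removeAll and clear, comparing each node's
        # view of the distributed set against the local onosSet copy.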
3487 main.step( "Distributed Set get" )
3488 size = len( onosSet )
3489 getResponses = []
3490 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003491 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003492 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003493 name="setTestGet-" + str( i ),
3494 args=[ onosSetName ] )
3495 threads.append( t )
3496 t.start()
3497 for t in threads:
3498 t.join()
3499 getResponses.append( t.result )
3500
3501 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003502 for i in range( len( main.activeNodes ) ):
3503 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003504 if isinstance( getResponses[ i ], list):
3505 current = set( getResponses[ i ] )
3506 if len( current ) == len( getResponses[ i ] ):
3507 # no repeats
3508 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003509 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003510 " has incorrect view" +
3511 " of set " + onosSetName + ":\n" +
3512 str( getResponses[ i ] ) )
3513 main.log.debug( "Expected: " + str( onosSet ) )
3514 main.log.debug( "Actual: " + str( current ) )
3515 getResults = main.FALSE
3516 else:
3517 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003518 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003519 " has repeat elements in" +
3520 " set " + onosSetName + ":\n" +
3521 str( getResponses[ i ] ) )
3522 getResults = main.FALSE
3523 elif getResponses[ i ] == main.ERROR:
3524 getResults = main.FALSE
3525 utilities.assert_equals( expect=main.TRUE,
3526 actual=getResults,
3527 onpass="Set elements are correct",
3528 onfail="Set elements are incorrect" )
3529
3530 main.step( "Distributed Set size" )
3531 sizeResponses = []
3532 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003533 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003534 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003535 name="setTestSize-" + str( i ),
3536 args=[ onosSetName ] )
3537 threads.append( t )
3538 t.start()
3539 for t in threads:
3540 t.join()
3541 sizeResponses.append( t.result )
3542
3543 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003544 for i in range( len( main.activeNodes ) ):
3545 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003546 if size != sizeResponses[ i ]:
3547 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003548 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003549 " expected a size of " + str( size ) +
3550 " for set " + onosSetName +
3551 " but got " + str( sizeResponses[ i ] ) )
3552 utilities.assert_equals( expect=main.TRUE,
3553 actual=sizeResults,
3554 onpass="Set sizes are correct",
3555 onfail="Set sizes are incorrect" )
3556
3557 main.step( "Distributed Set add()" )
3558 onosSet.add( addValue )
3559 addResponses = []
3560 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003561 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003562 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003563 name="setTestAdd-" + str( i ),
3564 args=[ onosSetName, addValue ] )
3565 threads.append( t )
3566 t.start()
3567 for t in threads:
3568 t.join()
3569 addResponses.append( t.result )
3570
3571 # main.TRUE = successfully changed the set
3572 # main.FALSE = action resulted in no change in set
3573 # main.ERROR - Some error in executing the function
3574 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003575 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003576 if addResponses[ i ] == main.TRUE:
3577 # All is well
3578 pass
3579 elif addResponses[ i ] == main.FALSE:
3580 # Already in set, probably fine
3581 pass
3582 elif addResponses[ i ] == main.ERROR:
3583 # Error in execution
3584 addResults = main.FALSE
3585 else:
3586 # unexpected result
3587 addResults = main.FALSE
3588 if addResults != main.TRUE:
3589 main.log.error( "Error executing set add" )
3590
3591 # Check if set is still correct
3592 size = len( onosSet )
3593 getResponses = []
3594 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003595 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003596 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003597 name="setTestGet-" + str( i ),
3598 args=[ onosSetName ] )
3599 threads.append( t )
3600 t.start()
3601 for t in threads:
3602 t.join()
3603 getResponses.append( t.result )
3604 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003605 for i in range( len( main.activeNodes ) ):
3606 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003607 if isinstance( getResponses[ i ], list):
3608 current = set( getResponses[ i ] )
3609 if len( current ) == len( getResponses[ i ] ):
3610 # no repeats
3611 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003612 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003613 " of set " + onosSetName + ":\n" +
3614 str( getResponses[ i ] ) )
3615 main.log.debug( "Expected: " + str( onosSet ) )
3616 main.log.debug( "Actual: " + str( current ) )
3617 getResults = main.FALSE
3618 else:
3619 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003620 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003621 " set " + onosSetName + ":\n" +
3622 str( getResponses[ i ] ) )
3623 getResults = main.FALSE
3624 elif getResponses[ i ] == main.ERROR:
3625 getResults = main.FALSE
3626 sizeResponses = []
3627 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003628 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003629 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003630 name="setTestSize-" + str( i ),
3631 args=[ onosSetName ] )
3632 threads.append( t )
3633 t.start()
3634 for t in threads:
3635 t.join()
3636 sizeResponses.append( t.result )
3637 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003638 for i in range( len( main.activeNodes ) ):
3639 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003640 if size != sizeResponses[ i ]:
3641 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003642 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003643 " expected a size of " + str( size ) +
3644 " for set " + onosSetName +
3645 " but got " + str( sizeResponses[ i ] ) )
3646 addResults = addResults and getResults and sizeResults
3647 utilities.assert_equals( expect=main.TRUE,
3648 actual=addResults,
3649 onpass="Set add correct",
3650 onfail="Set add was incorrect" )
3651
3652 main.step( "Distributed Set addAll()" )
3653 onosSet.update( addAllValue.split() )
3654 addResponses = []
3655 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003656 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003657 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003658 name="setTestAddAll-" + str( i ),
3659 args=[ onosSetName, addAllValue ] )
3660 threads.append( t )
3661 t.start()
3662 for t in threads:
3663 t.join()
3664 addResponses.append( t.result )
3665
3666 # main.TRUE = successfully changed the set
3667 # main.FALSE = action resulted in no change in set
3668 # main.ERROR - Some error in executing the function
3669 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003670 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003671 if addResponses[ i ] == main.TRUE:
3672 # All is well
3673 pass
3674 elif addResponses[ i ] == main.FALSE:
3675 # Already in set, probably fine
3676 pass
3677 elif addResponses[ i ] == main.ERROR:
3678 # Error in execution
3679 addAllResults = main.FALSE
3680 else:
3681 # unexpected result
3682 addAllResults = main.FALSE
3683 if addAllResults != main.TRUE:
3684 main.log.error( "Error executing set addAll" )
3685
3686 # Check if set is still correct
3687 size = len( onosSet )
3688 getResponses = []
3689 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003690 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003691 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003692 name="setTestGet-" + str( i ),
3693 args=[ onosSetName ] )
3694 threads.append( t )
3695 t.start()
3696 for t in threads:
3697 t.join()
3698 getResponses.append( t.result )
3699 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003700 for i in range( len( main.activeNodes ) ):
3701 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003702 if isinstance( getResponses[ i ], list):
3703 current = set( getResponses[ i ] )
3704 if len( current ) == len( getResponses[ i ] ):
3705 # no repeats
3706 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003707 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003708 " has incorrect view" +
3709 " of set " + onosSetName + ":\n" +
3710 str( getResponses[ i ] ) )
3711 main.log.debug( "Expected: " + str( onosSet ) )
3712 main.log.debug( "Actual: " + str( current ) )
3713 getResults = main.FALSE
3714 else:
3715 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003716 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003717 " has repeat elements in" +
3718 " set " + onosSetName + ":\n" +
3719 str( getResponses[ i ] ) )
3720 getResults = main.FALSE
3721 elif getResponses[ i ] == main.ERROR:
3722 getResults = main.FALSE
3723 sizeResponses = []
3724 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003725 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003726 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003727 name="setTestSize-" + str( i ),
3728 args=[ onosSetName ] )
3729 threads.append( t )
3730 t.start()
3731 for t in threads:
3732 t.join()
3733 sizeResponses.append( t.result )
3734 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003735 for i in range( len( main.activeNodes ) ):
3736 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003737 if size != sizeResponses[ i ]:
3738 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003739 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003740 " expected a size of " + str( size ) +
3741 " for set " + onosSetName +
3742 " but got " + str( sizeResponses[ i ] ) )
3743 addAllResults = addAllResults and getResults and sizeResults
3744 utilities.assert_equals( expect=main.TRUE,
3745 actual=addAllResults,
3746 onpass="Set addAll correct",
3747 onfail="Set addAll was incorrect" )
3748
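        # NOTE: setTestGet with the "values" kwarg is assumed to return a
        #       tuple whose second element is the boolean contains/containsAll
        #       result, which is why element [ 1 ] is read from each response.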
3749 main.step( "Distributed Set contains()" )
3750 containsResponses = []
3751 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003752 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003753 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003754 name="setContains-" + str( i ),
3755 args=[ onosSetName ],
3756 kwargs={ "values": addValue } )
3757 threads.append( t )
3758 t.start()
3759 for t in threads:
3760 t.join()
3761 # NOTE: This is the tuple
3762 containsResponses.append( t.result )
3763
3764 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003765 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003766 if containsResponses[ i ] == main.ERROR:
3767 containsResults = main.FALSE
3768 else:
3769 containsResults = containsResults and\
3770 containsResponses[ i ][ 1 ]
3771 utilities.assert_equals( expect=main.TRUE,
3772 actual=containsResults,
3773 onpass="Set contains is functional",
3774 onfail="Set contains failed" )
3775
3776 main.step( "Distributed Set containsAll()" )
3777 containsAllResponses = []
3778 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003779 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003780 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003781 name="setContainsAll-" + str( i ),
3782 args=[ onosSetName ],
3783 kwargs={ "values": addAllValue } )
3784 threads.append( t )
3785 t.start()
3786 for t in threads:
3787 t.join()
3788 # NOTE: This is the tuple
3789 containsAllResponses.append( t.result )
3790
3791 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003792 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003793 if containsAllResponses[ i ] == main.ERROR:
3794 containsAllResults = main.FALSE
3795 else:
3796 containsAllResults = containsAllResults and\
3797 containsAllResponses[ i ][ 1 ]
3798 utilities.assert_equals( expect=main.TRUE,
3799 actual=containsAllResults,
3800 onpass="Set containsAll is functional",
3801 onfail="Set containsAll failed" )
3802
3803 main.step( "Distributed Set remove()" )
3804 onosSet.remove( addValue )
3805 removeResponses = []
3806 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003807 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003808 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003809 name="setTestRemove-" + str( i ),
3810 args=[ onosSetName, addValue ] )
3811 threads.append( t )
3812 t.start()
3813 for t in threads:
3814 t.join()
3815 removeResponses.append( t.result )
3816
3817 # main.TRUE = successfully changed the set
3818 # main.FALSE = action resulted in no change in set
3819 # main.ERROR - Some error in executing the function
3820 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003821 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003822 if removeResponses[ i ] == main.TRUE:
3823 # All is well
3824 pass
3825 elif removeResponses[ i ] == main.FALSE:
3826 # not in set, probably fine
3827 pass
3828 elif removeResponses[ i ] == main.ERROR:
3829 # Error in execution
3830 removeResults = main.FALSE
3831 else:
3832 # unexpected result
3833 removeResults = main.FALSE
3834 if removeResults != main.TRUE:
3835 main.log.error( "Error executing set remove" )
3836
3837 # Check if set is still correct
3838 size = len( onosSet )
3839 getResponses = []
3840 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003841 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003842 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003843 name="setTestGet-" + str( i ),
3844 args=[ onosSetName ] )
3845 threads.append( t )
3846 t.start()
3847 for t in threads:
3848 t.join()
3849 getResponses.append( t.result )
3850 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003851 for i in range( len( main.activeNodes ) ):
3852 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003853 if isinstance( getResponses[ i ], list):
3854 current = set( getResponses[ i ] )
3855 if len( current ) == len( getResponses[ i ] ):
3856 # no repeats
3857 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003858 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003859 " has incorrect view" +
3860 " of set " + onosSetName + ":\n" +
3861 str( getResponses[ i ] ) )
3862 main.log.debug( "Expected: " + str( onosSet ) )
3863 main.log.debug( "Actual: " + str( current ) )
3864 getResults = main.FALSE
3865 else:
3866 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003867 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003868 " has repeat elements in" +
3869 " set " + onosSetName + ":\n" +
3870 str( getResponses[ i ] ) )
3871 getResults = main.FALSE
3872 elif getResponses[ i ] == main.ERROR:
3873 getResults = main.FALSE
3874 sizeResponses = []
3875 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003876 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003877 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003878 name="setTestSize-" + str( i ),
3879 args=[ onosSetName ] )
3880 threads.append( t )
3881 t.start()
3882 for t in threads:
3883 t.join()
3884 sizeResponses.append( t.result )
3885 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003886 for i in range( len( main.activeNodes ) ):
3887 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003888 if size != sizeResponses[ i ]:
3889 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003890 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003891 " expected a size of " + str( size ) +
3892 " for set " + onosSetName +
3893 " but got " + str( sizeResponses[ i ] ) )
3894 removeResults = removeResults and getResults and sizeResults
3895 utilities.assert_equals( expect=main.TRUE,
3896 actual=removeResults,
3897 onpass="Set remove correct",
3898 onfail="Set remove was incorrect" )
3899
3900 main.step( "Distributed Set removeAll()" )
3901 onosSet.difference_update( addAllValue.split() )
3902 removeAllResponses = []
3903 threads = []
3904 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003905 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003906 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003907 name="setTestRemoveAll-" + str( i ),
3908 args=[ onosSetName, addAllValue ] )
3909 threads.append( t )
3910 t.start()
3911 for t in threads:
3912 t.join()
3913 removeAllResponses.append( t.result )
3914 except Exception, e:
3915 main.log.exception(e)
3916
3917 # main.TRUE = successfully changed the set
3918 # main.FALSE = action resulted in no change in set
3919 # main.ERROR - Some error in executing the function
3920 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003921 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003922 if removeAllResponses[ i ] == main.TRUE:
3923 # All is well
3924 pass
3925 elif removeAllResponses[ i ] == main.FALSE:
3926 # not in set, probably fine
3927 pass
3928 elif removeAllResponses[ i ] == main.ERROR:
3929 # Error in execution
3930 removeAllResults = main.FALSE
3931 else:
3932 # unexpected result
3933 removeAllResults = main.FALSE
3934 if removeAllResults != main.TRUE:
3935 main.log.error( "Error executing set removeAll" )
3936
3937 # Check if set is still correct
3938 size = len( onosSet )
3939 getResponses = []
3940 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003941 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003942 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003943 name="setTestGet-" + str( i ),
3944 args=[ onosSetName ] )
3945 threads.append( t )
3946 t.start()
3947 for t in threads:
3948 t.join()
3949 getResponses.append( t.result )
3950 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003951 for i in range( len( main.activeNodes ) ):
3952 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003953 if isinstance( getResponses[ i ], list):
3954 current = set( getResponses[ i ] )
3955 if len( current ) == len( getResponses[ i ] ):
3956 # no repeats
3957 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003958 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003959 " has incorrect view" +
3960 " of set " + onosSetName + ":\n" +
3961 str( getResponses[ i ] ) )
3962 main.log.debug( "Expected: " + str( onosSet ) )
3963 main.log.debug( "Actual: " + str( current ) )
3964 getResults = main.FALSE
3965 else:
3966 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003967 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003968 " has repeat elements in" +
3969 " set " + onosSetName + ":\n" +
3970 str( getResponses[ i ] ) )
3971 getResults = main.FALSE
3972 elif getResponses[ i ] == main.ERROR:
3973 getResults = main.FALSE
3974 sizeResponses = []
3975 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003976 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003977 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003978 name="setTestSize-" + str( i ),
3979 args=[ onosSetName ] )
3980 threads.append( t )
3981 t.start()
3982 for t in threads:
3983 t.join()
3984 sizeResponses.append( t.result )
3985 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003986 for i in range( len( main.activeNodes ) ):
3987 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003988 if size != sizeResponses[ i ]:
3989 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003990 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003991 " expected a size of " + str( size ) +
3992 " for set " + onosSetName +
3993 " but got " + str( sizeResponses[ i ] ) )
3994 removeAllResults = removeAllResults and getResults and sizeResults
3995 utilities.assert_equals( expect=main.TRUE,
3996 actual=removeAllResults,
3997 onpass="Set removeAll correct",
3998 onfail="Set removeAll was incorrect" )
3999
4000 main.step( "Distributed Set addAll()" )
4001 onosSet.update( addAllValue.split() )
4002 addResponses = []
4003 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004004 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004005 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004006 name="setTestAddAll-" + str( i ),
4007 args=[ onosSetName, addAllValue ] )
4008 threads.append( t )
4009 t.start()
4010 for t in threads:
4011 t.join()
4012 addResponses.append( t.result )
4013
4014 # main.TRUE = successfully changed the set
4015 # main.FALSE = action resulted in no change in set
4016 # main.ERROR - Some error in executing the function
4017 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004018 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004019 if addResponses[ i ] == main.TRUE:
4020 # All is well
4021 pass
4022 elif addResponses[ i ] == main.FALSE:
4023 # Already in set, probably fine
4024 pass
4025 elif addResponses[ i ] == main.ERROR:
4026 # Error in execution
4027 addAllResults = main.FALSE
4028 else:
4029 # unexpected result
4030 addAllResults = main.FALSE
4031 if addAllResults != main.TRUE:
4032 main.log.error( "Error executing set addAll" )
4033
4034 # Check if set is still correct
4035 size = len( onosSet )
4036 getResponses = []
4037 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004038 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004039 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004040 name="setTestGet-" + str( i ),
4041 args=[ onosSetName ] )
4042 threads.append( t )
4043 t.start()
4044 for t in threads:
4045 t.join()
4046 getResponses.append( t.result )
4047 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004048 for i in range( len( main.activeNodes ) ):
4049 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004050 if isinstance( getResponses[ i ], list):
4051 current = set( getResponses[ i ] )
4052 if len( current ) == len( getResponses[ i ] ):
4053 # no repeats
4054 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004055 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004056 " has incorrect view" +
4057 " of set " + onosSetName + ":\n" +
4058 str( getResponses[ i ] ) )
4059 main.log.debug( "Expected: " + str( onosSet ) )
4060 main.log.debug( "Actual: " + str( current ) )
4061 getResults = main.FALSE
4062 else:
4063 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004064 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004065 " has repeat elements in" +
4066 " set " + onosSetName + ":\n" +
4067 str( getResponses[ i ] ) )
4068 getResults = main.FALSE
4069 elif getResponses[ i ] == main.ERROR:
4070 getResults = main.FALSE
4071 sizeResponses = []
4072 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004073 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004074 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004075 name="setTestSize-" + str( i ),
4076 args=[ onosSetName ] )
4077 threads.append( t )
4078 t.start()
4079 for t in threads:
4080 t.join()
4081 sizeResponses.append( t.result )
4082 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004083 for i in range( len( main.activeNodes ) ):
4084 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004085 if size != sizeResponses[ i ]:
4086 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004087 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004088 " expected a size of " + str( size ) +
4089 " for set " + onosSetName +
4090 " but got " + str( sizeResponses[ i ] ) )
4091 addAllResults = addAllResults and getResults and sizeResults
4092 utilities.assert_equals( expect=main.TRUE,
4093 actual=addAllResults,
4094 onpass="Set addAll correct",
4095 onfail="Set addAll was incorrect" )
4096
4097 main.step( "Distributed Set clear()" )
4098 onosSet.clear()
4099 clearResponses = []
4100 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004101 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004102 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004103 name="setTestClear-" + str( i ),
4104 args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )

        main.step( "Distributed Set addAll()" )
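        # Re-populate the now-empty set: every active node adds the whole
        # addAllValue string, and the local reference copy is updated with the
        # same elements ( addAllValue.split() ) before membership and size are
        # verified again on every node.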
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

        main.step( "Distributed Set retain()" )
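        # retain keeps only the elements that are also present in retainValue
        # (set intersection), issued through setTestRemove with retain=True.
        # The reference copy uses intersection_update so the expected contents
        # match what each node should report afterwards. For example, if the
        # set currently holds { "a", "b", "c" } and retainValue is "b c d",
        # the expected contents after retain are { "b", "c" }.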
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )

        # Transactional maps
        main.step( "Partitioned Transactional maps put" )
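        # The put is issued from a single active node; the get step below then
        # reads the same keys from every active node, so a correct partitioned
        # map should return the written value everywhere. The keys are assumed
        # to follow the CLI driver's "Key1".."Key<numKeys>" naming convention,
        # which the get step relies on ( "Key" + str( n ) ).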
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        if len( putResponses ) == numKeys:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

        main.step( "Partitioned Transactional maps get" )
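        # For each of the numKeys keys, query every active node in parallel
        # and require that all returned values equal tMapValue; any mismatch
        # is logged and fails the step.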
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )

        main.step( "In-memory Transactional maps put" )
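        # Same put check as above, but against the in-memory transactional
        # map ( inMemory=True on the CLI driver call ).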
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
        if len( putResponses ) == numKeys:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="In-Memory Transactional Map put successful",
                                 onfail="In-Memory Transactional Map put values are incorrect" )

        main.step( "In-memory Transactional maps get" )
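        # Same per-key, per-node consistency check as the partitioned map,
        # with inMemory=True passed through to transactionalMapGet.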
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ],
                                 kwargs={ "inMemory": True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="In-Memory Transactional Map get values were correct",
                                 onfail="In-Memory Transactional Map values incorrect" )