blob: 84e7d9c0a19a767b69be5750b1605a16cc67c948 [file] [log] [blame]
Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if ONOS can handle
3 a minority of its nodes restarting
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
Jon Hallb3ed8ed2015-10-28 16:43:55 -070012CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
Jon Hall5cf14d52015-07-16 12:15:19 -070014CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
Jon Hallb3ed8ed2015-10-28 16:43:55 -070028class HAstopNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -070029
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hall3b489db2015-10-05 14:38:37 -070052 import pexpect
Jon Hallf3d16e72015-12-16 17:45:08 -080053 import time
Jon Hallb3ed8ed2015-10-28 16:43:55 -070054 main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
Jon Hall5cf14d52015-07-16 12:15:19 -070055 "initialization" )
56 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070057 main.caseExplanation = "Setup the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070058 "installing ONOS, starting Mininet and ONOS" +\
59 "cli sessions."
60 # TODO: save all the timers and output them for plotting
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
Jon Halle1a3b752015-07-22 13:02:46 -070069 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070070 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070071 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070074 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
81
82 # FIXME: just get controller port from params?
83 # TODO: do we really need all these?
84 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
85 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
86 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
87 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
88 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
89 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
90 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
91
Jon Halle1a3b752015-07-22 13:02:46 -070092 try:
93 fileName = "Counters"
94 # TODO: Maybe make a library folder somewhere?
95 path = main.params[ 'imports' ][ 'path' ]
96 main.Counters = imp.load_source( fileName,
97 path + fileName + ".py" )
98 except Exception as e:
99 main.log.exception( e )
100 main.cleanup()
101 main.exit()
102
103 main.CLIs = []
104 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700105 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700106 for i in range( 1, main.numCtrls + 1 ):
107 try:
108 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
109 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
110 ipList.append( main.nodes[ -1 ].ip_address )
111 except AttributeError:
112 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700113
114 main.step( "Create cell file" )
115 cellAppString = main.params[ 'ENV' ][ 'appString' ]
116 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
117 main.Mininet1.ip_address,
118 cellAppString, ipList )
119 main.step( "Applying cell variable to environment" )
120 cellResult = main.ONOSbench.setCell( cellName )
121 verifyResult = main.ONOSbench.verifyCell()
122
123 # FIXME:this is short term fix
124 main.log.info( "Removing raft logs" )
125 main.ONOSbench.onosRemoveRaftLogs()
126
127 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700128 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700129 main.ONOSbench.onosUninstall( node.ip_address )
130
131 # Make sure ONOS is DEAD
132 main.log.info( "Killing any ONOS processes" )
133 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700134 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700135 killed = main.ONOSbench.onosKill( node.ip_address )
136 killResults = killResults and killed
137
138 cleanInstallResult = main.TRUE
139 gitPullResult = main.TRUE
140
141 main.step( "Starting Mininet" )
142 # scp topo file to mininet
143 # TODO: move to params?
144 topoName = "obelisk.py"
145 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700146 main.ONOSbench.scp( main.Mininet1,
147 filePath + topoName,
148 main.Mininet1.home,
149 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700150 mnResult = main.Mininet1.startNet( )
151 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
152 onpass="Mininet Started",
153 onfail="Error starting Mininet" )
154
155 main.step( "Git checkout and pull " + gitBranch )
156 if PULLCODE:
157 main.ONOSbench.gitCheckout( gitBranch )
158 gitPullResult = main.ONOSbench.gitPull()
159 # values of 1 or 3 are good
160 utilities.assert_lesser( expect=0, actual=gitPullResult,
161 onpass="Git pull successful",
162 onfail="Git pull failed" )
163 main.ONOSbench.getVersion( report=True )
164
165 main.step( "Using mvn clean install" )
166 cleanInstallResult = main.TRUE
167 if PULLCODE and gitPullResult == main.TRUE:
168 cleanInstallResult = main.ONOSbench.cleanInstall()
169 else:
170 main.log.warn( "Did not pull new code so skipping mvn " +
171 "clean install" )
172 utilities.assert_equals( expect=main.TRUE,
173 actual=cleanInstallResult,
174 onpass="MCI successful",
175 onfail="MCI failed" )
176 # GRAPHS
177 # NOTE: important params here:
178 # job = name of Jenkins job
179 # Plot Name = Plot-HA, only can be used if multiple plots
180 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700181 job = "HAstopNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700182 plotName = "Plot-HA"
183 graphs = '<ac:structured-macro ac:name="html">\n'
184 graphs += '<ac:plain-text-body><![CDATA[\n'
185 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
186 '/plot/' + plotName + '/getPlot?index=0' +\
187 '&width=500&height=300"' +\
188 'noborder="0" width="500" height="300" scrolling="yes" ' +\
189 'seamless="seamless"></iframe>\n'
190 graphs += ']]></ac:plain-text-body>\n'
191 graphs += '</ac:structured-macro>\n'
192 main.log.wiki(graphs)
193
194 main.step( "Creating ONOS package" )
Jon Hall3b489db2015-10-05 14:38:37 -0700195 # copy gen-partions file to ONOS
196 # NOTE: this assumes TestON and ONOS are on the same machine
197 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
198 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
199 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
200 main.ONOSbench.ip_address,
201 srcFile,
202 dstDir,
203 pwd=main.ONOSbench.pwd,
204 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700205 packageResult = main.ONOSbench.onosPackage()
206 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
207 onpass="ONOS package successful",
208 onfail="ONOS package failed" )
209
210 main.step( "Installing ONOS package" )
211 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700212 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700213 tmpResult = main.ONOSbench.onosInstall( options="-f",
214 node=node.ip_address )
215 onosInstallResult = onosInstallResult and tmpResult
216 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
217 onpass="ONOS install successful",
218 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700219 # clean up gen-partitions file
220 try:
221 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
222 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
223 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
224 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
225 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
226 str( main.ONOSbench.handle.before ) )
227 except ( pexpect.TIMEOUT, pexpect.EOF ):
228 main.log.exception( "ONOSbench: pexpect exception found:" +
229 main.ONOSbench.handle.before )
230 main.cleanup()
231 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700232
233 main.step( "Checking if ONOS is up yet" )
234 for i in range( 2 ):
235 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700236 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700237 started = main.ONOSbench.isup( node.ip_address )
238 if not started:
239 main.log.error( node.name + " didn't start!" )
240 main.ONOSbench.onosStop( node.ip_address )
241 main.ONOSbench.onosStart( node.ip_address )
242 onosIsupResult = onosIsupResult and started
243 if onosIsupResult == main.TRUE:
244 break
245 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
246 onpass="ONOS startup successful",
247 onfail="ONOS startup failed" )
248
249 main.log.step( "Starting ONOS CLI sessions" )
250 cliResults = main.TRUE
251 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700252 for i in range( main.numCtrls ):
253 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700254 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700255 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700256 threads.append( t )
257 t.start()
258
259 for t in threads:
260 t.join()
261 cliResults = cliResults and t.result
262 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
263 onpass="ONOS cli startup successful",
264 onfail="ONOS cli startup failed" )
265
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700266 # Create a list of active nodes for use when some nodes are stopped
267 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
268
Jon Hall5cf14d52015-07-16 12:15:19 -0700269 if main.params[ 'tcpdump' ].lower() == "true":
270 main.step( "Start Packet Capture MN" )
271 main.Mininet2.startTcpdump(
272 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
273 + "-MN.pcap",
274 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
275 port=main.params[ 'MNtcpdump' ][ 'port' ] )
276
277 main.step( "App Ids check" )
Jon Hallf3d16e72015-12-16 17:45:08 -0800278 time.sleep(60)
Jon Hall5cf14d52015-07-16 12:15:19 -0700279 appCheck = main.TRUE
280 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700281 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700282 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700283 name="appToIDCheck-" + str( i ),
284 args=[] )
285 threads.append( t )
286 t.start()
287
288 for t in threads:
289 t.join()
290 appCheck = appCheck and t.result
291 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700292 node = main.activeNodes[0]
293 main.log.warn( main.CLIs[node].apps() )
294 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700295 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
296 onpass="App Ids seem to be correct",
297 onfail="Something is wrong with app Ids" )
298
299 if cliResults == main.FALSE:
300 main.log.error( "Failed to start ONOS, stopping test" )
301 main.cleanup()
302 main.exit()
303
304 def CASE2( self, main ):
305 """
306 Assign devices to controllers
307 """
308 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700309 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700310 assert main, "main not defined"
311 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700312 assert main.CLIs, "main.CLIs not defined"
313 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700314 assert ONOS1Port, "ONOS1Port not defined"
315 assert ONOS2Port, "ONOS2Port not defined"
316 assert ONOS3Port, "ONOS3Port not defined"
317 assert ONOS4Port, "ONOS4Port not defined"
318 assert ONOS5Port, "ONOS5Port not defined"
319 assert ONOS6Port, "ONOS6Port not defined"
320 assert ONOS7Port, "ONOS7Port not defined"
321
322 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700323 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700324 "and check that an ONOS node becomes the " +\
325 "master of the device."
326 main.step( "Assign switches to controllers" )
327
328 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700329 for i in range( main.numCtrls ):
330 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700331 swList = []
332 for i in range( 1, 29 ):
333 swList.append( "s" + str( i ) )
334 main.Mininet1.assignSwController( sw=swList, ip=ipList )
335
336 mastershipCheck = main.TRUE
337 for i in range( 1, 29 ):
338 response = main.Mininet1.getSwController( "s" + str( i ) )
339 try:
340 main.log.info( str( response ) )
341 except Exception:
342 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700343 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700344 if re.search( "tcp:" + node.ip_address, response ):
345 mastershipCheck = mastershipCheck and main.TRUE
346 else:
347 main.log.error( "Error, node " + node.ip_address + " is " +
348 "not in the list of controllers s" +
349 str( i ) + " is connecting to." )
350 mastershipCheck = main.FALSE
351 utilities.assert_equals(
352 expect=main.TRUE,
353 actual=mastershipCheck,
354 onpass="Switch mastership assigned correctly",
355 onfail="Switches not assigned correctly to controllers" )
356
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Uses the 'device-role' CLI command to pin each of the 28 obelisk
        switches to a predetermined controller, then re-reads each device's
        role to confirm the requested node actually became master.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI calls go through the first still-active node
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster: 'c % main.numCtrls' wraps the intended node index
            # into the actual cluster size.
            # The getDevice() argument is a dpid fragment for the obelisk
            # topology switch — presumably the hex suffix of the full device
            # id; TODO confirm against the obelisk.py topology file.
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    # switches s8-s17 share one master; dpid built from index
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    # switches s18-s27 share one master; dpid built from index
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice() can return None (AttributeError on .get) and the
            # assert above fires when a device is missing from ONOS
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        # Re-read each device's master and compare with what we requested
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
478
479 def CASE3( self, main ):
480 """
481 Assign intents
482 """
483 import time
484 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700485 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700486 assert main, "main not defined"
487 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700488 assert main.CLIs, "main.CLIs not defined"
489 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700490 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700491 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700492 "assign predetermined host-to-host intents." +\
493 " After installation, check that the intent" +\
494 " is distributed to all nodes and the state" +\
495 " is INSTALLED"
496
497 # install onos-app-fwd
498 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700499 onosCli = main.CLIs[ main.activeNodes[0] ]
500 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700501 utilities.assert_equals( expect=main.TRUE, actual=installResults,
502 onpass="Install fwd successful",
503 onfail="Install fwd failed" )
504
505 main.step( "Check app ids" )
506 appCheck = main.TRUE
507 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700508 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700509 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700510 name="appToIDCheck-" + str( i ),
511 args=[] )
512 threads.append( t )
513 t.start()
514
515 for t in threads:
516 t.join()
517 appCheck = appCheck and t.result
518 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700519 main.log.warn( onosCli.apps() )
520 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700521 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
522 onpass="App Ids seem to be correct",
523 onfail="Something is wrong with app Ids" )
524
525 main.step( "Discovering Hosts( Via pingall for now )" )
526 # FIXME: Once we have a host discovery mechanism, use that instead
527 # REACTIVE FWD test
528 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700529 passMsg = "Reactive Pingall test passed"
530 time1 = time.time()
531 pingResult = main.Mininet1.pingall()
532 time2 = time.time()
533 if not pingResult:
534 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700535 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700536 passMsg += " on the second try"
537 utilities.assert_equals(
538 expect=main.TRUE,
539 actual=pingResult,
540 onpass= passMsg,
541 onfail="Reactive Pingall failed, " +
542 "one or more ping pairs failed" )
543 main.log.info( "Time for pingall: %2f seconds" %
544 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700545 # timeout for fwd flows
546 time.sleep( 11 )
547 # uninstall onos-app-fwd
548 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700549 node = main.activeNodes[0]
550 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700551 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
552 onpass="Uninstall fwd successful",
553 onfail="Uninstall fwd failed" )
554
555 main.step( "Check app ids" )
556 threads = []
557 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700558 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700559 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700560 name="appToIDCheck-" + str( i ),
561 args=[] )
562 threads.append( t )
563 t.start()
564
565 for t in threads:
566 t.join()
567 appCheck2 = appCheck2 and t.result
568 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700569 node = main.activeNodes[0]
570 main.log.warn( main.CLIs[node].apps() )
571 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700572 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
573 onpass="App Ids seem to be correct",
574 onfail="Something is wrong with app Ids" )
575
576 main.step( "Add host intents via cli" )
577 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700578 # TODO: move the host numbers to params
579 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700580 intentAddResult = True
581 hostResult = main.TRUE
582 for i in range( 8, 18 ):
583 main.log.info( "Adding host intent between h" + str( i ) +
584 " and h" + str( i + 10 ) )
585 host1 = "00:00:00:00:00:" + \
586 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
587 host2 = "00:00:00:00:00:" + \
588 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
589 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700590 host1Dict = onosCli.getHost( host1 )
591 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700592 host1Id = None
593 host2Id = None
594 if host1Dict and host2Dict:
595 host1Id = host1Dict.get( 'id', None )
596 host2Id = host2Dict.get( 'id', None )
597 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700598 nodeNum = ( i % len( main.activeNodes ) )
599 node = main.activeNodes[nodeNum]
600 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700601 if tmpId:
602 main.log.info( "Added intent with id: " + tmpId )
603 intentIds.append( tmpId )
604 else:
605 main.log.error( "addHostIntent returned: " +
606 repr( tmpId ) )
607 else:
608 main.log.error( "Error, getHost() failed for h" + str( i ) +
609 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700610 node = main.activeNodes[0]
611 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700612 main.log.warn( "Hosts output: " )
613 try:
614 main.log.warn( json.dumps( json.loads( hosts ),
615 sort_keys=True,
616 indent=4,
617 separators=( ',', ': ' ) ) )
618 except ( ValueError, TypeError ):
619 main.log.warn( repr( hosts ) )
620 hostResult = main.FALSE
621 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
622 onpass="Found a host id for each host",
623 onfail="Error looking up host ids" )
624
625 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700626 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700627 main.log.info( "Submitted intents: " + str( intentIds ) )
628 main.log.info( "Intents in ONOS: " + str( onosIds ) )
629 for intent in intentIds:
630 if intent in onosIds:
631 pass # intent submitted is in onos
632 else:
633 intentAddResult = False
634 if intentAddResult:
635 intentStop = time.time()
636 else:
637 intentStop = None
638 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700639 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700640 intentStates = []
641 installedCheck = True
642 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
643 count = 0
644 try:
645 for intent in json.loads( intents ):
646 state = intent.get( 'state', None )
647 if "INSTALLED" not in state:
648 installedCheck = False
649 intentId = intent.get( 'id', None )
650 intentStates.append( ( intentId, state ) )
651 except ( ValueError, TypeError ):
652 main.log.exception( "Error parsing intents" )
653 # add submitted intents not in the store
654 tmplist = [ i for i, s in intentStates ]
655 missingIntents = False
656 for i in intentIds:
657 if i not in tmplist:
658 intentStates.append( ( i, " - " ) )
659 missingIntents = True
660 intentStates.sort()
661 for i, s in intentStates:
662 count += 1
663 main.log.info( "%-6s%-15s%-15s" %
664 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700665 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700666 try:
667 missing = False
668 if leaders:
669 parsedLeaders = json.loads( leaders )
670 main.log.warn( json.dumps( parsedLeaders,
671 sort_keys=True,
672 indent=4,
673 separators=( ',', ': ' ) ) )
674 # check for all intent partitions
675 topics = []
676 for i in range( 14 ):
677 topics.append( "intent-partition-" + str( i ) )
678 main.log.debug( topics )
679 ONOStopics = [ j['topic'] for j in parsedLeaders ]
680 for topic in topics:
681 if topic not in ONOStopics:
682 main.log.error( "Error: " + topic +
683 " not in leaders" )
684 missing = True
685 else:
686 main.log.error( "leaders() returned None" )
687 except ( ValueError, TypeError ):
688 main.log.exception( "Error parsing leaders" )
689 main.log.error( repr( leaders ) )
690 # Check all nodes
691 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700692 for i in main.activeNodes:
693 response = main.CLIs[i].leaders( jsonFormat=False)
694 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700695 str( response ) )
696
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700697 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700698 try:
699 if partitions :
700 parsedPartitions = json.loads( partitions )
701 main.log.warn( json.dumps( parsedPartitions,
702 sort_keys=True,
703 indent=4,
704 separators=( ',', ': ' ) ) )
705 # TODO check for a leader in all paritions
706 # TODO check for consistency among nodes
707 else:
708 main.log.error( "partitions() returned None" )
709 except ( ValueError, TypeError ):
710 main.log.exception( "Error parsing partitions" )
711 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700712 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700713 try:
714 if pendingMap :
715 parsedPending = json.loads( pendingMap )
716 main.log.warn( json.dumps( parsedPending,
717 sort_keys=True,
718 indent=4,
719 separators=( ',', ': ' ) ) )
720 # TODO check something here?
721 else:
722 main.log.error( "pendingMap() returned None" )
723 except ( ValueError, TypeError ):
724 main.log.exception( "Error parsing pending map" )
725 main.log.error( repr( pendingMap ) )
726
727 intentAddResult = bool( intentAddResult and not missingIntents and
728 installedCheck )
729 if not intentAddResult:
730 main.log.error( "Error in pushing host intents to ONOS" )
731
732 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700733 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700734 correct = True
735 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700736 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700737 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700738 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700739 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700740 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700741 str( sorted( onosIds ) ) )
742 if sorted( ids ) != sorted( intentIds ):
743 main.log.warn( "Set of intent IDs doesn't match" )
744 correct = False
745 break
746 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700747 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700748 for intent in intents:
749 if intent[ 'state' ] != "INSTALLED":
750 main.log.warn( "Intent " + intent[ 'id' ] +
751 " is " + intent[ 'state' ] )
752 correct = False
753 break
754 if correct:
755 break
756 else:
757 time.sleep(1)
758 if not intentStop:
759 intentStop = time.time()
760 global gossipTime
761 gossipTime = intentStop - intentStart
762 main.log.info( "It took about " + str( gossipTime ) +
763 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700764 gossipPeriod = int( main.params['timers']['gossip'] )
765 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700766 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700767 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700768 onpass="ECM anti-entropy for intents worked within " +
769 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700770 onfail="Intent ECM anti-entropy took too long. " +
771 "Expected time:{}, Actual time:{}".format( maxGossipTime,
772 gossipTime ) )
773 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700774 intentAddResult = True
775
776 if not intentAddResult or "key" in pendingMap:
777 import time
778 installedCheck = True
779 main.log.info( "Sleeping 60 seconds to see if intents are found" )
780 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700781 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700782 main.log.info( "Submitted intents: " + str( intentIds ) )
783 main.log.info( "Intents in ONOS: " + str( onosIds ) )
784 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700785 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700786 intentStates = []
787 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
788 count = 0
789 try:
790 for intent in json.loads( intents ):
791 # Iter through intents of a node
792 state = intent.get( 'state', None )
793 if "INSTALLED" not in state:
794 installedCheck = False
795 intentId = intent.get( 'id', None )
796 intentStates.append( ( intentId, state ) )
797 except ( ValueError, TypeError ):
798 main.log.exception( "Error parsing intents" )
799 # add submitted intents not in the store
800 tmplist = [ i for i, s in intentStates ]
801 for i in intentIds:
802 if i not in tmplist:
803 intentStates.append( ( i, " - " ) )
804 intentStates.sort()
805 for i, s in intentStates:
806 count += 1
807 main.log.info( "%-6s%-15s%-15s" %
808 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700809 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700810 try:
811 missing = False
812 if leaders:
813 parsedLeaders = json.loads( leaders )
814 main.log.warn( json.dumps( parsedLeaders,
815 sort_keys=True,
816 indent=4,
817 separators=( ',', ': ' ) ) )
818 # check for all intent partitions
819 # check for election
820 topics = []
821 for i in range( 14 ):
822 topics.append( "intent-partition-" + str( i ) )
823 # FIXME: this should only be after we start the app
824 topics.append( "org.onosproject.election" )
825 main.log.debug( topics )
826 ONOStopics = [ j['topic'] for j in parsedLeaders ]
827 for topic in topics:
828 if topic not in ONOStopics:
829 main.log.error( "Error: " + topic +
830 " not in leaders" )
831 missing = True
832 else:
833 main.log.error( "leaders() returned None" )
834 except ( ValueError, TypeError ):
835 main.log.exception( "Error parsing leaders" )
836 main.log.error( repr( leaders ) )
837 # Check all nodes
838 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700839 for i in main.activeNodes:
840 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700841 response = node.leaders( jsonFormat=False)
842 main.log.warn( str( node.name ) + " leaders output: \n" +
843 str( response ) )
844
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700845 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700846 try:
847 if partitions :
848 parsedPartitions = json.loads( partitions )
849 main.log.warn( json.dumps( parsedPartitions,
850 sort_keys=True,
851 indent=4,
852 separators=( ',', ': ' ) ) )
853 # TODO check for a leader in all paritions
854 # TODO check for consistency among nodes
855 else:
856 main.log.error( "partitions() returned None" )
857 except ( ValueError, TypeError ):
858 main.log.exception( "Error parsing partitions" )
859 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700860 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700861 try:
862 if pendingMap :
863 parsedPending = json.loads( pendingMap )
864 main.log.warn( json.dumps( parsedPending,
865 sort_keys=True,
866 indent=4,
867 separators=( ',', ': ' ) ) )
868 # TODO check something here?
869 else:
870 main.log.error( "pendingMap() returned None" )
871 except ( ValueError, TypeError ):
872 main.log.exception( "Error parsing pending map" )
873 main.log.error( repr( pendingMap ) )
874
    def CASE4( self, main ):
        """
        Ping across added host intents

        Verifies dataplane connectivity for the host intents added in an
        earlier case by pinging each h<i> / h<i+10> host pair, then checks
        that every intent reports the INSTALLED state and that the
        intent-partition leadership topics all have leaders. Diagnostic
        state (intents, leaders, partitions, pending map, flow rules) is
        dumped to the log on failure, and if any intent is still not
        INSTALLED the pings are retried once after a 60 second wait.
        """
        import json
        import time
        # Preconditions: earlier cases must have populated these globals
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"
        main.step( "Ping across added host intents" )
        # All single-node CLI queries below go through the first active node
        onosCli = main.CLIs[ main.activeNodes[0] ]
        PingResult = main.TRUE
        # h8..h17 are each paired with h18..h27 by a host intent
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            # Accumulate with 'and' so a single failure sticks
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                # Dump the intents for debugging; TypeError covers a None
                # response from the CLI, ValueError covers bad JSON
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll (up to 40 times, 1s apart) until every intent is INSTALLED
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                # Expect one leadership topic per intent partition (14 total)
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If some intents never reached INSTALLED, give the cluster another
        # minute to converge, dump diagnostic state, then retry the pings
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    # NOTE: unlike the earlier topic check, this one also
                    # expects the leadership-election topic to be present
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                # Dump leaders from every active node for comparison
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1155
1156 def CASE5( self, main ):
1157 """
1158 Reading state of ONOS
1159 """
1160 import json
1161 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001162 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001163 assert main, "main not defined"
1164 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001165 assert main.CLIs, "main.CLIs not defined"
1166 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001167
1168 main.case( "Setting up and gathering data for current state" )
1169 # The general idea for this test case is to pull the state of
1170 # ( intents,flows, topology,... ) from each ONOS node
1171 # We can then compare them with each other and also with past states
1172
1173 main.step( "Check that each switch has a master" )
1174 global mastershipState
1175 mastershipState = '[]'
1176
1177 # Assert that each device has a master
1178 rolesNotNull = main.TRUE
1179 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001180 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001181 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001182 name="rolesNotNull-" + str( i ),
1183 args=[] )
1184 threads.append( t )
1185 t.start()
1186
1187 for t in threads:
1188 t.join()
1189 rolesNotNull = rolesNotNull and t.result
1190 utilities.assert_equals(
1191 expect=main.TRUE,
1192 actual=rolesNotNull,
1193 onpass="Each device has a master",
1194 onfail="Some devices don't have a master assigned" )
1195
1196 main.step( "Get the Mastership of each switch from each controller" )
1197 ONOSMastership = []
1198 mastershipCheck = main.FALSE
1199 consistentMastership = True
1200 rolesResults = True
1201 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001202 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001203 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001204 name="roles-" + str( i ),
1205 args=[] )
1206 threads.append( t )
1207 t.start()
1208
1209 for t in threads:
1210 t.join()
1211 ONOSMastership.append( t.result )
1212
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001213 for i in range( len( ONOSMastership ) ):
1214 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001215 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001216 main.log.error( "Error in getting ONOS" + node + " roles" )
1217 main.log.warn( "ONOS" + node + " mastership response: " +
1218 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001219 rolesResults = False
1220 utilities.assert_equals(
1221 expect=True,
1222 actual=rolesResults,
1223 onpass="No error in reading roles output",
1224 onfail="Error in reading roles from ONOS" )
1225
1226 main.step( "Check for consistency in roles from each controller" )
1227 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1228 main.log.info(
1229 "Switch roles are consistent across all ONOS nodes" )
1230 else:
1231 consistentMastership = False
1232 utilities.assert_equals(
1233 expect=True,
1234 actual=consistentMastership,
1235 onpass="Switch roles are consistent across all ONOS nodes",
1236 onfail="ONOS nodes have different views of switch roles" )
1237
1238 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001239 for i in range( len( main.activeNodes ) ):
1240 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001241 try:
1242 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001243 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001244 json.dumps(
1245 json.loads( ONOSMastership[ i ] ),
1246 sort_keys=True,
1247 indent=4,
1248 separators=( ',', ': ' ) ) )
1249 except ( ValueError, TypeError ):
1250 main.log.warn( repr( ONOSMastership[ i ] ) )
1251 elif rolesResults and consistentMastership:
1252 mastershipCheck = main.TRUE
1253 mastershipState = ONOSMastership[ 0 ]
1254
1255 main.step( "Get the intents from each controller" )
1256 global intentState
1257 intentState = []
1258 ONOSIntents = []
1259 intentCheck = main.FALSE
1260 consistentIntents = True
1261 intentsResults = True
1262 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001263 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001264 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001265 name="intents-" + str( i ),
1266 args=[],
1267 kwargs={ 'jsonFormat': True } )
1268 threads.append( t )
1269 t.start()
1270
1271 for t in threads:
1272 t.join()
1273 ONOSIntents.append( t.result )
1274
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001275 for i in range( len( ONOSIntents ) ):
1276 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001277 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001278 main.log.error( "Error in getting ONOS" + node + " intents" )
1279 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001280 repr( ONOSIntents[ i ] ) )
1281 intentsResults = False
1282 utilities.assert_equals(
1283 expect=True,
1284 actual=intentsResults,
1285 onpass="No error in reading intents output",
1286 onfail="Error in reading intents from ONOS" )
1287
1288 main.step( "Check for consistency in Intents from each controller" )
1289 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1290 main.log.info( "Intents are consistent across all ONOS " +
1291 "nodes" )
1292 else:
1293 consistentIntents = False
1294 main.log.error( "Intents not consistent" )
1295 utilities.assert_equals(
1296 expect=True,
1297 actual=consistentIntents,
1298 onpass="Intents are consistent across all ONOS nodes",
1299 onfail="ONOS nodes have different views of intents" )
1300
1301 if intentsResults:
1302 # Try to make it easy to figure out what is happening
1303 #
1304 # Intent ONOS1 ONOS2 ...
1305 # 0x01 INSTALLED INSTALLING
1306 # ... ... ...
1307 # ... ... ...
1308 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001309 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001310 title += " " * 10 + "ONOS" + str( n + 1 )
1311 main.log.warn( title )
1312 # get all intent keys in the cluster
1313 keys = []
1314 for nodeStr in ONOSIntents:
1315 node = json.loads( nodeStr )
1316 for intent in node:
1317 keys.append( intent.get( 'id' ) )
1318 keys = set( keys )
1319 for key in keys:
1320 row = "%-13s" % key
1321 for nodeStr in ONOSIntents:
1322 node = json.loads( nodeStr )
1323 for intent in node:
1324 if intent.get( 'id', "Error" ) == key:
1325 row += "%-15s" % intent.get( 'state' )
1326 main.log.warn( row )
1327 # End table view
1328
1329 if intentsResults and not consistentIntents:
1330 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001331 n = str( main.activeNodes[-1] + 1 )
1332 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001333 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1334 sort_keys=True,
1335 indent=4,
1336 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001337 for i in range( len( ONOSIntents ) ):
1338 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001339 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001340 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001341 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1342 sort_keys=True,
1343 indent=4,
1344 separators=( ',', ': ' ) ) )
1345 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001346 main.log.debug( "ONOS" + node + " intents match ONOS" +
1347 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001348 elif intentsResults and consistentIntents:
1349 intentCheck = main.TRUE
1350 intentState = ONOSIntents[ 0 ]
1351
1352 main.step( "Get the flows from each controller" )
1353 global flowState
1354 flowState = []
1355 ONOSFlows = []
1356 ONOSFlowsJson = []
1357 flowCheck = main.FALSE
1358 consistentFlows = True
1359 flowsResults = True
1360 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001361 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001362 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001363 name="flows-" + str( i ),
1364 args=[],
1365 kwargs={ 'jsonFormat': True } )
1366 threads.append( t )
1367 t.start()
1368
1369 # NOTE: Flows command can take some time to run
1370 time.sleep(30)
1371 for t in threads:
1372 t.join()
1373 result = t.result
1374 ONOSFlows.append( result )
1375
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001376 for i in range( len( ONOSFlows ) ):
1377 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001378 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1379 main.log.error( "Error in getting ONOS" + num + " flows" )
1380 main.log.warn( "ONOS" + num + " flows response: " +
1381 repr( ONOSFlows[ i ] ) )
1382 flowsResults = False
1383 ONOSFlowsJson.append( None )
1384 else:
1385 try:
1386 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1387 except ( ValueError, TypeError ):
1388 # FIXME: change this to log.error?
1389 main.log.exception( "Error in parsing ONOS" + num +
1390 " response as json." )
1391 main.log.error( repr( ONOSFlows[ i ] ) )
1392 ONOSFlowsJson.append( None )
1393 flowsResults = False
1394 utilities.assert_equals(
1395 expect=True,
1396 actual=flowsResults,
1397 onpass="No error in reading flows output",
1398 onfail="Error in reading flows from ONOS" )
1399
1400 main.step( "Check for consistency in Flows from each controller" )
1401 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1402 if all( tmp ):
1403 main.log.info( "Flow count is consistent across all ONOS nodes" )
1404 else:
1405 consistentFlows = False
1406 utilities.assert_equals(
1407 expect=True,
1408 actual=consistentFlows,
1409 onpass="The flow count is consistent across all ONOS nodes",
1410 onfail="ONOS nodes have different flow counts" )
1411
1412 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001413 for i in range( len( ONOSFlows ) ):
1414 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001415 try:
1416 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001417 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001418 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1419 indent=4, separators=( ',', ': ' ) ) )
1420 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001421 main.log.warn( "ONOS" + node + " flows: " +
1422 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001423 elif flowsResults and consistentFlows:
1424 flowCheck = main.TRUE
1425 flowState = ONOSFlows[ 0 ]
1426
1427 main.step( "Get the OF Table entries" )
1428 global flows
1429 flows = []
1430 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001431 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001432 if flowCheck == main.FALSE:
1433 for table in flows:
1434 main.log.warn( table )
1435 # TODO: Compare switch flow tables with ONOS flow tables
1436
1437 main.step( "Start continuous pings" )
1438 main.Mininet2.pingLong(
1439 src=main.params[ 'PING' ][ 'source1' ],
1440 target=main.params[ 'PING' ][ 'target1' ],
1441 pingTime=500 )
1442 main.Mininet2.pingLong(
1443 src=main.params[ 'PING' ][ 'source2' ],
1444 target=main.params[ 'PING' ][ 'target2' ],
1445 pingTime=500 )
1446 main.Mininet2.pingLong(
1447 src=main.params[ 'PING' ][ 'source3' ],
1448 target=main.params[ 'PING' ][ 'target3' ],
1449 pingTime=500 )
1450 main.Mininet2.pingLong(
1451 src=main.params[ 'PING' ][ 'source4' ],
1452 target=main.params[ 'PING' ][ 'target4' ],
1453 pingTime=500 )
1454 main.Mininet2.pingLong(
1455 src=main.params[ 'PING' ][ 'source5' ],
1456 target=main.params[ 'PING' ][ 'target5' ],
1457 pingTime=500 )
1458 main.Mininet2.pingLong(
1459 src=main.params[ 'PING' ][ 'source6' ],
1460 target=main.params[ 'PING' ][ 'target6' ],
1461 pingTime=500 )
1462 main.Mininet2.pingLong(
1463 src=main.params[ 'PING' ][ 'source7' ],
1464 target=main.params[ 'PING' ][ 'target7' ],
1465 pingTime=500 )
1466 main.Mininet2.pingLong(
1467 src=main.params[ 'PING' ][ 'source8' ],
1468 target=main.params[ 'PING' ][ 'target8' ],
1469 pingTime=500 )
1470 main.Mininet2.pingLong(
1471 src=main.params[ 'PING' ][ 'source9' ],
1472 target=main.params[ 'PING' ][ 'target9' ],
1473 pingTime=500 )
1474 main.Mininet2.pingLong(
1475 src=main.params[ 'PING' ][ 'source10' ],
1476 target=main.params[ 'PING' ][ 'target10' ],
1477 pingTime=500 )
1478
1479 main.step( "Collecting topology information from ONOS" )
1480 devices = []
1481 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001482 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001483 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001484 name="devices-" + str( i ),
1485 args=[ ] )
1486 threads.append( t )
1487 t.start()
1488
1489 for t in threads:
1490 t.join()
1491 devices.append( t.result )
1492 hosts = []
1493 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001494 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001495 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001496 name="hosts-" + str( i ),
1497 args=[ ] )
1498 threads.append( t )
1499 t.start()
1500
1501 for t in threads:
1502 t.join()
1503 try:
1504 hosts.append( json.loads( t.result ) )
1505 except ( ValueError, TypeError ):
1506 # FIXME: better handling of this, print which node
1507 # Maybe use thread name?
1508 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001509 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001510 hosts.append( None )
1511
1512 ports = []
1513 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001514 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001515 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001516 name="ports-" + str( i ),
1517 args=[ ] )
1518 threads.append( t )
1519 t.start()
1520
1521 for t in threads:
1522 t.join()
1523 ports.append( t.result )
1524 links = []
1525 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001526 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001527 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001528 name="links-" + str( i ),
1529 args=[ ] )
1530 threads.append( t )
1531 t.start()
1532
1533 for t in threads:
1534 t.join()
1535 links.append( t.result )
1536 clusters = []
1537 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001538 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001539 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001540 name="clusters-" + str( i ),
1541 args=[ ] )
1542 threads.append( t )
1543 t.start()
1544
1545 for t in threads:
1546 t.join()
1547 clusters.append( t.result )
1548 # Compare json objects for hosts and dataplane clusters
1549
1550 # hosts
1551 main.step( "Host view is consistent across ONOS nodes" )
1552 consistentHostsResult = main.TRUE
1553 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001554 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001555 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001556 if hosts[ controller ] == hosts[ 0 ]:
1557 continue
1558 else: # hosts not consistent
1559 main.log.error( "hosts from ONOS" +
1560 controllerStr +
1561 " is inconsistent with ONOS1" )
1562 main.log.warn( repr( hosts[ controller ] ) )
1563 consistentHostsResult = main.FALSE
1564
1565 else:
1566 main.log.error( "Error in getting ONOS hosts from ONOS" +
1567 controllerStr )
1568 consistentHostsResult = main.FALSE
1569 main.log.warn( "ONOS" + controllerStr +
1570 " hosts response: " +
1571 repr( hosts[ controller ] ) )
1572 utilities.assert_equals(
1573 expect=main.TRUE,
1574 actual=consistentHostsResult,
1575 onpass="Hosts view is consistent across all ONOS nodes",
1576 onfail="ONOS nodes have different views of hosts" )
1577
1578 main.step( "Each host has an IP address" )
1579 ipResult = main.TRUE
1580 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001581 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001582 if hosts[ controller ]:
1583 for host in hosts[ controller ]:
1584 if not host.get( 'ipAddresses', [ ] ):
1585 main.log.error( "Error with host ips on controller" +
1586 controllerStr + ": " + str( host ) )
1587 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001588 utilities.assert_equals(
1589 expect=main.TRUE,
1590 actual=ipResult,
1591 onpass="The ips of the hosts aren't empty",
1592 onfail="The ip of at least one host is missing" )
1593
1594 # Strongly connected clusters of devices
1595 main.step( "Cluster view is consistent across ONOS nodes" )
1596 consistentClustersResult = main.TRUE
1597 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001598 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001599 if "Error" not in clusters[ controller ]:
1600 if clusters[ controller ] == clusters[ 0 ]:
1601 continue
1602 else: # clusters not consistent
1603 main.log.error( "clusters from ONOS" + controllerStr +
1604 " is inconsistent with ONOS1" )
1605 consistentClustersResult = main.FALSE
1606
1607 else:
1608 main.log.error( "Error in getting dataplane clusters " +
1609 "from ONOS" + controllerStr )
1610 consistentClustersResult = main.FALSE
1611 main.log.warn( "ONOS" + controllerStr +
1612 " clusters response: " +
1613 repr( clusters[ controller ] ) )
1614 utilities.assert_equals(
1615 expect=main.TRUE,
1616 actual=consistentClustersResult,
1617 onpass="Clusters view is consistent across all ONOS nodes",
1618 onfail="ONOS nodes have different views of clusters" )
1619 # there should always only be one cluster
1620 main.step( "Cluster view correct across ONOS nodes" )
1621 try:
1622 numClusters = len( json.loads( clusters[ 0 ] ) )
1623 except ( ValueError, TypeError ):
1624 main.log.exception( "Error parsing clusters[0]: " +
1625 repr( clusters[ 0 ] ) )
1626 clusterResults = main.FALSE
1627 if numClusters == 1:
1628 clusterResults = main.TRUE
1629 utilities.assert_equals(
1630 expect=1,
1631 actual=numClusters,
1632 onpass="ONOS shows 1 SCC",
1633 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1634
1635 main.step( "Comparing ONOS topology to MN" )
1636 devicesResults = main.TRUE
1637 linksResults = main.TRUE
1638 hostsResults = main.TRUE
1639 mnSwitches = main.Mininet1.getSwitches()
1640 mnLinks = main.Mininet1.getLinks()
1641 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001642 for controller in main.activeNodes:
1643 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001644 if devices[ controller ] and ports[ controller ] and\
1645 "Error" not in devices[ controller ] and\
1646 "Error" not in ports[ controller ]:
1647
1648 currentDevicesResult = main.Mininet1.compareSwitches(
1649 mnSwitches,
1650 json.loads( devices[ controller ] ),
1651 json.loads( ports[ controller ] ) )
1652 else:
1653 currentDevicesResult = main.FALSE
1654 utilities.assert_equals( expect=main.TRUE,
1655 actual=currentDevicesResult,
1656 onpass="ONOS" + controllerStr +
1657 " Switches view is correct",
1658 onfail="ONOS" + controllerStr +
1659 " Switches view is incorrect" )
1660 if links[ controller ] and "Error" not in links[ controller ]:
1661 currentLinksResult = main.Mininet1.compareLinks(
1662 mnSwitches, mnLinks,
1663 json.loads( links[ controller ] ) )
1664 else:
1665 currentLinksResult = main.FALSE
1666 utilities.assert_equals( expect=main.TRUE,
1667 actual=currentLinksResult,
1668 onpass="ONOS" + controllerStr +
1669 " links view is correct",
1670 onfail="ONOS" + controllerStr +
1671 " links view is incorrect" )
1672
1673 if hosts[ controller ] or "Error" not in hosts[ controller ]:
1674 currentHostsResult = main.Mininet1.compareHosts(
1675 mnHosts,
1676 hosts[ controller ] )
1677 else:
1678 currentHostsResult = main.FALSE
1679 utilities.assert_equals( expect=main.TRUE,
1680 actual=currentHostsResult,
1681 onpass="ONOS" + controllerStr +
1682 " hosts exist in Mininet",
1683 onfail="ONOS" + controllerStr +
1684 " hosts don't match Mininet" )
1685
1686 devicesResults = devicesResults and currentDevicesResult
1687 linksResults = linksResults and currentLinksResult
1688 hostsResults = hostsResults and currentHostsResult
1689
1690 main.step( "Device information is correct" )
1691 utilities.assert_equals(
1692 expect=main.TRUE,
1693 actual=devicesResults,
1694 onpass="Device information is correct",
1695 onfail="Device information is incorrect" )
1696
1697 main.step( "Links are correct" )
1698 utilities.assert_equals(
1699 expect=main.TRUE,
1700 actual=linksResults,
1701 onpass="Link are correct",
1702 onfail="Links are incorrect" )
1703
1704 main.step( "Hosts are correct" )
1705 utilities.assert_equals(
1706 expect=main.TRUE,
1707 actual=hostsResults,
1708 onpass="Hosts are correct",
1709 onfail="Hosts are incorrect" )
1710
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001711 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001712 """
1713 The Failure case.
1714 """
Jon Halle1a3b752015-07-22 13:02:46 -07001715 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001716 assert main, "main not defined"
1717 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001718 assert main.CLIs, "main.CLIs not defined"
1719 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001720 main.case( "Stop minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001721
1722 main.step( "Checking ONOS Logs for errors" )
1723 for node in main.nodes:
1724 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1725 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1726
Jon Hall3b489db2015-10-05 14:38:37 -07001727 n = len( main.nodes ) # Number of nodes
1728 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1729 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1730 if n > 3:
1731 main.kill.append( p - 1 )
1732 # NOTE: This only works for cluster sizes of 3,5, or 7.
1733
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001734 main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001735 killResults = main.TRUE
1736 for i in main.kill:
1737 killResults = killResults and\
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001738 main.ONOSbench.onosStop( main.nodes[i].ip_address )
1739 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001740 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001741 onpass="ONOS nodes stopped successfully",
1742 onfail="ONOS nodes NOT successfully stopped" )
1743
1744 def CASE62( self, main ):
1745 """
1746 The bring up stopped nodes
1747 """
1748 import time
1749 assert main.numCtrls, "main.numCtrls not defined"
1750 assert main, "main not defined"
1751 assert utilities.assert_equals, "utilities.assert_equals not defined"
1752 assert main.CLIs, "main.CLIs not defined"
1753 assert main.nodes, "main.nodes not defined"
1754 assert main.kill, "main.kill not defined"
1755 main.case( "Restart minority of ONOS nodes" )
1756
1757 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1758 startResults = main.TRUE
1759 restartTime = time.time()
1760 for i in main.kill:
1761 startResults = startResults and\
1762 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1763 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1764 onpass="ONOS nodes started successfully",
1765 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001766
1767 main.step( "Checking if ONOS is up yet" )
1768 count = 0
1769 onosIsupResult = main.FALSE
1770 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001771 onosIsupResult = main.TRUE
1772 for i in main.kill:
1773 onosIsupResult = onosIsupResult and\
1774 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001775 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001776 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1777 onpass="ONOS restarted successfully",
1778 onfail="ONOS restart NOT successful" )
1779
Jon Halle1a3b752015-07-22 13:02:46 -07001780 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001781 cliResults = main.TRUE
1782 for i in main.kill:
1783 cliResults = cliResults and\
1784 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001785 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001786 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1787 onpass="ONOS cli restarted",
1788 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001789 main.activeNodes.sort()
1790 try:
1791 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1792 "List of active nodes has duplicates, this likely indicates something was run out of order"
1793 except AssertionError:
1794 main.log.exception( "" )
1795 main.cleanup()
1796 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001797
1798 # Grab the time of restart so we chan check how long the gossip
1799 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001800 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001801 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001802 # TODO: MAke this configurable. Also, we are breaking the above timer
1803 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001804 node = main.activeNodes[0]
1805 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1806 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1807 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001808
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        Verifies that, with the CASE61-stopped nodes excluded, the surviving
        cluster still agrees on: device mastership, intents (both across
        nodes and against the pre-failure snapshot), switch flow tables, and
        the election-app leader.

        NOTE(review): this case reads names leaked from earlier cases via
        TestON's shared execution scope — `intentState` (set in CASE5, see
        the NOTE below) and `flows` (presumably the per-switch flow-table
        snapshot, also captured earlier — verify against CASE5). If those
        cases did not run first, this case raises NameError.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # Default main.kill to empty so this case can run even if CASE61
        # (which normally defines it) was skipped.
        try:
            main.kill
        except AttributeError:
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master; query all active nodes in
        # parallel threads, then AND the results together.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect the full mastership (roles) output from every active node.
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output is empty or contains "Error".
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes' raw roles output must match node 0's exactly.
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's roles for debugging the inconsistency.
            # NOTE(review): log.warn is given two arguments here (trailing
            # comma after "...roles: ",) — confirm the logger accepts that.
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        # Fetch JSON intents from every active node in parallel.
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        # Flag any node whose intents output is empty or contains "Error".
        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare character-sorted raw output against node 0's.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node.
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states.
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            # Dump each node's full intents JSON for debugging.
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # Fast path: raw output identical to the pre-failure snapshot.
        if intentState and intentState == ONOSIntents[ 0 ]:
            sameIntents = main.TRUE
            main.log.info( "Intents are consistent with before failure" )
        # TODO: possibly the states have changed? we may need to figure out
        #       what the acceptable states are
        elif len( intentState ) == len( ONOSIntents[ 0 ] ):
            # Same length: fall back to per-intent JSON membership check.
            sameIntents = main.TRUE
            try:
                before = json.loads( intentState )
                after = json.loads( ONOSIntents[ 0 ] )
                for intent in before:
                    if intent not in after:
                        sameIntents = main.FALSE
                        main.log.debug( "Intent is not currently in ONOS " +
                                        "(at least in the same form):" )
                        main.log.debug( json.dumps( intent ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        if sameIntents == main.FALSE:
            # Dump before/after intents to help diagnose the difference.
            try:
                main.log.debug( "ONOS intents before: " )
                main.log.debug( json.dumps( json.loads( intentState ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
                main.log.debug( "Current ONOS intents: " )
                main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        # Compare each of the 28 switches' flow tables against the
        # pre-failure snapshot in `flows` (captured in an earlier case).
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
            if FlowTables == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )

        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        # Stop the long-running background pings started earlier.
        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection: every active node must agree on one
        # leader, and that leader must not be one of the restarted nodes.
        leaderList = []

        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2125
2126 def CASE8( self, main ):
2127 """
2128 Compare topo
2129 """
2130 import json
2131 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002132 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002133 assert main, "main not defined"
2134 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002135 assert main.CLIs, "main.CLIs not defined"
2136 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002137
2138 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002139 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002140 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002141 topoResult = main.FALSE
2142 elapsed = 0
2143 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002144 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002145 startTime = time.time()
2146 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002147 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002148 devicesResults = main.TRUE
2149 linksResults = main.TRUE
2150 hostsResults = main.TRUE
2151 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002152 count += 1
2153 cliStart = time.time()
2154 devices = []
2155 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002156 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002157 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002158 name="devices-" + str( i ),
2159 args=[ ] )
2160 threads.append( t )
2161 t.start()
2162
2163 for t in threads:
2164 t.join()
2165 devices.append( t.result )
2166 hosts = []
2167 ipResult = main.TRUE
2168 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002169 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002170 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07002171 name="hosts-" + str( i ),
2172 args=[ ] )
2173 threads.append( t )
2174 t.start()
2175
2176 for t in threads:
2177 t.join()
2178 try:
2179 hosts.append( json.loads( t.result ) )
2180 except ( ValueError, TypeError ):
2181 main.log.exception( "Error parsing hosts results" )
2182 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002183 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002184 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002185 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002186 if hosts[ controller ]:
2187 for host in hosts[ controller ]:
2188 if host is None or host.get( 'ipAddresses', [] ) == []:
2189 main.log.error(
2190 "Error with host ipAddresses on controller" +
2191 controllerStr + ": " + str( host ) )
2192 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002193 ports = []
2194 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002195 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002196 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002197 name="ports-" + str( i ),
2198 args=[ ] )
2199 threads.append( t )
2200 t.start()
2201
2202 for t in threads:
2203 t.join()
2204 ports.append( t.result )
2205 links = []
2206 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002207 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002208 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002209 name="links-" + str( i ),
2210 args=[ ] )
2211 threads.append( t )
2212 t.start()
2213
2214 for t in threads:
2215 t.join()
2216 links.append( t.result )
2217 clusters = []
2218 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002219 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002220 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002221 name="clusters-" + str( i ),
2222 args=[ ] )
2223 threads.append( t )
2224 t.start()
2225
2226 for t in threads:
2227 t.join()
2228 clusters.append( t.result )
2229
2230 elapsed = time.time() - startTime
2231 cliTime = time.time() - cliStart
2232 print "Elapsed time: " + str( elapsed )
2233 print "CLI time: " + str( cliTime )
2234
2235 mnSwitches = main.Mininet1.getSwitches()
2236 mnLinks = main.Mininet1.getLinks()
2237 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002238 for controller in range( len( main.activeNodes ) ):
2239 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002240 if devices[ controller ] and ports[ controller ] and\
2241 "Error" not in devices[ controller ] and\
2242 "Error" not in ports[ controller ]:
2243
2244 currentDevicesResult = main.Mininet1.compareSwitches(
2245 mnSwitches,
2246 json.loads( devices[ controller ] ),
2247 json.loads( ports[ controller ] ) )
2248 else:
2249 currentDevicesResult = main.FALSE
2250 utilities.assert_equals( expect=main.TRUE,
2251 actual=currentDevicesResult,
2252 onpass="ONOS" + controllerStr +
2253 " Switches view is correct",
2254 onfail="ONOS" + controllerStr +
2255 " Switches view is incorrect" )
2256
2257 if links[ controller ] and "Error" not in links[ controller ]:
2258 currentLinksResult = main.Mininet1.compareLinks(
2259 mnSwitches, mnLinks,
2260 json.loads( links[ controller ] ) )
2261 else:
2262 currentLinksResult = main.FALSE
2263 utilities.assert_equals( expect=main.TRUE,
2264 actual=currentLinksResult,
2265 onpass="ONOS" + controllerStr +
2266 " links view is correct",
2267 onfail="ONOS" + controllerStr +
2268 " links view is incorrect" )
2269
2270 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2271 currentHostsResult = main.Mininet1.compareHosts(
2272 mnHosts,
2273 hosts[ controller ] )
2274 else:
2275 currentHostsResult = main.FALSE
2276 utilities.assert_equals( expect=main.TRUE,
2277 actual=currentHostsResult,
2278 onpass="ONOS" + controllerStr +
2279 " hosts exist in Mininet",
2280 onfail="ONOS" + controllerStr +
2281 " hosts don't match Mininet" )
2282 # CHECKING HOST ATTACHMENT POINTS
2283 hostAttachment = True
2284 zeroHosts = False
2285 # FIXME: topo-HA/obelisk specific mappings:
2286 # key is mac and value is dpid
2287 mappings = {}
2288 for i in range( 1, 29 ): # hosts 1 through 28
2289 # set up correct variables:
2290 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2291 if i == 1:
2292 deviceId = "1000".zfill(16)
2293 elif i == 2:
2294 deviceId = "2000".zfill(16)
2295 elif i == 3:
2296 deviceId = "3000".zfill(16)
2297 elif i == 4:
2298 deviceId = "3004".zfill(16)
2299 elif i == 5:
2300 deviceId = "5000".zfill(16)
2301 elif i == 6:
2302 deviceId = "6000".zfill(16)
2303 elif i == 7:
2304 deviceId = "6007".zfill(16)
2305 elif i >= 8 and i <= 17:
2306 dpid = '3' + str( i ).zfill( 3 )
2307 deviceId = dpid.zfill(16)
2308 elif i >= 18 and i <= 27:
2309 dpid = '6' + str( i ).zfill( 3 )
2310 deviceId = dpid.zfill(16)
2311 elif i == 28:
2312 deviceId = "2800".zfill(16)
2313 mappings[ macId ] = deviceId
2314 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2315 if hosts[ controller ] == []:
2316 main.log.warn( "There are no hosts discovered" )
2317 zeroHosts = True
2318 else:
2319 for host in hosts[ controller ]:
2320 mac = None
2321 location = None
2322 device = None
2323 port = None
2324 try:
2325 mac = host.get( 'mac' )
2326 assert mac, "mac field could not be found for this host object"
2327
2328 location = host.get( 'location' )
2329 assert location, "location field could not be found for this host object"
2330
2331 # Trim the protocol identifier off deviceId
2332 device = str( location.get( 'elementId' ) ).split(':')[1]
2333 assert device, "elementId field could not be found for this host location object"
2334
2335 port = location.get( 'port' )
2336 assert port, "port field could not be found for this host location object"
2337
2338 # Now check if this matches where they should be
2339 if mac and device and port:
2340 if str( port ) != "1":
2341 main.log.error( "The attachment port is incorrect for " +
2342 "host " + str( mac ) +
2343 ". Expected: 1 Actual: " + str( port) )
2344 hostAttachment = False
2345 if device != mappings[ str( mac ) ]:
2346 main.log.error( "The attachment device is incorrect for " +
2347 "host " + str( mac ) +
2348 ". Expected: " + mappings[ str( mac ) ] +
2349 " Actual: " + device )
2350 hostAttachment = False
2351 else:
2352 hostAttachment = False
2353 except AssertionError:
2354 main.log.exception( "Json object not as expected" )
2355 main.log.error( repr( host ) )
2356 hostAttachment = False
2357 else:
2358 main.log.error( "No hosts json output or \"Error\"" +
2359 " in output. hosts = " +
2360 repr( hosts[ controller ] ) )
2361 if zeroHosts is False:
2362 hostAttachment = True
2363
2364 # END CHECKING HOST ATTACHMENT POINTS
2365 devicesResults = devicesResults and currentDevicesResult
2366 linksResults = linksResults and currentLinksResult
2367 hostsResults = hostsResults and currentHostsResult
2368 hostAttachmentResults = hostAttachmentResults and\
2369 hostAttachment
Jon Halle9b1fa32015-12-08 15:32:21 -08002370 topoResult = devicesResults and linksResults and\
2371 hostsResults and hostAttachmentResults
2372 utilities.assert_equals( expect=True,
2373 actual=topoResult,
2374 onpass="ONOS topology matches Mininet",
2375 onfail="ONOS topology don't match Mininet" )
2376 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002377
2378 # Compare json objects for hosts and dataplane clusters
2379
2380 # hosts
2381 main.step( "Hosts view is consistent across all ONOS nodes" )
2382 consistentHostsResult = main.TRUE
2383 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002384 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08002385 if hosts[ controller ] or "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002386 if hosts[ controller ] == hosts[ 0 ]:
2387 continue
2388 else: # hosts not consistent
2389 main.log.error( "hosts from ONOS" + controllerStr +
2390 " is inconsistent with ONOS1" )
2391 main.log.warn( repr( hosts[ controller ] ) )
2392 consistentHostsResult = main.FALSE
2393
2394 else:
2395 main.log.error( "Error in getting ONOS hosts from ONOS" +
2396 controllerStr )
2397 consistentHostsResult = main.FALSE
2398 main.log.warn( "ONOS" + controllerStr +
2399 " hosts response: " +
2400 repr( hosts[ controller ] ) )
2401 utilities.assert_equals(
2402 expect=main.TRUE,
2403 actual=consistentHostsResult,
2404 onpass="Hosts view is consistent across all ONOS nodes",
2405 onfail="ONOS nodes have different views of hosts" )
2406
2407 main.step( "Hosts information is correct" )
2408 hostsResults = hostsResults and ipResult
2409 utilities.assert_equals(
2410 expect=main.TRUE,
2411 actual=hostsResults,
2412 onpass="Host information is correct",
2413 onfail="Host information is incorrect" )
2414
2415 main.step( "Host attachment points to the network" )
2416 utilities.assert_equals(
2417 expect=True,
2418 actual=hostAttachmentResults,
2419 onpass="Hosts are correctly attached to the network",
2420 onfail="ONOS did not correctly attach hosts to the network" )
2421
2422 # Strongly connected clusters of devices
2423 main.step( "Clusters view is consistent across all ONOS nodes" )
2424 consistentClustersResult = main.TRUE
2425 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002426 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002427 if "Error" not in clusters[ controller ]:
2428 if clusters[ controller ] == clusters[ 0 ]:
2429 continue
2430 else: # clusters not consistent
2431 main.log.error( "clusters from ONOS" +
2432 controllerStr +
2433 " is inconsistent with ONOS1" )
2434 consistentClustersResult = main.FALSE
2435
2436 else:
2437 main.log.error( "Error in getting dataplane clusters " +
2438 "from ONOS" + controllerStr )
2439 consistentClustersResult = main.FALSE
2440 main.log.warn( "ONOS" + controllerStr +
2441 " clusters response: " +
2442 repr( clusters[ controller ] ) )
2443 utilities.assert_equals(
2444 expect=main.TRUE,
2445 actual=consistentClustersResult,
2446 onpass="Clusters view is consistent across all ONOS nodes",
2447 onfail="ONOS nodes have different views of clusters" )
2448
2449 main.step( "There is only one SCC" )
2450 # there should always only be one cluster
2451 try:
2452 numClusters = len( json.loads( clusters[ 0 ] ) )
2453 except ( ValueError, TypeError ):
2454 main.log.exception( "Error parsing clusters[0]: " +
2455 repr( clusters[0] ) )
2456 clusterResults = main.FALSE
2457 if numClusters == 1:
2458 clusterResults = main.TRUE
2459 utilities.assert_equals(
2460 expect=1,
2461 actual=numClusters,
2462 onpass="ONOS shows 1 SCC",
2463 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2464
2465 topoResult = ( devicesResults and linksResults
2466 and hostsResults and consistentHostsResult
2467 and consistentClustersResult and clusterResults
2468 and ipResult and hostAttachmentResults )
2469
2470 topoResult = topoResult and int( count <= 2 )
2471 note = "note it takes about " + str( int( cliTime ) ) + \
2472 " seconds for the test to make all the cli calls to fetch " +\
2473 "the topology from each ONOS instance"
2474 main.log.info(
2475 "Very crass estimate for topology discovery/convergence( " +
2476 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2477 str( count ) + " tries" )
2478
2479 main.step( "Device information is correct" )
2480 utilities.assert_equals(
2481 expect=main.TRUE,
2482 actual=devicesResults,
2483 onpass="Device information is correct",
2484 onfail="Device information is incorrect" )
2485
2486 main.step( "Links are correct" )
2487 utilities.assert_equals(
2488 expect=main.TRUE,
2489 actual=linksResults,
2490 onpass="Link are correct",
2491 onfail="Links are incorrect" )
2492
2493 # FIXME: move this to an ONOS state case
2494 main.step( "Checking ONOS nodes" )
2495 nodesOutput = []
2496 nodeResults = main.TRUE
2497 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002498 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002499 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002500 name="nodes-" + str( i ),
2501 args=[ ] )
2502 threads.append( t )
2503 t.start()
2504
2505 for t in threads:
2506 t.join()
2507 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002508 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002509 ips.sort()
Jon Hall5cf14d52015-07-16 12:15:19 -07002510 for i in nodesOutput:
2511 try:
2512 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002513 activeIps = []
2514 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002515 for node in current:
Jon Halle9b1fa32015-12-08 15:32:21 -08002516 if node['state'] == 'ACTIVE':
2517 activeIps.append( node['ip'] )
2518 activeIps.sort()
2519 if ips == activeIps:
2520 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002521 except ( ValueError, TypeError ):
2522 main.log.error( "Error parsing nodes output" )
2523 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002524 currentResult = main.FALSE
2525 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002526 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2527 onpass="Nodes check successful",
2528 onfail="Nodes check NOT successful" )
2529
2530 def CASE9( self, main ):
2531 """
2532 Link s3-s28 down
2533 """
2534 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002535 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002536 assert main, "main not defined"
2537 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002538 assert main.CLIs, "main.CLIs not defined"
2539 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002540 # NOTE: You should probably run a topology check after this
2541
2542 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2543
2544 description = "Turn off a link to ensure that Link Discovery " +\
2545 "is working properly"
2546 main.case( description )
2547
2548 main.step( "Kill Link between s3 and s28" )
2549 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2550 main.log.info( "Waiting " + str( linkSleep ) +
2551 " seconds for link down to be discovered" )
2552 time.sleep( linkSleep )
2553 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2554 onpass="Link down successful",
2555 onfail="Failed to bring link down" )
2556 # TODO do some sort of check here
2557
2558 def CASE10( self, main ):
2559 """
2560 Link s3-s28 up
2561 """
2562 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002563 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002564 assert main, "main not defined"
2565 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002566 assert main.CLIs, "main.CLIs not defined"
2567 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002568 # NOTE: You should probably run a topology check after this
2569
2570 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2571
2572 description = "Restore a link to ensure that Link Discovery is " + \
2573 "working properly"
2574 main.case( description )
2575
2576 main.step( "Bring link between s3 and s28 back up" )
2577 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2578 main.log.info( "Waiting " + str( linkSleep ) +
2579 " seconds for link up to be discovered" )
2580 time.sleep( linkSleep )
2581 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2582 onpass="Link up successful",
2583 onfail="Failed to bring link up" )
2584 # TODO do some sort of check here
2585
2586 def CASE11( self, main ):
2587 """
2588 Switch Down
2589 """
2590 # NOTE: You should probably run a topology check after this
2591 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002592 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002593 assert main, "main not defined"
2594 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002595 assert main.CLIs, "main.CLIs not defined"
2596 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002597
2598 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2599
2600 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002601 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002602 main.case( description )
2603 switch = main.params[ 'kill' ][ 'switch' ]
2604 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2605
2606 # TODO: Make this switch parameterizable
2607 main.step( "Kill " + switch )
2608 main.log.info( "Deleting " + switch )
2609 main.Mininet1.delSwitch( switch )
2610 main.log.info( "Waiting " + str( switchSleep ) +
2611 " seconds for switch down to be discovered" )
2612 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002613 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002614 # Peek at the deleted switch
2615 main.log.warn( str( device ) )
2616 result = main.FALSE
2617 if device and device[ 'available' ] is False:
2618 result = main.TRUE
2619 utilities.assert_equals( expect=main.TRUE, actual=result,
2620 onpass="Kill switch successful",
2621 onfail="Failed to kill switch?" )
2622
2623 def CASE12( self, main ):
2624 """
2625 Switch Up
2626 """
2627 # NOTE: You should probably run a topology check after this
2628 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002629 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002630 assert main, "main not defined"
2631 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002632 assert main.CLIs, "main.CLIs not defined"
2633 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002634 assert ONOS1Port, "ONOS1Port not defined"
2635 assert ONOS2Port, "ONOS2Port not defined"
2636 assert ONOS3Port, "ONOS3Port not defined"
2637 assert ONOS4Port, "ONOS4Port not defined"
2638 assert ONOS5Port, "ONOS5Port not defined"
2639 assert ONOS6Port, "ONOS6Port not defined"
2640 assert ONOS7Port, "ONOS7Port not defined"
2641
2642 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2643 switch = main.params[ 'kill' ][ 'switch' ]
2644 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2645 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002646 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002647 description = "Adding a switch to ensure it is discovered correctly"
2648 main.case( description )
2649
2650 main.step( "Add back " + switch )
2651 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2652 for peer in links:
2653 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002654 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002655 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2656 main.log.info( "Waiting " + str( switchSleep ) +
2657 " seconds for switch up to be discovered" )
2658 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002659 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002660 # Peek at the deleted switch
2661 main.log.warn( str( device ) )
2662 result = main.FALSE
2663 if device and device[ 'available' ]:
2664 result = main.TRUE
2665 utilities.assert_equals( expect=main.TRUE, actual=result,
2666 onpass="add switch successful",
2667 onfail="Failed to add switch?" )
2668
2669 def CASE13( self, main ):
2670 """
2671 Clean up
2672 """
2673 import os
2674 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002675 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002676 assert main, "main not defined"
2677 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002678 assert main.CLIs, "main.CLIs not defined"
2679 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002680
2681 # printing colors to terminal
2682 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2683 'blue': '\033[94m', 'green': '\033[92m',
2684 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2685 main.case( "Test Cleanup" )
2686 main.step( "Killing tcpdumps" )
2687 main.Mininet2.stopTcpdump()
2688
2689 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002690 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002691 main.step( "Copying MN pcap and ONOS log files to test station" )
2692 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2693 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002694 # NOTE: MN Pcap file is being saved to logdir.
2695 # We scp this file as MN and TestON aren't necessarily the same vm
2696
2697 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002698 # TODO: Load these from params
2699 # NOTE: must end in /
2700 logFolder = "/opt/onos/log/"
2701 logFiles = [ "karaf.log", "karaf.log.1" ]
2702 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002703 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002704 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002705 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002706 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2707 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002708 # std*.log's
2709 # NOTE: must end in /
2710 logFolder = "/opt/onos/var/"
2711 logFiles = [ "stderr.log", "stdout.log" ]
2712 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002713 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002714 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002715 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002716 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2717 logFolder + f, dstName )
2718 else:
2719 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002720
2721 main.step( "Stopping Mininet" )
2722 mnResult = main.Mininet1.stopNet()
2723 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2724 onpass="Mininet stopped",
2725 onfail="MN cleanup NOT successful" )
2726
2727 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002728 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002729 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2730 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002731
2732 try:
2733 timerLog = open( main.logdir + "/Timers.csv", 'w')
2734 # Overwrite with empty line and close
2735 labels = "Gossip Intents, Restart"
2736 data = str( gossipTime ) + ", " + str( main.restartTime )
2737 timerLog.write( labels + "\n" + data )
2738 timerLog.close()
2739 except NameError, e:
2740 main.log.exception(e)
2741
2742 def CASE14( self, main ):
2743 """
2744 start election app on all onos nodes
2745 """
Jon Halle1a3b752015-07-22 13:02:46 -07002746 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002747 assert main, "main not defined"
2748 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002749 assert main.CLIs, "main.CLIs not defined"
2750 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002751
2752 main.case("Start Leadership Election app")
2753 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002754 onosCli = main.CLIs[ main.activeNodes[0] ]
2755 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002756 utilities.assert_equals(
2757 expect=main.TRUE,
2758 actual=appResult,
2759 onpass="Election app installed",
2760 onfail="Something went wrong with installing Leadership election" )
2761
2762 main.step( "Run for election on each node" )
2763 leaderResult = main.TRUE
2764 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002765 for i in main.activeNodes:
2766 main.CLIs[i].electionTestRun()
2767 for i in main.activeNodes:
2768 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002769 leader = cli.electionTestLeader()
2770 if leader is None or leader == main.FALSE:
2771 main.log.error( cli.name + ": Leader for the election app " +
2772 "should be an ONOS node, instead got '" +
2773 str( leader ) + "'" )
2774 leaderResult = main.FALSE
2775 leaders.append( leader )
2776 utilities.assert_equals(
2777 expect=main.TRUE,
2778 actual=leaderResult,
2779 onpass="Successfully ran for leadership",
2780 onfail="Failed to run for leadership" )
2781
2782 main.step( "Check that each node shows the same leader" )
2783 sameLeader = main.TRUE
2784 if len( set( leaders ) ) != 1:
2785 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002786 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002787 str( leaders ) )
2788 utilities.assert_equals(
2789 expect=main.TRUE,
2790 actual=sameLeader,
2791 onpass="Leadership is consistent for the election topic",
2792 onfail="Nodes have different leaders" )
2793
2794 def CASE15( self, main ):
2795 """
2796 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002797 15.1 Run election on each node
2798 15.2 Check that each node has the same leaders and candidates
2799 15.3 Find current leader and withdraw
2800 15.4 Check that a new node was elected leader
2801 15.5 Check that that new leader was the candidate of old leader
2802 15.6 Run for election on old leader
2803 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2804 15.8 Make sure that the old leader was added to the candidate list
2805
2806 old and new variable prefixes refer to data from before vs after
2807 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002808 """
2809 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002810 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002811 assert main, "main not defined"
2812 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002813 assert main.CLIs, "main.CLIs not defined"
2814 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002815
Jon Hall5cf14d52015-07-16 12:15:19 -07002816 description = "Check that Leadership Election is still functional"
2817 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002818 # NOTE: Need to re-run since being a canidate is not persistant
2819 # TODO: add check for "Command not found:" in the driver, this
2820 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002821
acsmars71adceb2015-08-31 15:09:26 -07002822 oldLeaders = [] # leaders by node before withdrawl from candidates
2823 newLeaders = [] # leaders by node after withdrawl from candidates
2824 oldAllCandidates = [] # list of lists of each nodes' candidates before
2825 newAllCandidates = [] # list of lists of each nodes' candidates after
2826 oldCandidates = [] # list of candidates from node 0 before withdrawl
2827 newCandidates = [] # list of candidates from node 0 after withdrawl
2828 oldLeader = '' # the old leader from oldLeaders, None if not same
2829 newLeader = '' # the new leaders fron newLoeaders, None if not same
2830 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2831 expectNoLeader = False # True when there is only one leader
2832 if main.numCtrls == 1:
2833 expectNoLeader = True
2834
2835 main.step( "Run for election on each node" )
2836 electionResult = main.TRUE
2837
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002838 for i in main.activeNodes: # run test election on each node
2839 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002840 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002841 utilities.assert_equals(
2842 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002843 actual=electionResult,
2844 onpass="All nodes successfully ran for leadership",
2845 onfail="At least one node failed to run for leadership" )
2846
acsmars3a72bde2015-09-02 14:16:22 -07002847 if electionResult == main.FALSE:
2848 main.log.error(
2849 "Skipping Test Case because Election Test App isn't loaded" )
2850 main.skipCase()
2851
acsmars71adceb2015-08-31 15:09:26 -07002852 main.step( "Check that each node shows the same leader and candidates" )
2853 sameResult = main.TRUE
2854 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002855 for i in main.activeNodes:
2856 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002857 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2858 oldAllCandidates.append( node )
2859 oldLeaders.append( node[ 0 ] )
2860 oldCandidates = oldAllCandidates[ 0 ]
2861
2862 # Check that each node has the same leader. Defines oldLeader
2863 if len( set( oldLeaders ) ) != 1:
2864 sameResult = main.FALSE
2865 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2866 oldLeader = None
2867 else:
2868 oldLeader = oldLeaders[ 0 ]
2869
2870 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002871 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002872 for candidates in oldAllCandidates:
2873 if set( candidates ) != set( oldCandidates ):
2874 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002875 candidateDiscrepancy = True
2876
2877 if candidateDiscrepancy:
2878 failMessage += " and candidates"
2879
acsmars71adceb2015-08-31 15:09:26 -07002880 utilities.assert_equals(
2881 expect=main.TRUE,
2882 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002883 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002884 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002885
2886 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002887 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002888 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002889 if oldLeader is None:
2890 main.log.error( "Leadership isn't consistent." )
2891 withdrawResult = main.FALSE
2892 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002893 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002894 if oldLeader == main.nodes[ i ].ip_address:
2895 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002896 break
2897 else: # FOR/ELSE statement
2898 main.log.error( "Leader election, could not find current leader" )
2899 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002900 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002901 utilities.assert_equals(
2902 expect=main.TRUE,
2903 actual=withdrawResult,
2904 onpass="Node was withdrawn from election",
2905 onfail="Node was not withdrawn from election" )
2906
acsmars71adceb2015-08-31 15:09:26 -07002907 main.step( "Check that a new node was elected leader" )
2908
Jon Hall5cf14d52015-07-16 12:15:19 -07002909 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002910 newLeaderResult = main.TRUE
2911 failMessage = "Nodes have different leaders"
2912
2913 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002914 for i in main.activeNodes:
2915 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002916 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2917 # elections might no have finished yet
2918 if node[ 0 ] == 'none' and not expectNoLeader:
2919 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2920 "sure elections are complete." )
2921 time.sleep(5)
2922 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2923 # election still isn't done or there is a problem
2924 if node[ 0 ] == 'none':
2925 main.log.error( "No leader was elected on at least 1 node" )
2926 newLeaderResult = main.FALSE
2927 newAllCandidates.append( node )
2928 newLeaders.append( node[ 0 ] )
2929 newCandidates = newAllCandidates[ 0 ]
2930
2931 # Check that each node has the same leader. Defines newLeader
2932 if len( set( newLeaders ) ) != 1:
2933 newLeaderResult = main.FALSE
2934 main.log.error( "Nodes have different leaders: " +
2935 str( newLeaders ) )
2936 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002937 else:
acsmars71adceb2015-08-31 15:09:26 -07002938 newLeader = newLeaders[ 0 ]
2939
2940 # Check that each node's candidate list is the same
2941 for candidates in newAllCandidates:
2942 if set( candidates ) != set( newCandidates ):
2943 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002944 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002945
2946 # Check that the new leader is not the older leader, which was withdrawn
2947 if newLeader == oldLeader:
2948 newLeaderResult = main.FALSE
2949 main.log.error( "All nodes still see old leader: " + oldLeader +
2950 " as the current leader" )
2951
Jon Hall5cf14d52015-07-16 12:15:19 -07002952 utilities.assert_equals(
2953 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002954 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002955 onpass="Leadership election passed",
2956 onfail="Something went wrong with Leadership election" )
2957
acsmars71adceb2015-08-31 15:09:26 -07002958 main.step( "Check that that new leader was the candidate of old leader")
2959 # candidates[ 2 ] should be come the top candidate after withdrawl
2960 correctCandidateResult = main.TRUE
2961 if expectNoLeader:
2962 if newLeader == 'none':
2963 main.log.info( "No leader expected. None found. Pass" )
2964 correctCandidateResult = main.TRUE
2965 else:
2966 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2967 correctCandidateResult = main.FALSE
2968 elif newLeader != oldCandidates[ 2 ]:
2969 correctCandidateResult = main.FALSE
2970 main.log.error( "Candidate " + newLeader + " was elected. " +
2971 oldCandidates[ 2 ] + " should have had priority." )
2972
2973 utilities.assert_equals(
2974 expect=main.TRUE,
2975 actual=correctCandidateResult,
2976 onpass="Correct Candidate Elected",
2977 onfail="Incorrect Candidate Elected" )
2978
Jon Hall5cf14d52015-07-16 12:15:19 -07002979 main.step( "Run for election on old leader( just so everyone " +
2980 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002981 if oldLeaderCLI is not None:
2982 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002983 else:
acsmars71adceb2015-08-31 15:09:26 -07002984 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002985 runResult = main.FALSE
2986 utilities.assert_equals(
2987 expect=main.TRUE,
2988 actual=runResult,
2989 onpass="App re-ran for election",
2990 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07002991 main.step(
2992 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002993 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07002994 positionResult = main.TRUE
2995 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
2996
2997 # Reset and reuse the new candidate and leaders lists
2998 newAllCandidates = []
2999 newCandidates = []
3000 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003001 for i in main.activeNodes:
3002 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003003 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3004 if oldLeader not in node: # election might no have finished yet
3005 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3006 "be sure elections are complete" )
3007 time.sleep(5)
3008 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3009 if oldLeader not in node: # election still isn't done, errors
3010 main.log.error(
3011 "Old leader was not elected on at least one node" )
3012 positionResult = main.FALSE
3013 newAllCandidates.append( node )
3014 newLeaders.append( node[ 0 ] )
3015 newCandidates = newAllCandidates[ 0 ]
3016
3017 # Check that each node has the same leader. Defines newLeader
3018 if len( set( newLeaders ) ) != 1:
3019 positionResult = main.FALSE
3020 main.log.error( "Nodes have different leaders: " +
3021 str( newLeaders ) )
3022 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003023 else:
acsmars71adceb2015-08-31 15:09:26 -07003024 newLeader = newLeaders[ 0 ]
3025
3026 # Check that each node's candidate list is the same
3027 for candidates in newAllCandidates:
3028 if set( candidates ) != set( newCandidates ):
3029 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003030 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003031
3032 # Check that the re-elected node is last on the candidate List
3033 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003034 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003035 str( newCandidates ) )
3036 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003037
3038 utilities.assert_equals(
3039 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003040 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003041 onpass="Old leader successfully re-ran for election",
3042 onfail="Something went wrong with Leadership election after " +
3043 "the old leader re-ran for election" )
3044
3045 def CASE16( self, main ):
3046 """
3047 Install Distributed Primitives app
3048 """
3049 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003050 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003051 assert main, "main not defined"
3052 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003053 assert main.CLIs, "main.CLIs not defined"
3054 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003055
3056 # Variables for the distributed primitives tests
3057 global pCounterName
3058 global iCounterName
3059 global pCounterValue
3060 global iCounterValue
3061 global onosSet
3062 global onosSetName
3063 pCounterName = "TestON-Partitions"
3064 iCounterName = "TestON-inMemory"
3065 pCounterValue = 0
3066 iCounterValue = 0
3067 onosSet = set([])
3068 onosSetName = "TestON-set"
3069
3070 description = "Install Primitives app"
3071 main.case( description )
3072 main.step( "Install Primitives app" )
3073 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003074 node = main.activeNodes[0]
3075 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003076 utilities.assert_equals( expect=main.TRUE,
3077 actual=appResults,
3078 onpass="Primitives app activated",
3079 onfail="Primitives app not activated" )
3080 time.sleep( 5 ) # To allow all nodes to activate
3081
3082 def CASE17( self, main ):
3083 """
3084 Check for basic functionality with distributed primitives
3085 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003086 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003087 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003088 assert main, "main not defined"
3089 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003090 assert main.CLIs, "main.CLIs not defined"
3091 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003092 assert pCounterName, "pCounterName not defined"
3093 assert iCounterName, "iCounterName not defined"
3094 assert onosSetName, "onosSetName not defined"
3095 # NOTE: assert fails if value is 0/None/Empty/False
3096 try:
3097 pCounterValue
3098 except NameError:
3099 main.log.error( "pCounterValue not defined, setting to 0" )
3100 pCounterValue = 0
3101 try:
3102 iCounterValue
3103 except NameError:
3104 main.log.error( "iCounterValue not defined, setting to 0" )
3105 iCounterValue = 0
3106 try:
3107 onosSet
3108 except NameError:
3109 main.log.error( "onosSet not defined, setting to empty Set" )
3110 onosSet = set([])
3111 # Variables for the distributed primitives tests. These are local only
3112 addValue = "a"
3113 addAllValue = "a b c d e f"
3114 retainValue = "c d e f"
3115
3116 description = "Check for basic functionality with distributed " +\
3117 "primitives"
3118 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003119 main.caseExplanation = "Test the methods of the distributed " +\
3120 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003121 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003122 # Partitioned counters
3123 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003124 pCounters = []
3125 threads = []
3126 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003127 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003128 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3129 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003130 args=[ pCounterName ] )
3131 pCounterValue += 1
3132 addedPValues.append( pCounterValue )
3133 threads.append( t )
3134 t.start()
3135
3136 for t in threads:
3137 t.join()
3138 pCounters.append( t.result )
3139 # Check that counter incremented numController times
3140 pCounterResults = True
3141 for i in addedPValues:
3142 tmpResult = i in pCounters
3143 pCounterResults = pCounterResults and tmpResult
3144 if not tmpResult:
3145 main.log.error( str( i ) + " is not in partitioned "
3146 "counter incremented results" )
3147 utilities.assert_equals( expect=True,
3148 actual=pCounterResults,
3149 onpass="Default counter incremented",
3150 onfail="Error incrementing default" +
3151 " counter" )
3152
Jon Halle1a3b752015-07-22 13:02:46 -07003153 main.step( "Get then Increment a default counter on each node" )
3154 pCounters = []
3155 threads = []
3156 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003157 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003158 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3159 name="counterGetAndAdd-" + str( i ),
3160 args=[ pCounterName ] )
3161 addedPValues.append( pCounterValue )
3162 pCounterValue += 1
3163 threads.append( t )
3164 t.start()
3165
3166 for t in threads:
3167 t.join()
3168 pCounters.append( t.result )
3169 # Check that counter incremented numController times
3170 pCounterResults = True
3171 for i in addedPValues:
3172 tmpResult = i in pCounters
3173 pCounterResults = pCounterResults and tmpResult
3174 if not tmpResult:
3175 main.log.error( str( i ) + " is not in partitioned "
3176 "counter incremented results" )
3177 utilities.assert_equals( expect=True,
3178 actual=pCounterResults,
3179 onpass="Default counter incremented",
3180 onfail="Error incrementing default" +
3181 " counter" )
3182
3183 main.step( "Counters we added have the correct values" )
3184 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3185 utilities.assert_equals( expect=main.TRUE,
3186 actual=incrementCheck,
3187 onpass="Added counters are correct",
3188 onfail="Added counters are incorrect" )
3189
3190 main.step( "Add -8 to then get a default counter on each node" )
3191 pCounters = []
3192 threads = []
3193 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003194 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003195 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3196 name="counterIncrement-" + str( i ),
3197 args=[ pCounterName ],
3198 kwargs={ "delta": -8 } )
3199 pCounterValue += -8
3200 addedPValues.append( pCounterValue )
3201 threads.append( t )
3202 t.start()
3203
3204 for t in threads:
3205 t.join()
3206 pCounters.append( t.result )
3207 # Check that counter incremented numController times
3208 pCounterResults = True
3209 for i in addedPValues:
3210 tmpResult = i in pCounters
3211 pCounterResults = pCounterResults and tmpResult
3212 if not tmpResult:
3213 main.log.error( str( i ) + " is not in partitioned "
3214 "counter incremented results" )
3215 utilities.assert_equals( expect=True,
3216 actual=pCounterResults,
3217 onpass="Default counter incremented",
3218 onfail="Error incrementing default" +
3219 " counter" )
3220
3221 main.step( "Add 5 to then get a default counter on each node" )
3222 pCounters = []
3223 threads = []
3224 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003225 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003226 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3227 name="counterIncrement-" + str( i ),
3228 args=[ pCounterName ],
3229 kwargs={ "delta": 5 } )
3230 pCounterValue += 5
3231 addedPValues.append( pCounterValue )
3232 threads.append( t )
3233 t.start()
3234
3235 for t in threads:
3236 t.join()
3237 pCounters.append( t.result )
3238 # Check that counter incremented numController times
3239 pCounterResults = True
3240 for i in addedPValues:
3241 tmpResult = i in pCounters
3242 pCounterResults = pCounterResults and tmpResult
3243 if not tmpResult:
3244 main.log.error( str( i ) + " is not in partitioned "
3245 "counter incremented results" )
3246 utilities.assert_equals( expect=True,
3247 actual=pCounterResults,
3248 onpass="Default counter incremented",
3249 onfail="Error incrementing default" +
3250 " counter" )
3251
3252 main.step( "Get then add 5 to a default counter on each node" )
3253 pCounters = []
3254 threads = []
3255 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003256 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003257 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3258 name="counterIncrement-" + str( i ),
3259 args=[ pCounterName ],
3260 kwargs={ "delta": 5 } )
3261 addedPValues.append( pCounterValue )
3262 pCounterValue += 5
3263 threads.append( t )
3264 t.start()
3265
3266 for t in threads:
3267 t.join()
3268 pCounters.append( t.result )
3269 # Check that counter incremented numController times
3270 pCounterResults = True
3271 for i in addedPValues:
3272 tmpResult = i in pCounters
3273 pCounterResults = pCounterResults and tmpResult
3274 if not tmpResult:
3275 main.log.error( str( i ) + " is not in partitioned "
3276 "counter incremented results" )
3277 utilities.assert_equals( expect=True,
3278 actual=pCounterResults,
3279 onpass="Default counter incremented",
3280 onfail="Error incrementing default" +
3281 " counter" )
3282
3283 main.step( "Counters we added have the correct values" )
3284 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3285 utilities.assert_equals( expect=main.TRUE,
3286 actual=incrementCheck,
3287 onpass="Added counters are correct",
3288 onfail="Added counters are incorrect" )
3289
3290 # In-Memory counters
3291 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003292 iCounters = []
3293 addedIValues = []
3294 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003295 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003296 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003297 name="icounterIncrement-" + str( i ),
3298 args=[ iCounterName ],
3299 kwargs={ "inMemory": True } )
3300 iCounterValue += 1
3301 addedIValues.append( iCounterValue )
3302 threads.append( t )
3303 t.start()
3304
3305 for t in threads:
3306 t.join()
3307 iCounters.append( t.result )
3308 # Check that counter incremented numController times
3309 iCounterResults = True
3310 for i in addedIValues:
3311 tmpResult = i in iCounters
3312 iCounterResults = iCounterResults and tmpResult
3313 if not tmpResult:
3314 main.log.error( str( i ) + " is not in the in-memory "
3315 "counter incremented results" )
3316 utilities.assert_equals( expect=True,
3317 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003318 onpass="In-memory counter incremented",
3319 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003320 " counter" )
3321
Jon Halle1a3b752015-07-22 13:02:46 -07003322 main.step( "Get then Increment a in-memory counter on each node" )
3323 iCounters = []
3324 threads = []
3325 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003326 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003327 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3328 name="counterGetAndAdd-" + str( i ),
3329 args=[ iCounterName ],
3330 kwargs={ "inMemory": True } )
3331 addedIValues.append( iCounterValue )
3332 iCounterValue += 1
3333 threads.append( t )
3334 t.start()
3335
3336 for t in threads:
3337 t.join()
3338 iCounters.append( t.result )
3339 # Check that counter incremented numController times
3340 iCounterResults = True
3341 for i in addedIValues:
3342 tmpResult = i in iCounters
3343 iCounterResults = iCounterResults and tmpResult
3344 if not tmpResult:
3345 main.log.error( str( i ) + " is not in in-memory "
3346 "counter incremented results" )
3347 utilities.assert_equals( expect=True,
3348 actual=iCounterResults,
3349 onpass="In-memory counter incremented",
3350 onfail="Error incrementing in-memory" +
3351 " counter" )
3352
3353 main.step( "Counters we added have the correct values" )
3354 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3355 utilities.assert_equals( expect=main.TRUE,
3356 actual=incrementCheck,
3357 onpass="Added counters are correct",
3358 onfail="Added counters are incorrect" )
3359
3360 main.step( "Add -8 to then get a in-memory counter on each node" )
3361 iCounters = []
3362 threads = []
3363 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003364 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003365 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3366 name="counterIncrement-" + str( i ),
3367 args=[ iCounterName ],
3368 kwargs={ "delta": -8, "inMemory": True } )
3369 iCounterValue += -8
3370 addedIValues.append( iCounterValue )
3371 threads.append( t )
3372 t.start()
3373
3374 for t in threads:
3375 t.join()
3376 iCounters.append( t.result )
3377 # Check that counter incremented numController times
3378 iCounterResults = True
3379 for i in addedIValues:
3380 tmpResult = i in iCounters
3381 iCounterResults = iCounterResults and tmpResult
3382 if not tmpResult:
3383 main.log.error( str( i ) + " is not in in-memory "
3384 "counter incremented results" )
3385 utilities.assert_equals( expect=True,
3386 actual=pCounterResults,
3387 onpass="In-memory counter incremented",
3388 onfail="Error incrementing in-memory" +
3389 " counter" )
3390
3391 main.step( "Add 5 to then get a in-memory counter on each node" )
3392 iCounters = []
3393 threads = []
3394 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003395 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003396 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3397 name="counterIncrement-" + str( i ),
3398 args=[ iCounterName ],
3399 kwargs={ "delta": 5, "inMemory": True } )
3400 iCounterValue += 5
3401 addedIValues.append( iCounterValue )
3402 threads.append( t )
3403 t.start()
3404
3405 for t in threads:
3406 t.join()
3407 iCounters.append( t.result )
3408 # Check that counter incremented numController times
3409 iCounterResults = True
3410 for i in addedIValues:
3411 tmpResult = i in iCounters
3412 iCounterResults = iCounterResults and tmpResult
3413 if not tmpResult:
3414 main.log.error( str( i ) + " is not in in-memory "
3415 "counter incremented results" )
3416 utilities.assert_equals( expect=True,
3417 actual=pCounterResults,
3418 onpass="In-memory counter incremented",
3419 onfail="Error incrementing in-memory" +
3420 " counter" )
3421
3422 main.step( "Get then add 5 to a in-memory counter on each node" )
3423 iCounters = []
3424 threads = []
3425 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003426 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003427 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3428 name="counterIncrement-" + str( i ),
3429 args=[ iCounterName ],
3430 kwargs={ "delta": 5, "inMemory": True } )
3431 addedIValues.append( iCounterValue )
3432 iCounterValue += 5
3433 threads.append( t )
3434 t.start()
3435
3436 for t in threads:
3437 t.join()
3438 iCounters.append( t.result )
3439 # Check that counter incremented numController times
3440 iCounterResults = True
3441 for i in addedIValues:
3442 tmpResult = i in iCounters
3443 iCounterResults = iCounterResults and tmpResult
3444 if not tmpResult:
3445 main.log.error( str( i ) + " is not in in-memory "
3446 "counter incremented results" )
3447 utilities.assert_equals( expect=True,
3448 actual=iCounterResults,
3449 onpass="In-memory counter incremented",
3450 onfail="Error incrementing in-memory" +
3451 " counter" )
3452
3453 main.step( "Counters we added have the correct values" )
3454 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3455 utilities.assert_equals( expect=main.TRUE,
3456 actual=incrementCheck,
3457 onpass="Added counters are correct",
3458 onfail="Added counters are incorrect" )
3459
Jon Hall5cf14d52015-07-16 12:15:19 -07003460 main.step( "Check counters are consistant across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003461 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003462 utilities.assert_equals( expect=main.TRUE,
3463 actual=consistentCounterResults,
3464 onpass="ONOS counters are consistent " +
3465 "across nodes",
3466 onfail="ONOS Counters are inconsistent " +
3467 "across nodes" )
3468
3469 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003470 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3471 incrementCheck = incrementCheck and \
3472 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003473 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003474 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003475 onpass="Added counters are correct",
3476 onfail="Added counters are incorrect" )
3477 # DISTRIBUTED SETS
3478 main.step( "Distributed Set get" )
3479 size = len( onosSet )
3480 getResponses = []
3481 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003482 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003483 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003484 name="setTestGet-" + str( i ),
3485 args=[ onosSetName ] )
3486 threads.append( t )
3487 t.start()
3488 for t in threads:
3489 t.join()
3490 getResponses.append( t.result )
3491
3492 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003493 for i in range( len( main.activeNodes ) ):
3494 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003495 if isinstance( getResponses[ i ], list):
3496 current = set( getResponses[ i ] )
3497 if len( current ) == len( getResponses[ i ] ):
3498 # no repeats
3499 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003500 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003501 " has incorrect view" +
3502 " of set " + onosSetName + ":\n" +
3503 str( getResponses[ i ] ) )
3504 main.log.debug( "Expected: " + str( onosSet ) )
3505 main.log.debug( "Actual: " + str( current ) )
3506 getResults = main.FALSE
3507 else:
3508 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003509 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003510 " has repeat elements in" +
3511 " set " + onosSetName + ":\n" +
3512 str( getResponses[ i ] ) )
3513 getResults = main.FALSE
3514 elif getResponses[ i ] == main.ERROR:
3515 getResults = main.FALSE
3516 utilities.assert_equals( expect=main.TRUE,
3517 actual=getResults,
3518 onpass="Set elements are correct",
3519 onfail="Set elements are incorrect" )
3520
3521 main.step( "Distributed Set size" )
3522 sizeResponses = []
3523 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003524 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003525 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003526 name="setTestSize-" + str( i ),
3527 args=[ onosSetName ] )
3528 threads.append( t )
3529 t.start()
3530 for t in threads:
3531 t.join()
3532 sizeResponses.append( t.result )
3533
3534 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003535 for i in range( len( main.activeNodes ) ):
3536 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003537 if size != sizeResponses[ i ]:
3538 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003539 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003540 " expected a size of " + str( size ) +
3541 " for set " + onosSetName +
3542 " but got " + str( sizeResponses[ i ] ) )
3543 utilities.assert_equals( expect=main.TRUE,
3544 actual=sizeResults,
3545 onpass="Set sizes are correct",
3546 onfail="Set sizes are incorrect" )
3547
3548 main.step( "Distributed Set add()" )
3549 onosSet.add( addValue )
3550 addResponses = []
3551 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003552 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003553 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003554 name="setTestAdd-" + str( i ),
3555 args=[ onosSetName, addValue ] )
3556 threads.append( t )
3557 t.start()
3558 for t in threads:
3559 t.join()
3560 addResponses.append( t.result )
3561
3562 # main.TRUE = successfully changed the set
3563 # main.FALSE = action resulted in no change in set
3564 # main.ERROR - Some error in executing the function
3565 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003566 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003567 if addResponses[ i ] == main.TRUE:
3568 # All is well
3569 pass
3570 elif addResponses[ i ] == main.FALSE:
3571 # Already in set, probably fine
3572 pass
3573 elif addResponses[ i ] == main.ERROR:
3574 # Error in execution
3575 addResults = main.FALSE
3576 else:
3577 # unexpected result
3578 addResults = main.FALSE
3579 if addResults != main.TRUE:
3580 main.log.error( "Error executing set add" )
3581
3582 # Check if set is still correct
3583 size = len( onosSet )
3584 getResponses = []
3585 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003586 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003587 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003588 name="setTestGet-" + str( i ),
3589 args=[ onosSetName ] )
3590 threads.append( t )
3591 t.start()
3592 for t in threads:
3593 t.join()
3594 getResponses.append( t.result )
3595 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003596 for i in range( len( main.activeNodes ) ):
3597 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003598 if isinstance( getResponses[ i ], list):
3599 current = set( getResponses[ i ] )
3600 if len( current ) == len( getResponses[ i ] ):
3601 # no repeats
3602 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003603 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003604 " of set " + onosSetName + ":\n" +
3605 str( getResponses[ i ] ) )
3606 main.log.debug( "Expected: " + str( onosSet ) )
3607 main.log.debug( "Actual: " + str( current ) )
3608 getResults = main.FALSE
3609 else:
3610 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003611 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003612 " set " + onosSetName + ":\n" +
3613 str( getResponses[ i ] ) )
3614 getResults = main.FALSE
3615 elif getResponses[ i ] == main.ERROR:
3616 getResults = main.FALSE
3617 sizeResponses = []
3618 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003619 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003620 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003621 name="setTestSize-" + str( i ),
3622 args=[ onosSetName ] )
3623 threads.append( t )
3624 t.start()
3625 for t in threads:
3626 t.join()
3627 sizeResponses.append( t.result )
3628 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003629 for i in range( len( main.activeNodes ) ):
3630 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003631 if size != sizeResponses[ i ]:
3632 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003633 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003634 " expected a size of " + str( size ) +
3635 " for set " + onosSetName +
3636 " but got " + str( sizeResponses[ i ] ) )
3637 addResults = addResults and getResults and sizeResults
3638 utilities.assert_equals( expect=main.TRUE,
3639 actual=addResults,
3640 onpass="Set add correct",
3641 onfail="Set add was incorrect" )
3642
3643 main.step( "Distributed Set addAll()" )
3644 onosSet.update( addAllValue.split() )
3645 addResponses = []
3646 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003647 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003648 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003649 name="setTestAddAll-" + str( i ),
3650 args=[ onosSetName, addAllValue ] )
3651 threads.append( t )
3652 t.start()
3653 for t in threads:
3654 t.join()
3655 addResponses.append( t.result )
3656
3657 # main.TRUE = successfully changed the set
3658 # main.FALSE = action resulted in no change in set
3659 # main.ERROR - Some error in executing the function
3660 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003661 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003662 if addResponses[ i ] == main.TRUE:
3663 # All is well
3664 pass
3665 elif addResponses[ i ] == main.FALSE:
3666 # Already in set, probably fine
3667 pass
3668 elif addResponses[ i ] == main.ERROR:
3669 # Error in execution
3670 addAllResults = main.FALSE
3671 else:
3672 # unexpected result
3673 addAllResults = main.FALSE
3674 if addAllResults != main.TRUE:
3675 main.log.error( "Error executing set addAll" )
3676
3677 # Check if set is still correct
3678 size = len( onosSet )
3679 getResponses = []
3680 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003681 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003682 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003683 name="setTestGet-" + str( i ),
3684 args=[ onosSetName ] )
3685 threads.append( t )
3686 t.start()
3687 for t in threads:
3688 t.join()
3689 getResponses.append( t.result )
3690 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003691 for i in range( len( main.activeNodes ) ):
3692 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003693 if isinstance( getResponses[ i ], list):
3694 current = set( getResponses[ i ] )
3695 if len( current ) == len( getResponses[ i ] ):
3696 # no repeats
3697 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003698 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003699 " has incorrect view" +
3700 " of set " + onosSetName + ":\n" +
3701 str( getResponses[ i ] ) )
3702 main.log.debug( "Expected: " + str( onosSet ) )
3703 main.log.debug( "Actual: " + str( current ) )
3704 getResults = main.FALSE
3705 else:
3706 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003707 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003708 " has repeat elements in" +
3709 " set " + onosSetName + ":\n" +
3710 str( getResponses[ i ] ) )
3711 getResults = main.FALSE
3712 elif getResponses[ i ] == main.ERROR:
3713 getResults = main.FALSE
3714 sizeResponses = []
3715 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003716 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003717 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003718 name="setTestSize-" + str( i ),
3719 args=[ onosSetName ] )
3720 threads.append( t )
3721 t.start()
3722 for t in threads:
3723 t.join()
3724 sizeResponses.append( t.result )
3725 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003726 for i in range( len( main.activeNodes ) ):
3727 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003728 if size != sizeResponses[ i ]:
3729 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003730 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003731 " expected a size of " + str( size ) +
3732 " for set " + onosSetName +
3733 " but got " + str( sizeResponses[ i ] ) )
3734 addAllResults = addAllResults and getResults and sizeResults
3735 utilities.assert_equals( expect=main.TRUE,
3736 actual=addAllResults,
3737 onpass="Set addAll correct",
3738 onfail="Set addAll was incorrect" )
3739
3740 main.step( "Distributed Set contains()" )
3741 containsResponses = []
3742 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003743 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003744 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003745 name="setContains-" + str( i ),
3746 args=[ onosSetName ],
3747 kwargs={ "values": addValue } )
3748 threads.append( t )
3749 t.start()
3750 for t in threads:
3751 t.join()
3752 # NOTE: This is the tuple
3753 containsResponses.append( t.result )
3754
3755 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003756 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003757 if containsResponses[ i ] == main.ERROR:
3758 containsResults = main.FALSE
3759 else:
3760 containsResults = containsResults and\
3761 containsResponses[ i ][ 1 ]
3762 utilities.assert_equals( expect=main.TRUE,
3763 actual=containsResults,
3764 onpass="Set contains is functional",
3765 onfail="Set contains failed" )
3766
3767 main.step( "Distributed Set containsAll()" )
3768 containsAllResponses = []
3769 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003770 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003771 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003772 name="setContainsAll-" + str( i ),
3773 args=[ onosSetName ],
3774 kwargs={ "values": addAllValue } )
3775 threads.append( t )
3776 t.start()
3777 for t in threads:
3778 t.join()
3779 # NOTE: This is the tuple
3780 containsAllResponses.append( t.result )
3781
3782 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003783 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003784 if containsResponses[ i ] == main.ERROR:
3785 containsResults = main.FALSE
3786 else:
3787 containsResults = containsResults and\
3788 containsResponses[ i ][ 1 ]
3789 utilities.assert_equals( expect=main.TRUE,
3790 actual=containsAllResults,
3791 onpass="Set containsAll is functional",
3792 onfail="Set containsAll failed" )
3793
3794 main.step( "Distributed Set remove()" )
3795 onosSet.remove( addValue )
        # Tail of the "Distributed Set remove()" step: fan out the remove to
        # every active node in parallel, then verify that all nodes agree on
        # the set's contents and size afterwards.
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        # Join in creation order so responses line up with main.activeNodes
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node: 1-based ONOS node number, used only in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3890
        # removeAll: remove every value in addAllValue from the distributed
        # set on all nodes at once, then verify contents and size everywhere.
        main.step( "Distributed Set removeAll()" )
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node: 1-based ONOS node number, used only in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3990
        # addAll: add every value in addAllValue to the distributed set on
        # all nodes in parallel, then verify contents and size everywhere.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node: 1-based ONOS node number, used only in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4087
        # clear: empty the distributed set on all nodes (setTestRemove with
        # clear=True), then verify every node sees an empty set of size 0.
        main.step( "Distributed Set clear()" )
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "],  # value is ignored when clearing
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node: 1-based ONOS node number, used only in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4185
        # addAll (again): repopulate the set after the clear() step above so
        # the following retain() step has elements to work with, then verify.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node: 1-based ONOS node number, used only in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4282
        # retain: keep only the values in retainValue (set intersection,
        # via setTestRemove with retain=True) on all nodes, then verify.
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # no change to the set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node: 1-based ONOS node number, used only in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4379
        # Transactional maps
        # Put 100 Key<n> -> tMapValue entries through one node, then read
        # each key back from every active node and check all values match.
        main.step( "Partitioned Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]  # puts go through a single node
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        if len( putResponses ) == 100:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            # Read "Key<n>" from every active node in parallel
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )
4426
        # Same put/get exercise as above, but against the in-memory
        # transactional map ( inMemory=True ).
        main.step( "In-memory Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]  # puts go through a single node
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
        if len( putResponses ) == 100:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="In-Memory Transactional Map put successful",
                                 onfail="In-Memory Transactional Map put values are incorrect" )

        main.step( "In-Memory Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            # Read "Key<n>" from every active node in parallel
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ],
                                 kwargs={ "inMemory": True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="In-Memory Transactional Map get values were correct",
                                 onfail="In-Memory Transactional Map values incorrect" )