blob: b3a2fd8082dc4c2a9b6f5ba020636719ed5e6586 [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
    a minority of its nodes stopping and restarting

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
26
27
Jon Hallb3ed8ed2015-10-28 16:43:55 -070028class HAstopNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -070029
    def __init__( self ):
        """Initialize the test class; TestON supplies all real state via 'main'."""
        self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hall3b489db2015-10-05 14:38:37 -070052 import pexpect
Jon Hallf3d16e72015-12-16 17:45:08 -080053 import time
Jon Hallb3ed8ed2015-10-28 16:43:55 -070054 main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
Jon Hall5cf14d52015-07-16 12:15:19 -070055 "initialization" )
56 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070057 main.caseExplanation = "Setup the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070058 "installing ONOS, starting Mininet and ONOS" +\
59 "cli sessions."
60 # TODO: save all the timers and output them for plotting
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
Jon Halle1a3b752015-07-22 13:02:46 -070069 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070070 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070071 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070074 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
81
82 # FIXME: just get controller port from params?
83 # TODO: do we really need all these?
84 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
85 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
86 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
87 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
88 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
89 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
90 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
91
Jon Halle1a3b752015-07-22 13:02:46 -070092 try:
93 fileName = "Counters"
94 # TODO: Maybe make a library folder somewhere?
95 path = main.params[ 'imports' ][ 'path' ]
96 main.Counters = imp.load_source( fileName,
97 path + fileName + ".py" )
98 except Exception as e:
99 main.log.exception( e )
100 main.cleanup()
101 main.exit()
102
103 main.CLIs = []
104 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700105 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700106 for i in range( 1, main.numCtrls + 1 ):
107 try:
108 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
109 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
110 ipList.append( main.nodes[ -1 ].ip_address )
111 except AttributeError:
112 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700113
114 main.step( "Create cell file" )
115 cellAppString = main.params[ 'ENV' ][ 'appString' ]
116 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
117 main.Mininet1.ip_address,
118 cellAppString, ipList )
119 main.step( "Applying cell variable to environment" )
120 cellResult = main.ONOSbench.setCell( cellName )
121 verifyResult = main.ONOSbench.verifyCell()
122
123 # FIXME:this is short term fix
124 main.log.info( "Removing raft logs" )
125 main.ONOSbench.onosRemoveRaftLogs()
126
127 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700128 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700129 main.ONOSbench.onosUninstall( node.ip_address )
130
131 # Make sure ONOS is DEAD
132 main.log.info( "Killing any ONOS processes" )
133 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700134 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700135 killed = main.ONOSbench.onosKill( node.ip_address )
136 killResults = killResults and killed
137
138 cleanInstallResult = main.TRUE
139 gitPullResult = main.TRUE
140
141 main.step( "Starting Mininet" )
142 # scp topo file to mininet
143 # TODO: move to params?
144 topoName = "obelisk.py"
145 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700146 main.ONOSbench.scp( main.Mininet1,
147 filePath + topoName,
148 main.Mininet1.home,
149 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700150 mnResult = main.Mininet1.startNet( )
151 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
152 onpass="Mininet Started",
153 onfail="Error starting Mininet" )
154
155 main.step( "Git checkout and pull " + gitBranch )
156 if PULLCODE:
157 main.ONOSbench.gitCheckout( gitBranch )
158 gitPullResult = main.ONOSbench.gitPull()
159 # values of 1 or 3 are good
160 utilities.assert_lesser( expect=0, actual=gitPullResult,
161 onpass="Git pull successful",
162 onfail="Git pull failed" )
163 main.ONOSbench.getVersion( report=True )
164
165 main.step( "Using mvn clean install" )
166 cleanInstallResult = main.TRUE
167 if PULLCODE and gitPullResult == main.TRUE:
168 cleanInstallResult = main.ONOSbench.cleanInstall()
169 else:
170 main.log.warn( "Did not pull new code so skipping mvn " +
171 "clean install" )
172 utilities.assert_equals( expect=main.TRUE,
173 actual=cleanInstallResult,
174 onpass="MCI successful",
175 onfail="MCI failed" )
176 # GRAPHS
177 # NOTE: important params here:
178 # job = name of Jenkins job
179 # Plot Name = Plot-HA, only can be used if multiple plots
180 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700181 job = "HAstopNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700182 plotName = "Plot-HA"
183 graphs = '<ac:structured-macro ac:name="html">\n'
184 graphs += '<ac:plain-text-body><![CDATA[\n'
185 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
186 '/plot/' + plotName + '/getPlot?index=0' +\
187 '&width=500&height=300"' +\
188 'noborder="0" width="500" height="300" scrolling="yes" ' +\
189 'seamless="seamless"></iframe>\n'
190 graphs += ']]></ac:plain-text-body>\n'
191 graphs += '</ac:structured-macro>\n'
192 main.log.wiki(graphs)
193
194 main.step( "Creating ONOS package" )
Jon Hall3b489db2015-10-05 14:38:37 -0700195 # copy gen-partions file to ONOS
196 # NOTE: this assumes TestON and ONOS are on the same machine
197 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
198 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
199 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
200 main.ONOSbench.ip_address,
201 srcFile,
202 dstDir,
203 pwd=main.ONOSbench.pwd,
204 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700205 packageResult = main.ONOSbench.onosPackage()
206 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
207 onpass="ONOS package successful",
208 onfail="ONOS package failed" )
209
210 main.step( "Installing ONOS package" )
211 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700212 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700213 tmpResult = main.ONOSbench.onosInstall( options="-f",
214 node=node.ip_address )
215 onosInstallResult = onosInstallResult and tmpResult
216 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
217 onpass="ONOS install successful",
218 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700219 # clean up gen-partitions file
220 try:
221 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
222 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
223 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
224 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
225 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
226 str( main.ONOSbench.handle.before ) )
227 except ( pexpect.TIMEOUT, pexpect.EOF ):
228 main.log.exception( "ONOSbench: pexpect exception found:" +
229 main.ONOSbench.handle.before )
230 main.cleanup()
231 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700232
233 main.step( "Checking if ONOS is up yet" )
234 for i in range( 2 ):
235 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700236 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700237 started = main.ONOSbench.isup( node.ip_address )
238 if not started:
239 main.log.error( node.name + " didn't start!" )
240 main.ONOSbench.onosStop( node.ip_address )
241 main.ONOSbench.onosStart( node.ip_address )
242 onosIsupResult = onosIsupResult and started
243 if onosIsupResult == main.TRUE:
244 break
245 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
246 onpass="ONOS startup successful",
247 onfail="ONOS startup failed" )
248
249 main.log.step( "Starting ONOS CLI sessions" )
250 cliResults = main.TRUE
251 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700252 for i in range( main.numCtrls ):
253 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700254 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700255 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700256 threads.append( t )
257 t.start()
258
259 for t in threads:
260 t.join()
261 cliResults = cliResults and t.result
262 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
263 onpass="ONOS cli startup successful",
264 onfail="ONOS cli startup failed" )
265
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700266 # Create a list of active nodes for use when some nodes are stopped
267 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
268
Jon Hall5cf14d52015-07-16 12:15:19 -0700269 if main.params[ 'tcpdump' ].lower() == "true":
270 main.step( "Start Packet Capture MN" )
271 main.Mininet2.startTcpdump(
272 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
273 + "-MN.pcap",
274 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
275 port=main.params[ 'MNtcpdump' ][ 'port' ] )
276
277 main.step( "App Ids check" )
Jon Hallf3d16e72015-12-16 17:45:08 -0800278 time.sleep(60)
Jon Hall5cf14d52015-07-16 12:15:19 -0700279 appCheck = main.TRUE
280 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700281 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700282 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700283 name="appToIDCheck-" + str( i ),
284 args=[] )
285 threads.append( t )
286 t.start()
287
288 for t in threads:
289 t.join()
290 appCheck = appCheck and t.result
291 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700292 node = main.activeNodes[0]
293 main.log.warn( main.CLIs[node].apps() )
294 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700295 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
296 onpass="App Ids seem to be correct",
297 onfail="Something is wrong with app Ids" )
298
299 if cliResults == main.FALSE:
300 main.log.error( "Failed to start ONOS, stopping test" )
301 main.cleanup()
302 main.exit()
303
304 def CASE2( self, main ):
305 """
306 Assign devices to controllers
307 """
308 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700309 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700310 assert main, "main not defined"
311 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700312 assert main.CLIs, "main.CLIs not defined"
313 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700314 assert ONOS1Port, "ONOS1Port not defined"
315 assert ONOS2Port, "ONOS2Port not defined"
316 assert ONOS3Port, "ONOS3Port not defined"
317 assert ONOS4Port, "ONOS4Port not defined"
318 assert ONOS5Port, "ONOS5Port not defined"
319 assert ONOS6Port, "ONOS6Port not defined"
320 assert ONOS7Port, "ONOS7Port not defined"
321
322 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700323 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700324 "and check that an ONOS node becomes the " +\
325 "master of the device."
326 main.step( "Assign switches to controllers" )
327
328 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700329 for i in range( main.numCtrls ):
330 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700331 swList = []
332 for i in range( 1, 29 ):
333 swList.append( "s" + str( i ) )
334 main.Mininet1.assignSwController( sw=swList, ip=ipList )
335
336 mastershipCheck = main.TRUE
337 for i in range( 1, 29 ):
338 response = main.Mininet1.getSwController( "s" + str( i ) )
339 try:
340 main.log.info( str( response ) )
341 except Exception:
342 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700343 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700344 if re.search( "tcp:" + node.ip_address, response ):
345 mastershipCheck = mastershipCheck and main.TRUE
346 else:
347 main.log.error( "Error, node " + node.ip_address + " is " +
348 "not in the list of controllers s" +
349 str( i ) + " is connecting to." )
350 mastershipCheck = main.FALSE
351 utilities.assert_equals(
352 expect=main.TRUE,
353 actual=mastershipCheck,
354 onpass="Switch mastership assigned correctly",
355 onfail="Switches not assigned correctly to controllers" )
356
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually pins each of the 28 obelisk switches to a designated ONOS
        node with 'device-role', then verifies the reported master matches.
        """
        import time
        # Sanity-check that CASE1/CASE2 ran and left the expected state behind
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                                "device. Then manually assign" +\
                                " mastership to specific ONOS nodes using" +\
                                " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI calls in this case go through one active node's CLI handle
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster (the '% main.numCtrls' wraps indices for smaller ones)
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = controller index, ip = its address,
                # deviceId = ONOS id of the switch, looked up by dpid suffix
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # A device was missing from the ONOS view; dump it for debugging
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        # Re-query each device and confirm its master is the node we assigned
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
478
479 def CASE3( self, main ):
480 """
481 Assign intents
482 """
483 import time
484 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700485 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700486 assert main, "main not defined"
487 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700488 assert main.CLIs, "main.CLIs not defined"
489 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700490 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700491 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700492 "assign predetermined host-to-host intents." +\
493 " After installation, check that the intent" +\
494 " is distributed to all nodes and the state" +\
495 " is INSTALLED"
496
497 # install onos-app-fwd
498 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700499 onosCli = main.CLIs[ main.activeNodes[0] ]
500 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700501 utilities.assert_equals( expect=main.TRUE, actual=installResults,
502 onpass="Install fwd successful",
503 onfail="Install fwd failed" )
504
505 main.step( "Check app ids" )
506 appCheck = main.TRUE
507 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700508 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700509 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700510 name="appToIDCheck-" + str( i ),
511 args=[] )
512 threads.append( t )
513 t.start()
514
515 for t in threads:
516 t.join()
517 appCheck = appCheck and t.result
518 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700519 main.log.warn( onosCli.apps() )
520 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700521 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
522 onpass="App Ids seem to be correct",
523 onfail="Something is wrong with app Ids" )
524
525 main.step( "Discovering Hosts( Via pingall for now )" )
526 # FIXME: Once we have a host discovery mechanism, use that instead
527 # REACTIVE FWD test
528 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700529 passMsg = "Reactive Pingall test passed"
530 time1 = time.time()
531 pingResult = main.Mininet1.pingall()
532 time2 = time.time()
533 if not pingResult:
534 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700535 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700536 passMsg += " on the second try"
537 utilities.assert_equals(
538 expect=main.TRUE,
539 actual=pingResult,
540 onpass= passMsg,
541 onfail="Reactive Pingall failed, " +
542 "one or more ping pairs failed" )
543 main.log.info( "Time for pingall: %2f seconds" %
544 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700545 # timeout for fwd flows
546 time.sleep( 11 )
547 # uninstall onos-app-fwd
548 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700549 node = main.activeNodes[0]
550 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700551 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
552 onpass="Uninstall fwd successful",
553 onfail="Uninstall fwd failed" )
554
555 main.step( "Check app ids" )
556 threads = []
557 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700558 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700559 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700560 name="appToIDCheck-" + str( i ),
561 args=[] )
562 threads.append( t )
563 t.start()
564
565 for t in threads:
566 t.join()
567 appCheck2 = appCheck2 and t.result
568 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700569 node = main.activeNodes[0]
570 main.log.warn( main.CLIs[node].apps() )
571 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700572 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
573 onpass="App Ids seem to be correct",
574 onfail="Something is wrong with app Ids" )
575
576 main.step( "Add host intents via cli" )
577 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700578 # TODO: move the host numbers to params
579 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700580 intentAddResult = True
581 hostResult = main.TRUE
582 for i in range( 8, 18 ):
583 main.log.info( "Adding host intent between h" + str( i ) +
584 " and h" + str( i + 10 ) )
585 host1 = "00:00:00:00:00:" + \
586 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
587 host2 = "00:00:00:00:00:" + \
588 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
589 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700590 host1Dict = onosCli.getHost( host1 )
591 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700592 host1Id = None
593 host2Id = None
594 if host1Dict and host2Dict:
595 host1Id = host1Dict.get( 'id', None )
596 host2Id = host2Dict.get( 'id', None )
597 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700598 nodeNum = ( i % len( main.activeNodes ) )
599 node = main.activeNodes[nodeNum]
600 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700601 if tmpId:
602 main.log.info( "Added intent with id: " + tmpId )
603 intentIds.append( tmpId )
604 else:
605 main.log.error( "addHostIntent returned: " +
606 repr( tmpId ) )
607 else:
608 main.log.error( "Error, getHost() failed for h" + str( i ) +
609 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700610 node = main.activeNodes[0]
611 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700612 main.log.warn( "Hosts output: " )
613 try:
614 main.log.warn( json.dumps( json.loads( hosts ),
615 sort_keys=True,
616 indent=4,
617 separators=( ',', ': ' ) ) )
618 except ( ValueError, TypeError ):
619 main.log.warn( repr( hosts ) )
620 hostResult = main.FALSE
621 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
622 onpass="Found a host id for each host",
623 onfail="Error looking up host ids" )
624
625 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700626 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700627 main.log.info( "Submitted intents: " + str( intentIds ) )
628 main.log.info( "Intents in ONOS: " + str( onosIds ) )
629 for intent in intentIds:
630 if intent in onosIds:
631 pass # intent submitted is in onos
632 else:
633 intentAddResult = False
634 if intentAddResult:
635 intentStop = time.time()
636 else:
637 intentStop = None
638 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700639 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700640 intentStates = []
641 installedCheck = True
642 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
643 count = 0
644 try:
645 for intent in json.loads( intents ):
646 state = intent.get( 'state', None )
647 if "INSTALLED" not in state:
648 installedCheck = False
649 intentId = intent.get( 'id', None )
650 intentStates.append( ( intentId, state ) )
651 except ( ValueError, TypeError ):
652 main.log.exception( "Error parsing intents" )
653 # add submitted intents not in the store
654 tmplist = [ i for i, s in intentStates ]
655 missingIntents = False
656 for i in intentIds:
657 if i not in tmplist:
658 intentStates.append( ( i, " - " ) )
659 missingIntents = True
660 intentStates.sort()
661 for i, s in intentStates:
662 count += 1
663 main.log.info( "%-6s%-15s%-15s" %
664 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700665 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700666 try:
667 missing = False
668 if leaders:
669 parsedLeaders = json.loads( leaders )
670 main.log.warn( json.dumps( parsedLeaders,
671 sort_keys=True,
672 indent=4,
673 separators=( ',', ': ' ) ) )
674 # check for all intent partitions
675 topics = []
676 for i in range( 14 ):
677 topics.append( "intent-partition-" + str( i ) )
678 main.log.debug( topics )
679 ONOStopics = [ j['topic'] for j in parsedLeaders ]
680 for topic in topics:
681 if topic not in ONOStopics:
682 main.log.error( "Error: " + topic +
683 " not in leaders" )
684 missing = True
685 else:
686 main.log.error( "leaders() returned None" )
687 except ( ValueError, TypeError ):
688 main.log.exception( "Error parsing leaders" )
689 main.log.error( repr( leaders ) )
690 # Check all nodes
691 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700692 for i in main.activeNodes:
693 response = main.CLIs[i].leaders( jsonFormat=False)
694 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700695 str( response ) )
696
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700697 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700698 try:
699 if partitions :
700 parsedPartitions = json.loads( partitions )
701 main.log.warn( json.dumps( parsedPartitions,
702 sort_keys=True,
703 indent=4,
704 separators=( ',', ': ' ) ) )
705 # TODO check for a leader in all paritions
706 # TODO check for consistency among nodes
707 else:
708 main.log.error( "partitions() returned None" )
709 except ( ValueError, TypeError ):
710 main.log.exception( "Error parsing partitions" )
711 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700712 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700713 try:
714 if pendingMap :
715 parsedPending = json.loads( pendingMap )
716 main.log.warn( json.dumps( parsedPending,
717 sort_keys=True,
718 indent=4,
719 separators=( ',', ': ' ) ) )
720 # TODO check something here?
721 else:
722 main.log.error( "pendingMap() returned None" )
723 except ( ValueError, TypeError ):
724 main.log.exception( "Error parsing pending map" )
725 main.log.error( repr( pendingMap ) )
726
727 intentAddResult = bool( intentAddResult and not missingIntents and
728 installedCheck )
729 if not intentAddResult:
730 main.log.error( "Error in pushing host intents to ONOS" )
731
732 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700733 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700734 correct = True
735 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700736 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700737 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700738 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700739 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700740 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700741 str( sorted( onosIds ) ) )
742 if sorted( ids ) != sorted( intentIds ):
743 main.log.warn( "Set of intent IDs doesn't match" )
744 correct = False
745 break
746 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700747 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700748 for intent in intents:
749 if intent[ 'state' ] != "INSTALLED":
750 main.log.warn( "Intent " + intent[ 'id' ] +
751 " is " + intent[ 'state' ] )
752 correct = False
753 break
754 if correct:
755 break
756 else:
757 time.sleep(1)
758 if not intentStop:
759 intentStop = time.time()
760 global gossipTime
761 gossipTime = intentStop - intentStart
762 main.log.info( "It took about " + str( gossipTime ) +
763 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700764 gossipPeriod = int( main.params['timers']['gossip'] )
765 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700766 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700767 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700768 onpass="ECM anti-entropy for intents worked within " +
769 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700770 onfail="Intent ECM anti-entropy took too long. " +
771 "Expected time:{}, Actual time:{}".format( maxGossipTime,
772 gossipTime ) )
773 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700774 intentAddResult = True
775
776 if not intentAddResult or "key" in pendingMap:
777 import time
778 installedCheck = True
779 main.log.info( "Sleeping 60 seconds to see if intents are found" )
780 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700781 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700782 main.log.info( "Submitted intents: " + str( intentIds ) )
783 main.log.info( "Intents in ONOS: " + str( onosIds ) )
784 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700785 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700786 intentStates = []
787 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
788 count = 0
789 try:
790 for intent in json.loads( intents ):
791 # Iter through intents of a node
792 state = intent.get( 'state', None )
793 if "INSTALLED" not in state:
794 installedCheck = False
795 intentId = intent.get( 'id', None )
796 intentStates.append( ( intentId, state ) )
797 except ( ValueError, TypeError ):
798 main.log.exception( "Error parsing intents" )
799 # add submitted intents not in the store
800 tmplist = [ i for i, s in intentStates ]
801 for i in intentIds:
802 if i not in tmplist:
803 intentStates.append( ( i, " - " ) )
804 intentStates.sort()
805 for i, s in intentStates:
806 count += 1
807 main.log.info( "%-6s%-15s%-15s" %
808 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700809 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700810 try:
811 missing = False
812 if leaders:
813 parsedLeaders = json.loads( leaders )
814 main.log.warn( json.dumps( parsedLeaders,
815 sort_keys=True,
816 indent=4,
817 separators=( ',', ': ' ) ) )
818 # check for all intent partitions
819 # check for election
820 topics = []
821 for i in range( 14 ):
822 topics.append( "intent-partition-" + str( i ) )
823 # FIXME: this should only be after we start the app
824 topics.append( "org.onosproject.election" )
825 main.log.debug( topics )
826 ONOStopics = [ j['topic'] for j in parsedLeaders ]
827 for topic in topics:
828 if topic not in ONOStopics:
829 main.log.error( "Error: " + topic +
830 " not in leaders" )
831 missing = True
832 else:
833 main.log.error( "leaders() returned None" )
834 except ( ValueError, TypeError ):
835 main.log.exception( "Error parsing leaders" )
836 main.log.error( repr( leaders ) )
837 # Check all nodes
838 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700839 for i in main.activeNodes:
840 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700841 response = node.leaders( jsonFormat=False)
842 main.log.warn( str( node.name ) + " leaders output: \n" +
843 str( response ) )
844
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700845 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700846 try:
847 if partitions :
848 parsedPartitions = json.loads( partitions )
849 main.log.warn( json.dumps( parsedPartitions,
850 sort_keys=True,
851 indent=4,
852 separators=( ',', ': ' ) ) )
853 # TODO check for a leader in all paritions
854 # TODO check for consistency among nodes
855 else:
856 main.log.error( "partitions() returned None" )
857 except ( ValueError, TypeError ):
858 main.log.exception( "Error parsing partitions" )
859 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700860 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700861 try:
862 if pendingMap :
863 parsedPending = json.loads( pendingMap )
864 main.log.warn( json.dumps( parsedPending,
865 sort_keys=True,
866 indent=4,
867 separators=( ',', ': ' ) ) )
868 # TODO check something here?
869 else:
870 main.log.error( "pendingMap() returned None" )
871 except ( ValueError, TypeError ):
872 main.log.exception( "Error parsing pending map" )
873 main.log.error( repr( pendingMap ) )
874
    def CASE4( self, main ):
        """
        Ping across added host intents

        Verify dataplane connectivity for the host intents added earlier
        (see CASE3) by pinging each h<i> -> h<i+10> pair, then inspect the
        intent states, topic leadership, store partitions, and the pending
        map on the active ONOS nodes. If any intent is not yet INSTALLED,
        wait 60 seconds, dump diagnostic state, and re-run the pings to
        give the intents time to settle.
        """
        import json
        import time
        # Sanity-check the test fixtures set up in CASE1 before doing work
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                                "functionality and check the state of " +\
                                "the intent"
        main.step( "Ping across added host intents" )
        # Single-node queries below all go through the first active node's CLI
        onosCli = main.CLIs[ main.activeNodes[0] ]
        PingResult = main.TRUE
        # Each host h8..h17 should reach its intent peer h18..h27
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # Dump the intents from one node to help debug the ping failures
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                # NOTE(review): if intents() itself raises TypeError,
                # tmpIntents is unbound in the except below — confirm
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll (up to 40 times, 1s apart) until every intent is INSTALLED
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    # NOTE(review): a missing 'state' key gives None here and
                    # the membership test raises TypeError (caught below)
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Check leadership of topics" )
        # Every intent work partition topic should have a leader elected
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                # Expect 14 intent work partitions: intent-partition-0..13
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            # Dump raw leaders output from every active node for the logs
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # Retry path: intents were not all INSTALLED above, so wait and
        # re-collect the same diagnostics before pinging one more time
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            # Re-check topic leadership after the wait
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                # A topic lost its leader: dump raw output from every node
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            # Same h<i> -> h<i+10> ping matrix as the first attempt
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1155
1156 def CASE5( self, main ):
1157 """
1158 Reading state of ONOS
1159 """
1160 import json
1161 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001162 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001163 assert main, "main not defined"
1164 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001165 assert main.CLIs, "main.CLIs not defined"
1166 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001167
1168 main.case( "Setting up and gathering data for current state" )
1169 # The general idea for this test case is to pull the state of
1170 # ( intents,flows, topology,... ) from each ONOS node
1171 # We can then compare them with each other and also with past states
1172
1173 main.step( "Check that each switch has a master" )
1174 global mastershipState
1175 mastershipState = '[]'
1176
1177 # Assert that each device has a master
1178 rolesNotNull = main.TRUE
1179 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001180 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001181 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001182 name="rolesNotNull-" + str( i ),
1183 args=[] )
1184 threads.append( t )
1185 t.start()
1186
1187 for t in threads:
1188 t.join()
1189 rolesNotNull = rolesNotNull and t.result
1190 utilities.assert_equals(
1191 expect=main.TRUE,
1192 actual=rolesNotNull,
1193 onpass="Each device has a master",
1194 onfail="Some devices don't have a master assigned" )
1195
1196 main.step( "Get the Mastership of each switch from each controller" )
1197 ONOSMastership = []
1198 mastershipCheck = main.FALSE
1199 consistentMastership = True
1200 rolesResults = True
1201 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001202 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001203 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001204 name="roles-" + str( i ),
1205 args=[] )
1206 threads.append( t )
1207 t.start()
1208
1209 for t in threads:
1210 t.join()
1211 ONOSMastership.append( t.result )
1212
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001213 for i in range( len( ONOSMastership ) ):
1214 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001215 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001216 main.log.error( "Error in getting ONOS" + node + " roles" )
1217 main.log.warn( "ONOS" + node + " mastership response: " +
1218 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001219 rolesResults = False
1220 utilities.assert_equals(
1221 expect=True,
1222 actual=rolesResults,
1223 onpass="No error in reading roles output",
1224 onfail="Error in reading roles from ONOS" )
1225
1226 main.step( "Check for consistency in roles from each controller" )
1227 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1228 main.log.info(
1229 "Switch roles are consistent across all ONOS nodes" )
1230 else:
1231 consistentMastership = False
1232 utilities.assert_equals(
1233 expect=True,
1234 actual=consistentMastership,
1235 onpass="Switch roles are consistent across all ONOS nodes",
1236 onfail="ONOS nodes have different views of switch roles" )
1237
1238 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001239 for i in range( len( main.activeNodes ) ):
1240 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001241 try:
1242 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001243 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001244 json.dumps(
1245 json.loads( ONOSMastership[ i ] ),
1246 sort_keys=True,
1247 indent=4,
1248 separators=( ',', ': ' ) ) )
1249 except ( ValueError, TypeError ):
1250 main.log.warn( repr( ONOSMastership[ i ] ) )
1251 elif rolesResults and consistentMastership:
1252 mastershipCheck = main.TRUE
1253 mastershipState = ONOSMastership[ 0 ]
1254
1255 main.step( "Get the intents from each controller" )
1256 global intentState
1257 intentState = []
1258 ONOSIntents = []
1259 intentCheck = main.FALSE
1260 consistentIntents = True
1261 intentsResults = True
1262 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001263 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001264 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001265 name="intents-" + str( i ),
1266 args=[],
1267 kwargs={ 'jsonFormat': True } )
1268 threads.append( t )
1269 t.start()
1270
1271 for t in threads:
1272 t.join()
1273 ONOSIntents.append( t.result )
1274
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001275 for i in range( len( ONOSIntents ) ):
1276 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001277 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001278 main.log.error( "Error in getting ONOS" + node + " intents" )
1279 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001280 repr( ONOSIntents[ i ] ) )
1281 intentsResults = False
1282 utilities.assert_equals(
1283 expect=True,
1284 actual=intentsResults,
1285 onpass="No error in reading intents output",
1286 onfail="Error in reading intents from ONOS" )
1287
1288 main.step( "Check for consistency in Intents from each controller" )
1289 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1290 main.log.info( "Intents are consistent across all ONOS " +
1291 "nodes" )
1292 else:
1293 consistentIntents = False
1294 main.log.error( "Intents not consistent" )
1295 utilities.assert_equals(
1296 expect=True,
1297 actual=consistentIntents,
1298 onpass="Intents are consistent across all ONOS nodes",
1299 onfail="ONOS nodes have different views of intents" )
1300
1301 if intentsResults:
1302 # Try to make it easy to figure out what is happening
1303 #
1304 # Intent ONOS1 ONOS2 ...
1305 # 0x01 INSTALLED INSTALLING
1306 # ... ... ...
1307 # ... ... ...
1308 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001309 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001310 title += " " * 10 + "ONOS" + str( n + 1 )
1311 main.log.warn( title )
1312 # get all intent keys in the cluster
1313 keys = []
1314 for nodeStr in ONOSIntents:
1315 node = json.loads( nodeStr )
1316 for intent in node:
1317 keys.append( intent.get( 'id' ) )
1318 keys = set( keys )
1319 for key in keys:
1320 row = "%-13s" % key
1321 for nodeStr in ONOSIntents:
1322 node = json.loads( nodeStr )
1323 for intent in node:
1324 if intent.get( 'id', "Error" ) == key:
1325 row += "%-15s" % intent.get( 'state' )
1326 main.log.warn( row )
1327 # End table view
1328
1329 if intentsResults and not consistentIntents:
1330 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001331 n = str( main.activeNodes[-1] + 1 )
1332 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001333 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1334 sort_keys=True,
1335 indent=4,
1336 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001337 for i in range( len( ONOSIntents ) ):
1338 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001339 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001340 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001341 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1342 sort_keys=True,
1343 indent=4,
1344 separators=( ',', ': ' ) ) )
1345 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001346 main.log.debug( "ONOS" + node + " intents match ONOS" +
1347 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001348 elif intentsResults and consistentIntents:
1349 intentCheck = main.TRUE
1350 intentState = ONOSIntents[ 0 ]
1351
1352 main.step( "Get the flows from each controller" )
1353 global flowState
1354 flowState = []
1355 ONOSFlows = []
1356 ONOSFlowsJson = []
1357 flowCheck = main.FALSE
1358 consistentFlows = True
1359 flowsResults = True
1360 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001361 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001362 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001363 name="flows-" + str( i ),
1364 args=[],
1365 kwargs={ 'jsonFormat': True } )
1366 threads.append( t )
1367 t.start()
1368
1369 # NOTE: Flows command can take some time to run
1370 time.sleep(30)
1371 for t in threads:
1372 t.join()
1373 result = t.result
1374 ONOSFlows.append( result )
1375
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001376 for i in range( len( ONOSFlows ) ):
1377 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001378 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1379 main.log.error( "Error in getting ONOS" + num + " flows" )
1380 main.log.warn( "ONOS" + num + " flows response: " +
1381 repr( ONOSFlows[ i ] ) )
1382 flowsResults = False
1383 ONOSFlowsJson.append( None )
1384 else:
1385 try:
1386 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1387 except ( ValueError, TypeError ):
1388 # FIXME: change this to log.error?
1389 main.log.exception( "Error in parsing ONOS" + num +
1390 " response as json." )
1391 main.log.error( repr( ONOSFlows[ i ] ) )
1392 ONOSFlowsJson.append( None )
1393 flowsResults = False
1394 utilities.assert_equals(
1395 expect=True,
1396 actual=flowsResults,
1397 onpass="No error in reading flows output",
1398 onfail="Error in reading flows from ONOS" )
1399
1400 main.step( "Check for consistency in Flows from each controller" )
1401 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1402 if all( tmp ):
1403 main.log.info( "Flow count is consistent across all ONOS nodes" )
1404 else:
1405 consistentFlows = False
1406 utilities.assert_equals(
1407 expect=True,
1408 actual=consistentFlows,
1409 onpass="The flow count is consistent across all ONOS nodes",
1410 onfail="ONOS nodes have different flow counts" )
1411
1412 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001413 for i in range( len( ONOSFlows ) ):
1414 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001415 try:
1416 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001417 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001418 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1419 indent=4, separators=( ',', ': ' ) ) )
1420 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001421 main.log.warn( "ONOS" + node + " flows: " +
1422 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001423 elif flowsResults and consistentFlows:
1424 flowCheck = main.TRUE
1425 flowState = ONOSFlows[ 0 ]
1426
1427 main.step( "Get the OF Table entries" )
1428 global flows
1429 flows = []
1430 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001431 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001432 if flowCheck == main.FALSE:
1433 for table in flows:
1434 main.log.warn( table )
1435 # TODO: Compare switch flow tables with ONOS flow tables
1436
1437 main.step( "Start continuous pings" )
1438 main.Mininet2.pingLong(
1439 src=main.params[ 'PING' ][ 'source1' ],
1440 target=main.params[ 'PING' ][ 'target1' ],
1441 pingTime=500 )
1442 main.Mininet2.pingLong(
1443 src=main.params[ 'PING' ][ 'source2' ],
1444 target=main.params[ 'PING' ][ 'target2' ],
1445 pingTime=500 )
1446 main.Mininet2.pingLong(
1447 src=main.params[ 'PING' ][ 'source3' ],
1448 target=main.params[ 'PING' ][ 'target3' ],
1449 pingTime=500 )
1450 main.Mininet2.pingLong(
1451 src=main.params[ 'PING' ][ 'source4' ],
1452 target=main.params[ 'PING' ][ 'target4' ],
1453 pingTime=500 )
1454 main.Mininet2.pingLong(
1455 src=main.params[ 'PING' ][ 'source5' ],
1456 target=main.params[ 'PING' ][ 'target5' ],
1457 pingTime=500 )
1458 main.Mininet2.pingLong(
1459 src=main.params[ 'PING' ][ 'source6' ],
1460 target=main.params[ 'PING' ][ 'target6' ],
1461 pingTime=500 )
1462 main.Mininet2.pingLong(
1463 src=main.params[ 'PING' ][ 'source7' ],
1464 target=main.params[ 'PING' ][ 'target7' ],
1465 pingTime=500 )
1466 main.Mininet2.pingLong(
1467 src=main.params[ 'PING' ][ 'source8' ],
1468 target=main.params[ 'PING' ][ 'target8' ],
1469 pingTime=500 )
1470 main.Mininet2.pingLong(
1471 src=main.params[ 'PING' ][ 'source9' ],
1472 target=main.params[ 'PING' ][ 'target9' ],
1473 pingTime=500 )
1474 main.Mininet2.pingLong(
1475 src=main.params[ 'PING' ][ 'source10' ],
1476 target=main.params[ 'PING' ][ 'target10' ],
1477 pingTime=500 )
1478
1479 main.step( "Collecting topology information from ONOS" )
1480 devices = []
1481 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001482 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001483 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001484 name="devices-" + str( i ),
1485 args=[ ] )
1486 threads.append( t )
1487 t.start()
1488
1489 for t in threads:
1490 t.join()
1491 devices.append( t.result )
1492 hosts = []
1493 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001494 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001495 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001496 name="hosts-" + str( i ),
1497 args=[ ] )
1498 threads.append( t )
1499 t.start()
1500
1501 for t in threads:
1502 t.join()
1503 try:
1504 hosts.append( json.loads( t.result ) )
1505 except ( ValueError, TypeError ):
1506 # FIXME: better handling of this, print which node
1507 # Maybe use thread name?
1508 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001509 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001510 hosts.append( None )
1511
1512 ports = []
1513 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001514 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001515 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001516 name="ports-" + str( i ),
1517 args=[ ] )
1518 threads.append( t )
1519 t.start()
1520
1521 for t in threads:
1522 t.join()
1523 ports.append( t.result )
1524 links = []
1525 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001526 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001527 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001528 name="links-" + str( i ),
1529 args=[ ] )
1530 threads.append( t )
1531 t.start()
1532
1533 for t in threads:
1534 t.join()
1535 links.append( t.result )
1536 clusters = []
1537 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001538 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001539 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001540 name="clusters-" + str( i ),
1541 args=[ ] )
1542 threads.append( t )
1543 t.start()
1544
1545 for t in threads:
1546 t.join()
1547 clusters.append( t.result )
1548 # Compare json objects for hosts and dataplane clusters
1549
1550 # hosts
1551 main.step( "Host view is consistent across ONOS nodes" )
1552 consistentHostsResult = main.TRUE
1553 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001554 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001555 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001556 if hosts[ controller ] == hosts[ 0 ]:
1557 continue
1558 else: # hosts not consistent
1559 main.log.error( "hosts from ONOS" +
1560 controllerStr +
1561 " is inconsistent with ONOS1" )
1562 main.log.warn( repr( hosts[ controller ] ) )
1563 consistentHostsResult = main.FALSE
1564
1565 else:
1566 main.log.error( "Error in getting ONOS hosts from ONOS" +
1567 controllerStr )
1568 consistentHostsResult = main.FALSE
1569 main.log.warn( "ONOS" + controllerStr +
1570 " hosts response: " +
1571 repr( hosts[ controller ] ) )
1572 utilities.assert_equals(
1573 expect=main.TRUE,
1574 actual=consistentHostsResult,
1575 onpass="Hosts view is consistent across all ONOS nodes",
1576 onfail="ONOS nodes have different views of hosts" )
1577
1578 main.step( "Each host has an IP address" )
1579 ipResult = main.TRUE
1580 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001581 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001582 if hosts[ controller ]:
1583 for host in hosts[ controller ]:
1584 if not host.get( 'ipAddresses', [ ] ):
1585 main.log.error( "Error with host ips on controller" +
1586 controllerStr + ": " + str( host ) )
1587 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001588 utilities.assert_equals(
1589 expect=main.TRUE,
1590 actual=ipResult,
1591 onpass="The ips of the hosts aren't empty",
1592 onfail="The ip of at least one host is missing" )
1593
1594 # Strongly connected clusters of devices
1595 main.step( "Cluster view is consistent across ONOS nodes" )
1596 consistentClustersResult = main.TRUE
1597 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001598 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001599 if "Error" not in clusters[ controller ]:
1600 if clusters[ controller ] == clusters[ 0 ]:
1601 continue
1602 else: # clusters not consistent
1603 main.log.error( "clusters from ONOS" + controllerStr +
1604 " is inconsistent with ONOS1" )
1605 consistentClustersResult = main.FALSE
1606
1607 else:
1608 main.log.error( "Error in getting dataplane clusters " +
1609 "from ONOS" + controllerStr )
1610 consistentClustersResult = main.FALSE
1611 main.log.warn( "ONOS" + controllerStr +
1612 " clusters response: " +
1613 repr( clusters[ controller ] ) )
1614 utilities.assert_equals(
1615 expect=main.TRUE,
1616 actual=consistentClustersResult,
1617 onpass="Clusters view is consistent across all ONOS nodes",
1618 onfail="ONOS nodes have different views of clusters" )
1619 # there should always only be one cluster
1620 main.step( "Cluster view correct across ONOS nodes" )
1621 try:
1622 numClusters = len( json.loads( clusters[ 0 ] ) )
1623 except ( ValueError, TypeError ):
1624 main.log.exception( "Error parsing clusters[0]: " +
1625 repr( clusters[ 0 ] ) )
1626 clusterResults = main.FALSE
1627 if numClusters == 1:
1628 clusterResults = main.TRUE
1629 utilities.assert_equals(
1630 expect=1,
1631 actual=numClusters,
1632 onpass="ONOS shows 1 SCC",
1633 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1634
1635 main.step( "Comparing ONOS topology to MN" )
1636 devicesResults = main.TRUE
1637 linksResults = main.TRUE
1638 hostsResults = main.TRUE
1639 mnSwitches = main.Mininet1.getSwitches()
1640 mnLinks = main.Mininet1.getLinks()
1641 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001642 for controller in main.activeNodes:
1643 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001644 if devices[ controller ] and ports[ controller ] and\
1645 "Error" not in devices[ controller ] and\
1646 "Error" not in ports[ controller ]:
1647
1648 currentDevicesResult = main.Mininet1.compareSwitches(
1649 mnSwitches,
1650 json.loads( devices[ controller ] ),
1651 json.loads( ports[ controller ] ) )
1652 else:
1653 currentDevicesResult = main.FALSE
1654 utilities.assert_equals( expect=main.TRUE,
1655 actual=currentDevicesResult,
1656 onpass="ONOS" + controllerStr +
1657 " Switches view is correct",
1658 onfail="ONOS" + controllerStr +
1659 " Switches view is incorrect" )
1660 if links[ controller ] and "Error" not in links[ controller ]:
1661 currentLinksResult = main.Mininet1.compareLinks(
1662 mnSwitches, mnLinks,
1663 json.loads( links[ controller ] ) )
1664 else:
1665 currentLinksResult = main.FALSE
1666 utilities.assert_equals( expect=main.TRUE,
1667 actual=currentLinksResult,
1668 onpass="ONOS" + controllerStr +
1669 " links view is correct",
1670 onfail="ONOS" + controllerStr +
1671 " links view is incorrect" )
1672
Jon Hall657cdf62015-12-17 14:40:51 -08001673 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001674 currentHostsResult = main.Mininet1.compareHosts(
1675 mnHosts,
1676 hosts[ controller ] )
1677 else:
1678 currentHostsResult = main.FALSE
1679 utilities.assert_equals( expect=main.TRUE,
1680 actual=currentHostsResult,
1681 onpass="ONOS" + controllerStr +
1682 " hosts exist in Mininet",
1683 onfail="ONOS" + controllerStr +
1684 " hosts don't match Mininet" )
1685
1686 devicesResults = devicesResults and currentDevicesResult
1687 linksResults = linksResults and currentLinksResult
1688 hostsResults = hostsResults and currentHostsResult
1689
1690 main.step( "Device information is correct" )
1691 utilities.assert_equals(
1692 expect=main.TRUE,
1693 actual=devicesResults,
1694 onpass="Device information is correct",
1695 onfail="Device information is incorrect" )
1696
1697 main.step( "Links are correct" )
1698 utilities.assert_equals(
1699 expect=main.TRUE,
1700 actual=linksResults,
1701 onpass="Link are correct",
1702 onfail="Links are incorrect" )
1703
1704 main.step( "Hosts are correct" )
1705 utilities.assert_equals(
1706 expect=main.TRUE,
1707 actual=hostsResults,
1708 onpass="Hosts are correct",
1709 onfail="Hosts are incorrect" )
1710
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001711 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001712 """
1713 The Failure case.
1714 """
Jon Halle1a3b752015-07-22 13:02:46 -07001715 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001716 assert main, "main not defined"
1717 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001718 assert main.CLIs, "main.CLIs not defined"
1719 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001720 main.case( "Stop minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001721
1722 main.step( "Checking ONOS Logs for errors" )
1723 for node in main.nodes:
1724 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1725 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1726
Jon Hall3b489db2015-10-05 14:38:37 -07001727 n = len( main.nodes ) # Number of nodes
1728 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1729 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1730 if n > 3:
1731 main.kill.append( p - 1 )
1732 # NOTE: This only works for cluster sizes of 3,5, or 7.
1733
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001734 main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001735 killResults = main.TRUE
1736 for i in main.kill:
1737 killResults = killResults and\
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001738 main.ONOSbench.onosStop( main.nodes[i].ip_address )
1739 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001740 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001741 onpass="ONOS nodes stopped successfully",
1742 onfail="ONOS nodes NOT successfully stopped" )
1743
1744 def CASE62( self, main ):
1745 """
1746 The bring up stopped nodes
1747 """
1748 import time
1749 assert main.numCtrls, "main.numCtrls not defined"
1750 assert main, "main not defined"
1751 assert utilities.assert_equals, "utilities.assert_equals not defined"
1752 assert main.CLIs, "main.CLIs not defined"
1753 assert main.nodes, "main.nodes not defined"
1754 assert main.kill, "main.kill not defined"
1755 main.case( "Restart minority of ONOS nodes" )
1756
1757 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1758 startResults = main.TRUE
1759 restartTime = time.time()
1760 for i in main.kill:
1761 startResults = startResults and\
1762 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1763 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1764 onpass="ONOS nodes started successfully",
1765 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001766
1767 main.step( "Checking if ONOS is up yet" )
1768 count = 0
1769 onosIsupResult = main.FALSE
1770 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001771 onosIsupResult = main.TRUE
1772 for i in main.kill:
1773 onosIsupResult = onosIsupResult and\
1774 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001775 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001776 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1777 onpass="ONOS restarted successfully",
1778 onfail="ONOS restart NOT successful" )
1779
Jon Halle1a3b752015-07-22 13:02:46 -07001780 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001781 cliResults = main.TRUE
1782 for i in main.kill:
1783 cliResults = cliResults and\
1784 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001785 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001786 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1787 onpass="ONOS cli restarted",
1788 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001789 main.activeNodes.sort()
1790 try:
1791 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1792 "List of active nodes has duplicates, this likely indicates something was run out of order"
1793 except AssertionError:
1794 main.log.exception( "" )
1795 main.cleanup()
1796 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001797
1798 # Grab the time of restart so we chan check how long the gossip
1799 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001800 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001801 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001802 # TODO: MAke this configurable. Also, we are breaking the above timer
1803 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001804 node = main.activeNodes[0]
1805 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1806 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1807 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001808
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        Verifies, across all nodes still in main.activeNodes, that:
        device mastership is assigned and consistent, intents are readable
        and consistent (and unchanged since before the failure), switch
        flow tables are unchanged, and leadership election still works.

        NOTE(review): this case reads ``intentState`` and ``flows`` which
        are not defined in this method or visible at module scope here —
        they appear to be produced by CASE5 ("Reading state of ONOS");
        running CASE7 without CASE5 would raise a NameError. Confirm how
        the test framework shares these names between cases.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # Ensure main.kill exists even if CASE61 was skipped; an empty list
        # means "no nodes were stopped"
        try:
            main.kill
        except AttributeError:
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query every active node in parallel, one thread per node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output was empty or contained "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes must report the same mastership view as the first node
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's pretty-printed roles for triage
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Sorted string comparison against the first node's intents
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent        ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...           ...         ...
            #  ...           ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states (e.g. INSTALLED counts)
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        # NOTE(review): intentState is not defined in this method; it is
        # presumably produced by CASE5 — confirm it is in scope here.
        sameIntents = main.FALSE
        if intentState and intentState == ONOSIntents[ 0 ]:
            sameIntents = main.TRUE
            main.log.info( "Intents are consistent with before failure" )
        # TODO: possibly the states have changed? we may need to figure out
        #       what the acceptable states are
        elif len( intentState ) == len( ONOSIntents[ 0 ] ):
            # Same length but not byte-equal: check set membership of each
            # pre-failure intent in the current list
            sameIntents = main.TRUE
            try:
                before = json.loads( intentState )
                after = json.loads( ONOSIntents[ 0 ] )
                for intent in before:
                    if intent not in after:
                        sameIntents = main.FALSE
                        main.log.debug( "Intent is not currently in ONOS " +
                                        "(at least in the same form):" )
                        main.log.debug( json.dumps( intent ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        if sameIntents == main.FALSE:
            # Dump both snapshots so a failure can be diffed from the logs
            try:
                main.log.debug( "ONOS intents before: " )
                main.log.debug( json.dumps( json.loads( intentState ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
                main.log.debug( "Current ONOS intents: " )
                main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        # NOTE(review): 'flows' (pre-failure flow-table snapshots) is not
        # defined in this method; presumably captured in CASE5 — confirm.
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
            if FlowTables == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )

        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes that were stopped/restarted; none of them should
        # currently be the election-app leader
        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        # Every active node must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2125
2126 def CASE8( self, main ):
2127 """
2128 Compare topo
2129 """
2130 import json
2131 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002132 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002133 assert main, "main not defined"
2134 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002135 assert main.CLIs, "main.CLIs not defined"
2136 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002137
2138 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002139 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002140 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002141 topoResult = main.FALSE
2142 elapsed = 0
2143 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002144 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002145 startTime = time.time()
2146 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002147 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002148 devicesResults = main.TRUE
2149 linksResults = main.TRUE
2150 hostsResults = main.TRUE
2151 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002152 count += 1
2153 cliStart = time.time()
2154 devices = []
2155 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002156 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002157 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002158 name="devices-" + str( i ),
2159 args=[ ] )
2160 threads.append( t )
2161 t.start()
2162
2163 for t in threads:
2164 t.join()
2165 devices.append( t.result )
2166 hosts = []
2167 ipResult = main.TRUE
2168 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002169 for i in main.activeNodes:
Jon Hallbaf53162015-12-17 17:04:34 -08002170 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002171 name="hosts-" + str( i ),
Jon Hallbaf53162015-12-17 17:04:34 -08002172 args=[ main.CLIs[i].hosts, [ None ] ],
2173 kwargs= { 'sleep': 5, 'attempts': 5,
2174 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002175 threads.append( t )
2176 t.start()
2177
2178 for t in threads:
2179 t.join()
2180 try:
2181 hosts.append( json.loads( t.result ) )
2182 except ( ValueError, TypeError ):
2183 main.log.exception( "Error parsing hosts results" )
2184 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002185 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002186 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002187 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002188 if hosts[ controller ]:
2189 for host in hosts[ controller ]:
2190 if host is None or host.get( 'ipAddresses', [] ) == []:
2191 main.log.error(
2192 "Error with host ipAddresses on controller" +
2193 controllerStr + ": " + str( host ) )
2194 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002195 ports = []
2196 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002197 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002198 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002199 name="ports-" + str( i ),
2200 args=[ ] )
2201 threads.append( t )
2202 t.start()
2203
2204 for t in threads:
2205 t.join()
2206 ports.append( t.result )
2207 links = []
2208 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002209 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002210 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002211 name="links-" + str( i ),
2212 args=[ ] )
2213 threads.append( t )
2214 t.start()
2215
2216 for t in threads:
2217 t.join()
2218 links.append( t.result )
2219 clusters = []
2220 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002221 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002222 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002223 name="clusters-" + str( i ),
2224 args=[ ] )
2225 threads.append( t )
2226 t.start()
2227
2228 for t in threads:
2229 t.join()
2230 clusters.append( t.result )
2231
2232 elapsed = time.time() - startTime
2233 cliTime = time.time() - cliStart
2234 print "Elapsed time: " + str( elapsed )
2235 print "CLI time: " + str( cliTime )
2236
2237 mnSwitches = main.Mininet1.getSwitches()
2238 mnLinks = main.Mininet1.getLinks()
2239 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002240 for controller in range( len( main.activeNodes ) ):
2241 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002242 if devices[ controller ] and ports[ controller ] and\
2243 "Error" not in devices[ controller ] and\
2244 "Error" not in ports[ controller ]:
2245
2246 currentDevicesResult = main.Mininet1.compareSwitches(
2247 mnSwitches,
2248 json.loads( devices[ controller ] ),
2249 json.loads( ports[ controller ] ) )
2250 else:
2251 currentDevicesResult = main.FALSE
2252 utilities.assert_equals( expect=main.TRUE,
2253 actual=currentDevicesResult,
2254 onpass="ONOS" + controllerStr +
2255 " Switches view is correct",
2256 onfail="ONOS" + controllerStr +
2257 " Switches view is incorrect" )
2258
2259 if links[ controller ] and "Error" not in links[ controller ]:
2260 currentLinksResult = main.Mininet1.compareLinks(
2261 mnSwitches, mnLinks,
2262 json.loads( links[ controller ] ) )
2263 else:
2264 currentLinksResult = main.FALSE
2265 utilities.assert_equals( expect=main.TRUE,
2266 actual=currentLinksResult,
2267 onpass="ONOS" + controllerStr +
2268 " links view is correct",
2269 onfail="ONOS" + controllerStr +
2270 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002271 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002272 currentHostsResult = main.Mininet1.compareHosts(
2273 mnHosts,
2274 hosts[ controller ] )
Jon Hallaf39dcc2016-01-05 12:17:01 -08002275 elif hosts[ controller ] == []:
2276 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002277 else:
2278 currentHostsResult = main.FALSE
2279 utilities.assert_equals( expect=main.TRUE,
2280 actual=currentHostsResult,
2281 onpass="ONOS" + controllerStr +
2282 " hosts exist in Mininet",
2283 onfail="ONOS" + controllerStr +
2284 " hosts don't match Mininet" )
2285 # CHECKING HOST ATTACHMENT POINTS
2286 hostAttachment = True
2287 zeroHosts = False
2288 # FIXME: topo-HA/obelisk specific mappings:
2289 # key is mac and value is dpid
2290 mappings = {}
2291 for i in range( 1, 29 ): # hosts 1 through 28
2292 # set up correct variables:
2293 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2294 if i == 1:
2295 deviceId = "1000".zfill(16)
2296 elif i == 2:
2297 deviceId = "2000".zfill(16)
2298 elif i == 3:
2299 deviceId = "3000".zfill(16)
2300 elif i == 4:
2301 deviceId = "3004".zfill(16)
2302 elif i == 5:
2303 deviceId = "5000".zfill(16)
2304 elif i == 6:
2305 deviceId = "6000".zfill(16)
2306 elif i == 7:
2307 deviceId = "6007".zfill(16)
2308 elif i >= 8 and i <= 17:
2309 dpid = '3' + str( i ).zfill( 3 )
2310 deviceId = dpid.zfill(16)
2311 elif i >= 18 and i <= 27:
2312 dpid = '6' + str( i ).zfill( 3 )
2313 deviceId = dpid.zfill(16)
2314 elif i == 28:
2315 deviceId = "2800".zfill(16)
2316 mappings[ macId ] = deviceId
Jon Hallbaf53162015-12-17 17:04:34 -08002317 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002318 if hosts[ controller ] == []:
2319 main.log.warn( "There are no hosts discovered" )
2320 zeroHosts = True
2321 else:
2322 for host in hosts[ controller ]:
2323 mac = None
2324 location = None
2325 device = None
2326 port = None
2327 try:
2328 mac = host.get( 'mac' )
2329 assert mac, "mac field could not be found for this host object"
2330
2331 location = host.get( 'location' )
2332 assert location, "location field could not be found for this host object"
2333
2334 # Trim the protocol identifier off deviceId
2335 device = str( location.get( 'elementId' ) ).split(':')[1]
2336 assert device, "elementId field could not be found for this host location object"
2337
2338 port = location.get( 'port' )
2339 assert port, "port field could not be found for this host location object"
2340
2341 # Now check if this matches where they should be
2342 if mac and device and port:
2343 if str( port ) != "1":
2344 main.log.error( "The attachment port is incorrect for " +
2345 "host " + str( mac ) +
2346 ". Expected: 1 Actual: " + str( port) )
2347 hostAttachment = False
2348 if device != mappings[ str( mac ) ]:
2349 main.log.error( "The attachment device is incorrect for " +
2350 "host " + str( mac ) +
2351 ". Expected: " + mappings[ str( mac ) ] +
2352 " Actual: " + device )
2353 hostAttachment = False
2354 else:
2355 hostAttachment = False
2356 except AssertionError:
2357 main.log.exception( "Json object not as expected" )
2358 main.log.error( repr( host ) )
2359 hostAttachment = False
2360 else:
2361 main.log.error( "No hosts json output or \"Error\"" +
2362 " in output. hosts = " +
2363 repr( hosts[ controller ] ) )
2364 if zeroHosts is False:
2365 hostAttachment = True
2366
2367 # END CHECKING HOST ATTACHMENT POINTS
2368 devicesResults = devicesResults and currentDevicesResult
2369 linksResults = linksResults and currentLinksResult
2370 hostsResults = hostsResults and currentHostsResult
2371 hostAttachmentResults = hostAttachmentResults and\
2372 hostAttachment
Jon Halle9b1fa32015-12-08 15:32:21 -08002373 topoResult = devicesResults and linksResults and\
2374 hostsResults and hostAttachmentResults
2375 utilities.assert_equals( expect=True,
2376 actual=topoResult,
2377 onpass="ONOS topology matches Mininet",
2378 onfail="ONOS topology don't match Mininet" )
2379 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002380
2381 # Compare json objects for hosts and dataplane clusters
2382
2383 # hosts
2384 main.step( "Hosts view is consistent across all ONOS nodes" )
2385 consistentHostsResult = main.TRUE
2386 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002387 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallaf39dcc2016-01-05 12:17:01 -08002388 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002389 if hosts[ controller ] == hosts[ 0 ]:
2390 continue
2391 else: # hosts not consistent
2392 main.log.error( "hosts from ONOS" + controllerStr +
2393 " is inconsistent with ONOS1" )
2394 main.log.warn( repr( hosts[ controller ] ) )
2395 consistentHostsResult = main.FALSE
2396
2397 else:
2398 main.log.error( "Error in getting ONOS hosts from ONOS" +
2399 controllerStr )
2400 consistentHostsResult = main.FALSE
2401 main.log.warn( "ONOS" + controllerStr +
2402 " hosts response: " +
2403 repr( hosts[ controller ] ) )
2404 utilities.assert_equals(
2405 expect=main.TRUE,
2406 actual=consistentHostsResult,
2407 onpass="Hosts view is consistent across all ONOS nodes",
2408 onfail="ONOS nodes have different views of hosts" )
2409
2410 main.step( "Hosts information is correct" )
2411 hostsResults = hostsResults and ipResult
2412 utilities.assert_equals(
2413 expect=main.TRUE,
2414 actual=hostsResults,
2415 onpass="Host information is correct",
2416 onfail="Host information is incorrect" )
2417
2418 main.step( "Host attachment points to the network" )
2419 utilities.assert_equals(
2420 expect=True,
2421 actual=hostAttachmentResults,
2422 onpass="Hosts are correctly attached to the network",
2423 onfail="ONOS did not correctly attach hosts to the network" )
2424
2425 # Strongly connected clusters of devices
2426 main.step( "Clusters view is consistent across all ONOS nodes" )
2427 consistentClustersResult = main.TRUE
2428 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002429 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002430 if "Error" not in clusters[ controller ]:
2431 if clusters[ controller ] == clusters[ 0 ]:
2432 continue
2433 else: # clusters not consistent
2434 main.log.error( "clusters from ONOS" +
2435 controllerStr +
2436 " is inconsistent with ONOS1" )
2437 consistentClustersResult = main.FALSE
2438
2439 else:
2440 main.log.error( "Error in getting dataplane clusters " +
2441 "from ONOS" + controllerStr )
2442 consistentClustersResult = main.FALSE
2443 main.log.warn( "ONOS" + controllerStr +
2444 " clusters response: " +
2445 repr( clusters[ controller ] ) )
2446 utilities.assert_equals(
2447 expect=main.TRUE,
2448 actual=consistentClustersResult,
2449 onpass="Clusters view is consistent across all ONOS nodes",
2450 onfail="ONOS nodes have different views of clusters" )
2451
2452 main.step( "There is only one SCC" )
2453 # there should always only be one cluster
2454 try:
2455 numClusters = len( json.loads( clusters[ 0 ] ) )
2456 except ( ValueError, TypeError ):
2457 main.log.exception( "Error parsing clusters[0]: " +
2458 repr( clusters[0] ) )
2459 clusterResults = main.FALSE
2460 if numClusters == 1:
2461 clusterResults = main.TRUE
2462 utilities.assert_equals(
2463 expect=1,
2464 actual=numClusters,
2465 onpass="ONOS shows 1 SCC",
2466 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2467
2468 topoResult = ( devicesResults and linksResults
2469 and hostsResults and consistentHostsResult
2470 and consistentClustersResult and clusterResults
2471 and ipResult and hostAttachmentResults )
2472
2473 topoResult = topoResult and int( count <= 2 )
2474 note = "note it takes about " + str( int( cliTime ) ) + \
2475 " seconds for the test to make all the cli calls to fetch " +\
2476 "the topology from each ONOS instance"
2477 main.log.info(
2478 "Very crass estimate for topology discovery/convergence( " +
2479 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2480 str( count ) + " tries" )
2481
2482 main.step( "Device information is correct" )
2483 utilities.assert_equals(
2484 expect=main.TRUE,
2485 actual=devicesResults,
2486 onpass="Device information is correct",
2487 onfail="Device information is incorrect" )
2488
2489 main.step( "Links are correct" )
2490 utilities.assert_equals(
2491 expect=main.TRUE,
2492 actual=linksResults,
2493 onpass="Link are correct",
2494 onfail="Links are incorrect" )
2495
2496 # FIXME: move this to an ONOS state case
2497 main.step( "Checking ONOS nodes" )
2498 nodesOutput = []
2499 nodeResults = main.TRUE
2500 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002501 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002502 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002503 name="nodes-" + str( i ),
2504 args=[ ] )
2505 threads.append( t )
2506 t.start()
2507
2508 for t in threads:
2509 t.join()
2510 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002511 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002512 ips.sort()
Jon Hall5cf14d52015-07-16 12:15:19 -07002513 for i in nodesOutput:
2514 try:
2515 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002516 activeIps = []
2517 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002518 for node in current:
Jon Halle9b1fa32015-12-08 15:32:21 -08002519 if node['state'] == 'ACTIVE':
2520 activeIps.append( node['ip'] )
2521 activeIps.sort()
2522 if ips == activeIps:
2523 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002524 except ( ValueError, TypeError ):
2525 main.log.error( "Error parsing nodes output" )
2526 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002527 currentResult = main.FALSE
2528 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002529 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2530 onpass="Nodes check successful",
2531 onfail="Nodes check NOT successful" )
2532
2533 def CASE9( self, main ):
2534 """
2535 Link s3-s28 down
2536 """
2537 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002538 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002539 assert main, "main not defined"
2540 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002541 assert main.CLIs, "main.CLIs not defined"
2542 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002543 # NOTE: You should probably run a topology check after this
2544
2545 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2546
2547 description = "Turn off a link to ensure that Link Discovery " +\
2548 "is working properly"
2549 main.case( description )
2550
2551 main.step( "Kill Link between s3 and s28" )
2552 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2553 main.log.info( "Waiting " + str( linkSleep ) +
2554 " seconds for link down to be discovered" )
2555 time.sleep( linkSleep )
2556 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2557 onpass="Link down successful",
2558 onfail="Failed to bring link down" )
2559 # TODO do some sort of check here
2560
2561 def CASE10( self, main ):
2562 """
2563 Link s3-s28 up
2564 """
2565 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002566 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002567 assert main, "main not defined"
2568 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002569 assert main.CLIs, "main.CLIs not defined"
2570 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002571 # NOTE: You should probably run a topology check after this
2572
2573 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2574
2575 description = "Restore a link to ensure that Link Discovery is " + \
2576 "working properly"
2577 main.case( description )
2578
2579 main.step( "Bring link between s3 and s28 back up" )
2580 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2581 main.log.info( "Waiting " + str( linkSleep ) +
2582 " seconds for link up to be discovered" )
2583 time.sleep( linkSleep )
2584 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2585 onpass="Link up successful",
2586 onfail="Failed to bring link up" )
2587 # TODO do some sort of check here
2588
2589 def CASE11( self, main ):
2590 """
2591 Switch Down
2592 """
2593 # NOTE: You should probably run a topology check after this
2594 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002595 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002596 assert main, "main not defined"
2597 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002598 assert main.CLIs, "main.CLIs not defined"
2599 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002600
2601 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2602
2603 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002604 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002605 main.case( description )
2606 switch = main.params[ 'kill' ][ 'switch' ]
2607 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2608
2609 # TODO: Make this switch parameterizable
2610 main.step( "Kill " + switch )
2611 main.log.info( "Deleting " + switch )
2612 main.Mininet1.delSwitch( switch )
2613 main.log.info( "Waiting " + str( switchSleep ) +
2614 " seconds for switch down to be discovered" )
2615 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002616 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002617 # Peek at the deleted switch
2618 main.log.warn( str( device ) )
2619 result = main.FALSE
2620 if device and device[ 'available' ] is False:
2621 result = main.TRUE
2622 utilities.assert_equals( expect=main.TRUE, actual=result,
2623 onpass="Kill switch successful",
2624 onfail="Failed to kill switch?" )
2625
2626 def CASE12( self, main ):
2627 """
2628 Switch Up
2629 """
2630 # NOTE: You should probably run a topology check after this
2631 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002632 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002633 assert main, "main not defined"
2634 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002635 assert main.CLIs, "main.CLIs not defined"
2636 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002637 assert ONOS1Port, "ONOS1Port not defined"
2638 assert ONOS2Port, "ONOS2Port not defined"
2639 assert ONOS3Port, "ONOS3Port not defined"
2640 assert ONOS4Port, "ONOS4Port not defined"
2641 assert ONOS5Port, "ONOS5Port not defined"
2642 assert ONOS6Port, "ONOS6Port not defined"
2643 assert ONOS7Port, "ONOS7Port not defined"
2644
2645 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2646 switch = main.params[ 'kill' ][ 'switch' ]
2647 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2648 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002649 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002650 description = "Adding a switch to ensure it is discovered correctly"
2651 main.case( description )
2652
2653 main.step( "Add back " + switch )
2654 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2655 for peer in links:
2656 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002657 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002658 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2659 main.log.info( "Waiting " + str( switchSleep ) +
2660 " seconds for switch up to be discovered" )
2661 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002662 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002663 # Peek at the deleted switch
2664 main.log.warn( str( device ) )
2665 result = main.FALSE
2666 if device and device[ 'available' ]:
2667 result = main.TRUE
2668 utilities.assert_equals( expect=main.TRUE, actual=result,
2669 onpass="add switch successful",
2670 onfail="Failed to add switch?" )
2671
2672 def CASE13( self, main ):
2673 """
2674 Clean up
2675 """
2676 import os
2677 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002678 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002679 assert main, "main not defined"
2680 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002681 assert main.CLIs, "main.CLIs not defined"
2682 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002683
2684 # printing colors to terminal
2685 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2686 'blue': '\033[94m', 'green': '\033[92m',
2687 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2688 main.case( "Test Cleanup" )
2689 main.step( "Killing tcpdumps" )
2690 main.Mininet2.stopTcpdump()
2691
2692 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002693 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002694 main.step( "Copying MN pcap and ONOS log files to test station" )
2695 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2696 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002697 # NOTE: MN Pcap file is being saved to logdir.
2698 # We scp this file as MN and TestON aren't necessarily the same vm
2699
2700 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002701 # TODO: Load these from params
2702 # NOTE: must end in /
2703 logFolder = "/opt/onos/log/"
2704 logFiles = [ "karaf.log", "karaf.log.1" ]
2705 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002706 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002707 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002708 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002709 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2710 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002711 # std*.log's
2712 # NOTE: must end in /
2713 logFolder = "/opt/onos/var/"
2714 logFiles = [ "stderr.log", "stdout.log" ]
2715 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002716 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002717 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002718 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002719 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2720 logFolder + f, dstName )
2721 else:
2722 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002723
2724 main.step( "Stopping Mininet" )
2725 mnResult = main.Mininet1.stopNet()
2726 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2727 onpass="Mininet stopped",
2728 onfail="MN cleanup NOT successful" )
2729
2730 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002731 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002732 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2733 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002734
2735 try:
2736 timerLog = open( main.logdir + "/Timers.csv", 'w')
2737 # Overwrite with empty line and close
2738 labels = "Gossip Intents, Restart"
2739 data = str( gossipTime ) + ", " + str( main.restartTime )
2740 timerLog.write( labels + "\n" + data )
2741 timerLog.close()
2742 except NameError, e:
2743 main.log.exception(e)
2744
2745 def CASE14( self, main ):
2746 """
2747 start election app on all onos nodes
2748 """
Jon Halle1a3b752015-07-22 13:02:46 -07002749 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002750 assert main, "main not defined"
2751 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002752 assert main.CLIs, "main.CLIs not defined"
2753 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002754
2755 main.case("Start Leadership Election app")
2756 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002757 onosCli = main.CLIs[ main.activeNodes[0] ]
2758 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002759 utilities.assert_equals(
2760 expect=main.TRUE,
2761 actual=appResult,
2762 onpass="Election app installed",
2763 onfail="Something went wrong with installing Leadership election" )
2764
2765 main.step( "Run for election on each node" )
2766 leaderResult = main.TRUE
2767 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002768 for i in main.activeNodes:
2769 main.CLIs[i].electionTestRun()
2770 for i in main.activeNodes:
2771 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002772 leader = cli.electionTestLeader()
2773 if leader is None or leader == main.FALSE:
2774 main.log.error( cli.name + ": Leader for the election app " +
2775 "should be an ONOS node, instead got '" +
2776 str( leader ) + "'" )
2777 leaderResult = main.FALSE
2778 leaders.append( leader )
2779 utilities.assert_equals(
2780 expect=main.TRUE,
2781 actual=leaderResult,
2782 onpass="Successfully ran for leadership",
2783 onfail="Failed to run for leadership" )
2784
2785 main.step( "Check that each node shows the same leader" )
2786 sameLeader = main.TRUE
2787 if len( set( leaders ) ) != 1:
2788 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002789 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002790 str( leaders ) )
2791 utilities.assert_equals(
2792 expect=main.TRUE,
2793 actual=sameLeader,
2794 onpass="Leadership is consistent for the election topic",
2795 onfail="Nodes have different leaders" )
2796
2797 def CASE15( self, main ):
2798 """
2799 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002800 15.1 Run election on each node
2801 15.2 Check that each node has the same leaders and candidates
2802 15.3 Find current leader and withdraw
2803 15.4 Check that a new node was elected leader
2804 15.5 Check that that new leader was the candidate of old leader
2805 15.6 Run for election on old leader
2806 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2807 15.8 Make sure that the old leader was added to the candidate list
2808
2809 old and new variable prefixes refer to data from before vs after
2810 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002811 """
2812 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002813 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002814 assert main, "main not defined"
2815 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002816 assert main.CLIs, "main.CLIs not defined"
2817 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002818
Jon Hall5cf14d52015-07-16 12:15:19 -07002819 description = "Check that Leadership Election is still functional"
2820 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002821 # NOTE: Need to re-run since being a canidate is not persistant
2822 # TODO: add check for "Command not found:" in the driver, this
2823 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002824
acsmars71adceb2015-08-31 15:09:26 -07002825 oldLeaders = [] # leaders by node before withdrawl from candidates
2826 newLeaders = [] # leaders by node after withdrawl from candidates
2827 oldAllCandidates = [] # list of lists of each nodes' candidates before
2828 newAllCandidates = [] # list of lists of each nodes' candidates after
2829 oldCandidates = [] # list of candidates from node 0 before withdrawl
2830 newCandidates = [] # list of candidates from node 0 after withdrawl
2831 oldLeader = '' # the old leader from oldLeaders, None if not same
2832 newLeader = '' # the new leaders fron newLoeaders, None if not same
2833 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2834 expectNoLeader = False # True when there is only one leader
2835 if main.numCtrls == 1:
2836 expectNoLeader = True
2837
2838 main.step( "Run for election on each node" )
2839 electionResult = main.TRUE
2840
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002841 for i in main.activeNodes: # run test election on each node
2842 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002843 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002844 utilities.assert_equals(
2845 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002846 actual=electionResult,
2847 onpass="All nodes successfully ran for leadership",
2848 onfail="At least one node failed to run for leadership" )
2849
acsmars3a72bde2015-09-02 14:16:22 -07002850 if electionResult == main.FALSE:
2851 main.log.error(
2852 "Skipping Test Case because Election Test App isn't loaded" )
2853 main.skipCase()
2854
acsmars71adceb2015-08-31 15:09:26 -07002855 main.step( "Check that each node shows the same leader and candidates" )
2856 sameResult = main.TRUE
2857 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002858 for i in main.activeNodes:
2859 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002860 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2861 oldAllCandidates.append( node )
2862 oldLeaders.append( node[ 0 ] )
2863 oldCandidates = oldAllCandidates[ 0 ]
2864
2865 # Check that each node has the same leader. Defines oldLeader
2866 if len( set( oldLeaders ) ) != 1:
2867 sameResult = main.FALSE
2868 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2869 oldLeader = None
2870 else:
2871 oldLeader = oldLeaders[ 0 ]
2872
2873 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002874 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002875 for candidates in oldAllCandidates:
2876 if set( candidates ) != set( oldCandidates ):
2877 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002878 candidateDiscrepancy = True
2879
2880 if candidateDiscrepancy:
2881 failMessage += " and candidates"
2882
acsmars71adceb2015-08-31 15:09:26 -07002883 utilities.assert_equals(
2884 expect=main.TRUE,
2885 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002886 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002887 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002888
2889 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002890 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002891 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002892 if oldLeader is None:
2893 main.log.error( "Leadership isn't consistent." )
2894 withdrawResult = main.FALSE
2895 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002896 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002897 if oldLeader == main.nodes[ i ].ip_address:
2898 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002899 break
2900 else: # FOR/ELSE statement
2901 main.log.error( "Leader election, could not find current leader" )
2902 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002903 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002904 utilities.assert_equals(
2905 expect=main.TRUE,
2906 actual=withdrawResult,
2907 onpass="Node was withdrawn from election",
2908 onfail="Node was not withdrawn from election" )
2909
acsmars71adceb2015-08-31 15:09:26 -07002910 main.step( "Check that a new node was elected leader" )
2911
Jon Hall5cf14d52015-07-16 12:15:19 -07002912 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002913 newLeaderResult = main.TRUE
2914 failMessage = "Nodes have different leaders"
2915
2916 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002917 for i in main.activeNodes:
2918 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002919 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2920 # elections might no have finished yet
2921 if node[ 0 ] == 'none' and not expectNoLeader:
2922 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2923 "sure elections are complete." )
2924 time.sleep(5)
2925 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2926 # election still isn't done or there is a problem
2927 if node[ 0 ] == 'none':
2928 main.log.error( "No leader was elected on at least 1 node" )
2929 newLeaderResult = main.FALSE
2930 newAllCandidates.append( node )
2931 newLeaders.append( node[ 0 ] )
2932 newCandidates = newAllCandidates[ 0 ]
2933
2934 # Check that each node has the same leader. Defines newLeader
2935 if len( set( newLeaders ) ) != 1:
2936 newLeaderResult = main.FALSE
2937 main.log.error( "Nodes have different leaders: " +
2938 str( newLeaders ) )
2939 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002940 else:
acsmars71adceb2015-08-31 15:09:26 -07002941 newLeader = newLeaders[ 0 ]
2942
2943 # Check that each node's candidate list is the same
2944 for candidates in newAllCandidates:
2945 if set( candidates ) != set( newCandidates ):
2946 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002947 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002948
2949 # Check that the new leader is not the older leader, which was withdrawn
2950 if newLeader == oldLeader:
2951 newLeaderResult = main.FALSE
2952 main.log.error( "All nodes still see old leader: " + oldLeader +
2953 " as the current leader" )
2954
Jon Hall5cf14d52015-07-16 12:15:19 -07002955 utilities.assert_equals(
2956 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002957 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002958 onpass="Leadership election passed",
2959 onfail="Something went wrong with Leadership election" )
2960
acsmars71adceb2015-08-31 15:09:26 -07002961 main.step( "Check that that new leader was the candidate of old leader")
2962 # candidates[ 2 ] should be come the top candidate after withdrawl
2963 correctCandidateResult = main.TRUE
2964 if expectNoLeader:
2965 if newLeader == 'none':
2966 main.log.info( "No leader expected. None found. Pass" )
2967 correctCandidateResult = main.TRUE
2968 else:
2969 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2970 correctCandidateResult = main.FALSE
2971 elif newLeader != oldCandidates[ 2 ]:
2972 correctCandidateResult = main.FALSE
2973 main.log.error( "Candidate " + newLeader + " was elected. " +
2974 oldCandidates[ 2 ] + " should have had priority." )
2975
2976 utilities.assert_equals(
2977 expect=main.TRUE,
2978 actual=correctCandidateResult,
2979 onpass="Correct Candidate Elected",
2980 onfail="Incorrect Candidate Elected" )
2981
Jon Hall5cf14d52015-07-16 12:15:19 -07002982 main.step( "Run for election on old leader( just so everyone " +
2983 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002984 if oldLeaderCLI is not None:
2985 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002986 else:
acsmars71adceb2015-08-31 15:09:26 -07002987 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002988 runResult = main.FALSE
2989 utilities.assert_equals(
2990 expect=main.TRUE,
2991 actual=runResult,
2992 onpass="App re-ran for election",
2993 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07002994 main.step(
2995 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002996 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07002997 positionResult = main.TRUE
2998 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
2999
3000 # Reset and reuse the new candidate and leaders lists
3001 newAllCandidates = []
3002 newCandidates = []
3003 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003004 for i in main.activeNodes:
3005 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003006 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3007 if oldLeader not in node: # election might no have finished yet
3008 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3009 "be sure elections are complete" )
3010 time.sleep(5)
3011 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3012 if oldLeader not in node: # election still isn't done, errors
3013 main.log.error(
3014 "Old leader was not elected on at least one node" )
3015 positionResult = main.FALSE
3016 newAllCandidates.append( node )
3017 newLeaders.append( node[ 0 ] )
3018 newCandidates = newAllCandidates[ 0 ]
3019
3020 # Check that each node has the same leader. Defines newLeader
3021 if len( set( newLeaders ) ) != 1:
3022 positionResult = main.FALSE
3023 main.log.error( "Nodes have different leaders: " +
3024 str( newLeaders ) )
3025 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003026 else:
acsmars71adceb2015-08-31 15:09:26 -07003027 newLeader = newLeaders[ 0 ]
3028
3029 # Check that each node's candidate list is the same
3030 for candidates in newAllCandidates:
3031 if set( candidates ) != set( newCandidates ):
3032 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003033 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003034
3035 # Check that the re-elected node is last on the candidate List
3036 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003037 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003038 str( newCandidates ) )
3039 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003040
3041 utilities.assert_equals(
3042 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003043 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003044 onpass="Old leader successfully re-ran for election",
3045 onfail="Something went wrong with Leadership election after " +
3046 "the old leader re-ran for election" )
3047
3048 def CASE16( self, main ):
3049 """
3050 Install Distributed Primitives app
3051 """
3052 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003053 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003054 assert main, "main not defined"
3055 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003056 assert main.CLIs, "main.CLIs not defined"
3057 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003058
3059 # Variables for the distributed primitives tests
3060 global pCounterName
3061 global iCounterName
3062 global pCounterValue
3063 global iCounterValue
3064 global onosSet
3065 global onosSetName
3066 pCounterName = "TestON-Partitions"
3067 iCounterName = "TestON-inMemory"
3068 pCounterValue = 0
3069 iCounterValue = 0
3070 onosSet = set([])
3071 onosSetName = "TestON-set"
3072
3073 description = "Install Primitives app"
3074 main.case( description )
3075 main.step( "Install Primitives app" )
3076 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003077 node = main.activeNodes[0]
3078 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003079 utilities.assert_equals( expect=main.TRUE,
3080 actual=appResults,
3081 onpass="Primitives app activated",
3082 onfail="Primitives app not activated" )
3083 time.sleep( 5 ) # To allow all nodes to activate
3084
3085 def CASE17( self, main ):
3086 """
3087 Check for basic functionality with distributed primitives
3088 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003089 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003090 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003091 assert main, "main not defined"
3092 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003093 assert main.CLIs, "main.CLIs not defined"
3094 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003095 assert pCounterName, "pCounterName not defined"
3096 assert iCounterName, "iCounterName not defined"
3097 assert onosSetName, "onosSetName not defined"
3098 # NOTE: assert fails if value is 0/None/Empty/False
3099 try:
3100 pCounterValue
3101 except NameError:
3102 main.log.error( "pCounterValue not defined, setting to 0" )
3103 pCounterValue = 0
3104 try:
3105 iCounterValue
3106 except NameError:
3107 main.log.error( "iCounterValue not defined, setting to 0" )
3108 iCounterValue = 0
3109 try:
3110 onosSet
3111 except NameError:
3112 main.log.error( "onosSet not defined, setting to empty Set" )
3113 onosSet = set([])
3114 # Variables for the distributed primitives tests. These are local only
3115 addValue = "a"
3116 addAllValue = "a b c d e f"
3117 retainValue = "c d e f"
3118
3119 description = "Check for basic functionality with distributed " +\
3120 "primitives"
3121 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003122 main.caseExplanation = "Test the methods of the distributed " +\
3123 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003124 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003125 # Partitioned counters
3126 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003127 pCounters = []
3128 threads = []
3129 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003130 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003131 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3132 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003133 args=[ pCounterName ] )
3134 pCounterValue += 1
3135 addedPValues.append( pCounterValue )
3136 threads.append( t )
3137 t.start()
3138
3139 for t in threads:
3140 t.join()
3141 pCounters.append( t.result )
3142 # Check that counter incremented numController times
3143 pCounterResults = True
3144 for i in addedPValues:
3145 tmpResult = i in pCounters
3146 pCounterResults = pCounterResults and tmpResult
3147 if not tmpResult:
3148 main.log.error( str( i ) + " is not in partitioned "
3149 "counter incremented results" )
3150 utilities.assert_equals( expect=True,
3151 actual=pCounterResults,
3152 onpass="Default counter incremented",
3153 onfail="Error incrementing default" +
3154 " counter" )
3155
Jon Halle1a3b752015-07-22 13:02:46 -07003156 main.step( "Get then Increment a default counter on each node" )
3157 pCounters = []
3158 threads = []
3159 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003160 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003161 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3162 name="counterGetAndAdd-" + str( i ),
3163 args=[ pCounterName ] )
3164 addedPValues.append( pCounterValue )
3165 pCounterValue += 1
3166 threads.append( t )
3167 t.start()
3168
3169 for t in threads:
3170 t.join()
3171 pCounters.append( t.result )
3172 # Check that counter incremented numController times
3173 pCounterResults = True
3174 for i in addedPValues:
3175 tmpResult = i in pCounters
3176 pCounterResults = pCounterResults and tmpResult
3177 if not tmpResult:
3178 main.log.error( str( i ) + " is not in partitioned "
3179 "counter incremented results" )
3180 utilities.assert_equals( expect=True,
3181 actual=pCounterResults,
3182 onpass="Default counter incremented",
3183 onfail="Error incrementing default" +
3184 " counter" )
3185
3186 main.step( "Counters we added have the correct values" )
3187 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3188 utilities.assert_equals( expect=main.TRUE,
3189 actual=incrementCheck,
3190 onpass="Added counters are correct",
3191 onfail="Added counters are incorrect" )
3192
3193 main.step( "Add -8 to then get a default counter on each node" )
3194 pCounters = []
3195 threads = []
3196 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003197 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003198 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3199 name="counterIncrement-" + str( i ),
3200 args=[ pCounterName ],
3201 kwargs={ "delta": -8 } )
3202 pCounterValue += -8
3203 addedPValues.append( pCounterValue )
3204 threads.append( t )
3205 t.start()
3206
3207 for t in threads:
3208 t.join()
3209 pCounters.append( t.result )
3210 # Check that counter incremented numController times
3211 pCounterResults = True
3212 for i in addedPValues:
3213 tmpResult = i in pCounters
3214 pCounterResults = pCounterResults and tmpResult
3215 if not tmpResult:
3216 main.log.error( str( i ) + " is not in partitioned "
3217 "counter incremented results" )
3218 utilities.assert_equals( expect=True,
3219 actual=pCounterResults,
3220 onpass="Default counter incremented",
3221 onfail="Error incrementing default" +
3222 " counter" )
3223
3224 main.step( "Add 5 to then get a default counter on each node" )
3225 pCounters = []
3226 threads = []
3227 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003228 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003229 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3230 name="counterIncrement-" + str( i ),
3231 args=[ pCounterName ],
3232 kwargs={ "delta": 5 } )
3233 pCounterValue += 5
3234 addedPValues.append( pCounterValue )
3235 threads.append( t )
3236 t.start()
3237
3238 for t in threads:
3239 t.join()
3240 pCounters.append( t.result )
3241 # Check that counter incremented numController times
3242 pCounterResults = True
3243 for i in addedPValues:
3244 tmpResult = i in pCounters
3245 pCounterResults = pCounterResults and tmpResult
3246 if not tmpResult:
3247 main.log.error( str( i ) + " is not in partitioned "
3248 "counter incremented results" )
3249 utilities.assert_equals( expect=True,
3250 actual=pCounterResults,
3251 onpass="Default counter incremented",
3252 onfail="Error incrementing default" +
3253 " counter" )
3254
3255 main.step( "Get then add 5 to a default counter on each node" )
3256 pCounters = []
3257 threads = []
3258 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003259 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003260 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3261 name="counterIncrement-" + str( i ),
3262 args=[ pCounterName ],
3263 kwargs={ "delta": 5 } )
3264 addedPValues.append( pCounterValue )
3265 pCounterValue += 5
3266 threads.append( t )
3267 t.start()
3268
3269 for t in threads:
3270 t.join()
3271 pCounters.append( t.result )
3272 # Check that counter incremented numController times
3273 pCounterResults = True
3274 for i in addedPValues:
3275 tmpResult = i in pCounters
3276 pCounterResults = pCounterResults and tmpResult
3277 if not tmpResult:
3278 main.log.error( str( i ) + " is not in partitioned "
3279 "counter incremented results" )
3280 utilities.assert_equals( expect=True,
3281 actual=pCounterResults,
3282 onpass="Default counter incremented",
3283 onfail="Error incrementing default" +
3284 " counter" )
3285
3286 main.step( "Counters we added have the correct values" )
3287 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3288 utilities.assert_equals( expect=main.TRUE,
3289 actual=incrementCheck,
3290 onpass="Added counters are correct",
3291 onfail="Added counters are incorrect" )
3292
3293 # In-Memory counters
3294 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003295 iCounters = []
3296 addedIValues = []
3297 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003298 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003299 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003300 name="icounterIncrement-" + str( i ),
3301 args=[ iCounterName ],
3302 kwargs={ "inMemory": True } )
3303 iCounterValue += 1
3304 addedIValues.append( iCounterValue )
3305 threads.append( t )
3306 t.start()
3307
3308 for t in threads:
3309 t.join()
3310 iCounters.append( t.result )
3311 # Check that counter incremented numController times
3312 iCounterResults = True
3313 for i in addedIValues:
3314 tmpResult = i in iCounters
3315 iCounterResults = iCounterResults and tmpResult
3316 if not tmpResult:
3317 main.log.error( str( i ) + " is not in the in-memory "
3318 "counter incremented results" )
3319 utilities.assert_equals( expect=True,
3320 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003321 onpass="In-memory counter incremented",
3322 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003323 " counter" )
3324
Jon Halle1a3b752015-07-22 13:02:46 -07003325 main.step( "Get then Increment a in-memory counter on each node" )
3326 iCounters = []
3327 threads = []
3328 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003329 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003330 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3331 name="counterGetAndAdd-" + str( i ),
3332 args=[ iCounterName ],
3333 kwargs={ "inMemory": True } )
3334 addedIValues.append( iCounterValue )
3335 iCounterValue += 1
3336 threads.append( t )
3337 t.start()
3338
3339 for t in threads:
3340 t.join()
3341 iCounters.append( t.result )
3342 # Check that counter incremented numController times
3343 iCounterResults = True
3344 for i in addedIValues:
3345 tmpResult = i in iCounters
3346 iCounterResults = iCounterResults and tmpResult
3347 if not tmpResult:
3348 main.log.error( str( i ) + " is not in in-memory "
3349 "counter incremented results" )
3350 utilities.assert_equals( expect=True,
3351 actual=iCounterResults,
3352 onpass="In-memory counter incremented",
3353 onfail="Error incrementing in-memory" +
3354 " counter" )
3355
3356 main.step( "Counters we added have the correct values" )
3357 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3358 utilities.assert_equals( expect=main.TRUE,
3359 actual=incrementCheck,
3360 onpass="Added counters are correct",
3361 onfail="Added counters are incorrect" )
3362
3363 main.step( "Add -8 to then get a in-memory counter on each node" )
3364 iCounters = []
3365 threads = []
3366 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003367 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003368 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3369 name="counterIncrement-" + str( i ),
3370 args=[ iCounterName ],
3371 kwargs={ "delta": -8, "inMemory": True } )
3372 iCounterValue += -8
3373 addedIValues.append( iCounterValue )
3374 threads.append( t )
3375 t.start()
3376
3377 for t in threads:
3378 t.join()
3379 iCounters.append( t.result )
3380 # Check that counter incremented numController times
3381 iCounterResults = True
3382 for i in addedIValues:
3383 tmpResult = i in iCounters
3384 iCounterResults = iCounterResults and tmpResult
3385 if not tmpResult:
3386 main.log.error( str( i ) + " is not in in-memory "
3387 "counter incremented results" )
3388 utilities.assert_equals( expect=True,
3389 actual=pCounterResults,
3390 onpass="In-memory counter incremented",
3391 onfail="Error incrementing in-memory" +
3392 " counter" )
3393
3394 main.step( "Add 5 to then get a in-memory counter on each node" )
3395 iCounters = []
3396 threads = []
3397 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003398 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003399 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3400 name="counterIncrement-" + str( i ),
3401 args=[ iCounterName ],
3402 kwargs={ "delta": 5, "inMemory": True } )
3403 iCounterValue += 5
3404 addedIValues.append( iCounterValue )
3405 threads.append( t )
3406 t.start()
3407
3408 for t in threads:
3409 t.join()
3410 iCounters.append( t.result )
3411 # Check that counter incremented numController times
3412 iCounterResults = True
3413 for i in addedIValues:
3414 tmpResult = i in iCounters
3415 iCounterResults = iCounterResults and tmpResult
3416 if not tmpResult:
3417 main.log.error( str( i ) + " is not in in-memory "
3418 "counter incremented results" )
3419 utilities.assert_equals( expect=True,
3420 actual=pCounterResults,
3421 onpass="In-memory counter incremented",
3422 onfail="Error incrementing in-memory" +
3423 " counter" )
3424
3425 main.step( "Get then add 5 to a in-memory counter on each node" )
3426 iCounters = []
3427 threads = []
3428 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003429 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003430 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3431 name="counterIncrement-" + str( i ),
3432 args=[ iCounterName ],
3433 kwargs={ "delta": 5, "inMemory": True } )
3434 addedIValues.append( iCounterValue )
3435 iCounterValue += 5
3436 threads.append( t )
3437 t.start()
3438
3439 for t in threads:
3440 t.join()
3441 iCounters.append( t.result )
3442 # Check that counter incremented numController times
3443 iCounterResults = True
3444 for i in addedIValues:
3445 tmpResult = i in iCounters
3446 iCounterResults = iCounterResults and tmpResult
3447 if not tmpResult:
3448 main.log.error( str( i ) + " is not in in-memory "
3449 "counter incremented results" )
3450 utilities.assert_equals( expect=True,
3451 actual=iCounterResults,
3452 onpass="In-memory counter incremented",
3453 onfail="Error incrementing in-memory" +
3454 " counter" )
3455
3456 main.step( "Counters we added have the correct values" )
3457 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3458 utilities.assert_equals( expect=main.TRUE,
3459 actual=incrementCheck,
3460 onpass="Added counters are correct",
3461 onfail="Added counters are incorrect" )
3462
Jon Hall5cf14d52015-07-16 12:15:19 -07003463 main.step( "Check counters are consistant across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003464 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003465 utilities.assert_equals( expect=main.TRUE,
3466 actual=consistentCounterResults,
3467 onpass="ONOS counters are consistent " +
3468 "across nodes",
3469 onfail="ONOS Counters are inconsistent " +
3470 "across nodes" )
3471
3472 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003473 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3474 incrementCheck = incrementCheck and \
3475 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003476 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003477 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003478 onpass="Added counters are correct",
3479 onfail="Added counters are incorrect" )
3480 # DISTRIBUTED SETS
3481 main.step( "Distributed Set get" )
3482 size = len( onosSet )
3483 getResponses = []
3484 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003485 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003486 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003487 name="setTestGet-" + str( i ),
3488 args=[ onosSetName ] )
3489 threads.append( t )
3490 t.start()
3491 for t in threads:
3492 t.join()
3493 getResponses.append( t.result )
3494
3495 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003496 for i in range( len( main.activeNodes ) ):
3497 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003498 if isinstance( getResponses[ i ], list):
3499 current = set( getResponses[ i ] )
3500 if len( current ) == len( getResponses[ i ] ):
3501 # no repeats
3502 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003503 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003504 " has incorrect view" +
3505 " of set " + onosSetName + ":\n" +
3506 str( getResponses[ i ] ) )
3507 main.log.debug( "Expected: " + str( onosSet ) )
3508 main.log.debug( "Actual: " + str( current ) )
3509 getResults = main.FALSE
3510 else:
3511 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003512 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003513 " has repeat elements in" +
3514 " set " + onosSetName + ":\n" +
3515 str( getResponses[ i ] ) )
3516 getResults = main.FALSE
3517 elif getResponses[ i ] == main.ERROR:
3518 getResults = main.FALSE
3519 utilities.assert_equals( expect=main.TRUE,
3520 actual=getResults,
3521 onpass="Set elements are correct",
3522 onfail="Set elements are incorrect" )
3523
3524 main.step( "Distributed Set size" )
3525 sizeResponses = []
3526 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003527 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003528 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003529 name="setTestSize-" + str( i ),
3530 args=[ onosSetName ] )
3531 threads.append( t )
3532 t.start()
3533 for t in threads:
3534 t.join()
3535 sizeResponses.append( t.result )
3536
3537 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003538 for i in range( len( main.activeNodes ) ):
3539 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003540 if size != sizeResponses[ i ]:
3541 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003542 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003543 " expected a size of " + str( size ) +
3544 " for set " + onosSetName +
3545 " but got " + str( sizeResponses[ i ] ) )
3546 utilities.assert_equals( expect=main.TRUE,
3547 actual=sizeResults,
3548 onpass="Set sizes are correct",
3549 onfail="Set sizes are incorrect" )
3550
3551 main.step( "Distributed Set add()" )
3552 onosSet.add( addValue )
3553 addResponses = []
3554 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003555 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003556 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003557 name="setTestAdd-" + str( i ),
3558 args=[ onosSetName, addValue ] )
3559 threads.append( t )
3560 t.start()
3561 for t in threads:
3562 t.join()
3563 addResponses.append( t.result )
3564
3565 # main.TRUE = successfully changed the set
3566 # main.FALSE = action resulted in no change in set
3567 # main.ERROR - Some error in executing the function
3568 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003569 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003570 if addResponses[ i ] == main.TRUE:
3571 # All is well
3572 pass
3573 elif addResponses[ i ] == main.FALSE:
3574 # Already in set, probably fine
3575 pass
3576 elif addResponses[ i ] == main.ERROR:
3577 # Error in execution
3578 addResults = main.FALSE
3579 else:
3580 # unexpected result
3581 addResults = main.FALSE
3582 if addResults != main.TRUE:
3583 main.log.error( "Error executing set add" )
3584
3585 # Check if set is still correct
3586 size = len( onosSet )
3587 getResponses = []
3588 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003589 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003590 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003591 name="setTestGet-" + str( i ),
3592 args=[ onosSetName ] )
3593 threads.append( t )
3594 t.start()
3595 for t in threads:
3596 t.join()
3597 getResponses.append( t.result )
3598 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003599 for i in range( len( main.activeNodes ) ):
3600 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003601 if isinstance( getResponses[ i ], list):
3602 current = set( getResponses[ i ] )
3603 if len( current ) == len( getResponses[ i ] ):
3604 # no repeats
3605 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003606 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003607 " of set " + onosSetName + ":\n" +
3608 str( getResponses[ i ] ) )
3609 main.log.debug( "Expected: " + str( onosSet ) )
3610 main.log.debug( "Actual: " + str( current ) )
3611 getResults = main.FALSE
3612 else:
3613 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003614 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003615 " set " + onosSetName + ":\n" +
3616 str( getResponses[ i ] ) )
3617 getResults = main.FALSE
3618 elif getResponses[ i ] == main.ERROR:
3619 getResults = main.FALSE
3620 sizeResponses = []
3621 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003622 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003623 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003624 name="setTestSize-" + str( i ),
3625 args=[ onosSetName ] )
3626 threads.append( t )
3627 t.start()
3628 for t in threads:
3629 t.join()
3630 sizeResponses.append( t.result )
3631 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003632 for i in range( len( main.activeNodes ) ):
3633 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003634 if size != sizeResponses[ i ]:
3635 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003636 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003637 " expected a size of " + str( size ) +
3638 " for set " + onosSetName +
3639 " but got " + str( sizeResponses[ i ] ) )
3640 addResults = addResults and getResults and sizeResults
3641 utilities.assert_equals( expect=main.TRUE,
3642 actual=addResults,
3643 onpass="Set add correct",
3644 onfail="Set add was incorrect" )
3645
3646 main.step( "Distributed Set addAll()" )
3647 onosSet.update( addAllValue.split() )
3648 addResponses = []
3649 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003650 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003651 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003652 name="setTestAddAll-" + str( i ),
3653 args=[ onosSetName, addAllValue ] )
3654 threads.append( t )
3655 t.start()
3656 for t in threads:
3657 t.join()
3658 addResponses.append( t.result )
3659
3660 # main.TRUE = successfully changed the set
3661 # main.FALSE = action resulted in no change in set
3662 # main.ERROR - Some error in executing the function
3663 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003664 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003665 if addResponses[ i ] == main.TRUE:
3666 # All is well
3667 pass
3668 elif addResponses[ i ] == main.FALSE:
3669 # Already in set, probably fine
3670 pass
3671 elif addResponses[ i ] == main.ERROR:
3672 # Error in execution
3673 addAllResults = main.FALSE
3674 else:
3675 # unexpected result
3676 addAllResults = main.FALSE
3677 if addAllResults != main.TRUE:
3678 main.log.error( "Error executing set addAll" )
3679
3680 # Check if set is still correct
3681 size = len( onosSet )
3682 getResponses = []
3683 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003684 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003685 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003686 name="setTestGet-" + str( i ),
3687 args=[ onosSetName ] )
3688 threads.append( t )
3689 t.start()
3690 for t in threads:
3691 t.join()
3692 getResponses.append( t.result )
3693 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003694 for i in range( len( main.activeNodes ) ):
3695 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003696 if isinstance( getResponses[ i ], list):
3697 current = set( getResponses[ i ] )
3698 if len( current ) == len( getResponses[ i ] ):
3699 # no repeats
3700 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003701 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003702 " has incorrect view" +
3703 " of set " + onosSetName + ":\n" +
3704 str( getResponses[ i ] ) )
3705 main.log.debug( "Expected: " + str( onosSet ) )
3706 main.log.debug( "Actual: " + str( current ) )
3707 getResults = main.FALSE
3708 else:
3709 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003710 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003711 " has repeat elements in" +
3712 " set " + onosSetName + ":\n" +
3713 str( getResponses[ i ] ) )
3714 getResults = main.FALSE
3715 elif getResponses[ i ] == main.ERROR:
3716 getResults = main.FALSE
3717 sizeResponses = []
3718 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003719 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003720 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003721 name="setTestSize-" + str( i ),
3722 args=[ onosSetName ] )
3723 threads.append( t )
3724 t.start()
3725 for t in threads:
3726 t.join()
3727 sizeResponses.append( t.result )
3728 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003729 for i in range( len( main.activeNodes ) ):
3730 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003731 if size != sizeResponses[ i ]:
3732 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003733 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003734 " expected a size of " + str( size ) +
3735 " for set " + onosSetName +
3736 " but got " + str( sizeResponses[ i ] ) )
3737 addAllResults = addAllResults and getResults and sizeResults
3738 utilities.assert_equals( expect=main.TRUE,
3739 actual=addAllResults,
3740 onpass="Set addAll correct",
3741 onfail="Set addAll was incorrect" )
3742
3743 main.step( "Distributed Set contains()" )
3744 containsResponses = []
3745 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003746 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003747 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003748 name="setContains-" + str( i ),
3749 args=[ onosSetName ],
3750 kwargs={ "values": addValue } )
3751 threads.append( t )
3752 t.start()
3753 for t in threads:
3754 t.join()
3755 # NOTE: This is the tuple
3756 containsResponses.append( t.result )
3757
3758 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003759 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003760 if containsResponses[ i ] == main.ERROR:
3761 containsResults = main.FALSE
3762 else:
3763 containsResults = containsResults and\
3764 containsResponses[ i ][ 1 ]
3765 utilities.assert_equals( expect=main.TRUE,
3766 actual=containsResults,
3767 onpass="Set contains is functional",
3768 onfail="Set contains failed" )
3769
3770 main.step( "Distributed Set containsAll()" )
3771 containsAllResponses = []
3772 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003773 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003774 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003775 name="setContainsAll-" + str( i ),
3776 args=[ onosSetName ],
3777 kwargs={ "values": addAllValue } )
3778 threads.append( t )
3779 t.start()
3780 for t in threads:
3781 t.join()
3782 # NOTE: This is the tuple
3783 containsAllResponses.append( t.result )
3784
3785 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003786 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003787 if containsResponses[ i ] == main.ERROR:
3788 containsResults = main.FALSE
3789 else:
3790 containsResults = containsResults and\
3791 containsResponses[ i ][ 1 ]
3792 utilities.assert_equals( expect=main.TRUE,
3793 actual=containsAllResults,
3794 onpass="Set containsAll is functional",
3795 onfail="Set containsAll failed" )
3796
        # Remove a single value from the distributed set on every active
        # node in parallel, then verify each node's view of the set matches
        # the locally maintained reference copy ( onosSet ) in both content
        # and reported size.
        main.step( "Distributed Set remove()" )
        # Update the local reference set first; every node must end up
        # agreeing with it.
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        # One CLI thread per active node so the removes run concurrently.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        # Read the set back from every active node in parallel.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number, used only for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set (duplicates in the response)
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node against the
        # reference set's size.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # The step passes only if the remove, the content check, and the
        # size check all succeeded.
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3893
        # Remove a batch of values ( addAllValue, space-separated ) from the
        # distributed set on every active node in parallel, then verify each
        # node's view matches the local reference copy in content and size.
        main.step( "Distributed Set removeAll()" )
        # Mirror the bulk removal on the local reference set.
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            # One CLI thread per active node so the removes run concurrently.
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        # NOTE: Python 2-only except syntax; logged and swallowed so the
        # tallying below still runs with whatever responses were collected.
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        # Read the set back from every active node in parallel.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number, used only for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set (duplicates in the response)
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # The step passes only if the removeAll, content, and size checks
        # all succeeded.
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3993
        # Add a batch of values ( addAllValue, space-separated ) to the
        # distributed set on every active node in parallel, then verify each
        # node's view matches the local reference copy in content and size.
        main.step( "Distributed Set addAll()" )
        # Mirror the bulk add on the local reference set.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        # One CLI thread per active node so the adds run concurrently.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        # Read the set back from every active node in parallel.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number, used only for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set (duplicates in the response)
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # The step passes only if the addAll, content, and size checks
        # all succeeded.
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4090
        # Clear the distributed set on every active node in parallel, then
        # verify each node reports an empty set matching the local reference
        # copy in content and size.
        main.step( "Distributed Set clear()" )
        # Mirror the clear on the local reference set.
        onosSet.clear()
        clearResponses = []
        threads = []
        # One CLI thread per active node; clear is expressed through
        # setTestRemove with the "clear" flag.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # value is ignored when clearing
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        # Read the set back from every active node in parallel.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number, used only for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set (duplicates in the response)
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node (should be 0 after a
        # successful clear, since the reference set was cleared above).
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # The step passes only if the clear, content, and size checks
        # all succeeded.
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4188
        # Re-populate the (just cleared) distributed set with the addAll
        # batch on every active node in parallel, then verify each node's
        # view matches the local reference copy in content and size.
        main.step( "Distributed Set addAll()" )
        # Mirror the bulk add on the local reference set.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        # One CLI thread per active node so the adds run concurrently.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        # Read the set back from every active node in parallel.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number, used only for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set (duplicates in the response)
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # The step passes only if the addAll, content, and size checks
        # all succeeded.
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4285
        # Retain only the values in retainValue (intersection) in the
        # distributed set on every active node in parallel, then verify each
        # node's view matches the local reference copy in content and size.
        main.step( "Distributed Set retain()" )
        # Mirror the intersection on the local reference set.
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        # One CLI thread per active node; retain is expressed through
        # setTestRemove with the "retain" flag.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        # Read the set back from every active node in parallel.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number, used only for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set (duplicates in the response)
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # The step passes only if the retain, content, and size checks
        # all succeeded.
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4382
        # Transactional maps
        # Write numKeys entries through a single node's CLI, then read each
        # key back from every active node and check all nodes return the
        # same value.
        main.step( "Partitioned Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        # All puts go through one node; the get step below verifies the
        # other nodes observe the same values.
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        # Expect one response entry per key, each echoing the written value.
        # NOTE(review): the literal 100 duplicates numKeys; they must stay
        # in sync if numKeys is ever changed.
        if len( putResponses ) == 100:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        # For each key, query every active node in parallel and require all
        # responses to equal the value written above.
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE: 'node' here is a response value and shadows the node
            # index used by the put step above.
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )
4429
        # Repeat the transactional-map put/get verification against the
        # in-memory (non-persistent) map variant, selected via the
        # inMemory=True flag on the CLI calls.
        main.step( "In-memory Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        # All puts go through one node; the get step below verifies the
        # other nodes observe the same values.
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
        # Expect one response entry per key, each echoing the written value.
        # NOTE(review): the literal 100 duplicates numKeys; they must stay
        # in sync if numKeys is ever changed.
        if len( putResponses ) == 100:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="In-Memory Transactional Map put successful",
                                 onfail="In-Memory Transactional Map put values are incorrect" )

        main.step( "In-Memory Transactional maps get" )
        getCheck = True
        # For each key, query every active node in parallel and require all
        # responses to equal the value written above.
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ],
                                 kwargs={ "inMemory": True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE: 'node' here is a response value and shadows the node
            # index used by the put step above.
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="In-Memory Transactional Map get values were correct",
                                 onfail="In-Memory Transactional Map values incorrect" )