blob: 2127d5466ea1a32a1626d16025c1510c81a14728 [file] [log] [blame]
Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if ONOS can handle
3 a minority of it's nodes restarting
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
Jon Hallb3ed8ed2015-10-28 16:43:55 -070012CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
Jon Hall5cf14d52015-07-16 12:15:19 -070014CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
Jon Hallb3ed8ed2015-10-28 16:43:55 -070028class HAstopNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -070029
    def __init__( self ):
        # Default attribute required by the TestON test-class convention;
        # no other per-instance state is set up here.
        self.default = ''
32
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import pexpect
        import time
        main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."
        # TODO: save all the timers and output them for plotting

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        # Cluster size comes from the params file, but is capped at the
        # number of nodes the bench actually has available.
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        # Load the shared Counters helper module from the path given in the
        # params file; a failure here is fatal to the whole test run.
        try:
            fileName = "Counters"
            # TODO: Maybe make a library folder somewhere?
            path = main.params[ 'imports' ][ 'path' ]
            main.Counters = imp.load_source( fileName,
                                             path + fileName + ".py" )
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        # Collect the per-node CLI and node driver handles created by the
        # TestON topology file (ONOScli1..N / ONOS1..N); stop at the first
        # missing attribute.
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME:this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet( )
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAstopNodes"
        plotName = "Plot-HA"
        # Confluence wiki markup embedding the Jenkins plot for this job.
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=0' +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki(graphs)

        main.step( "Creating ONOS package" )
        # copy gen-partions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
        packageResult = main.ONOSbench.onosPackage()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )
        # clean up gen-partitions file
        # Restores the stock onos-gen-partitions script via the bench's
        # interactive shell handle; pexpect failures here abort the test.
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

        main.step( "Checking if ONOS is up yet" )
        # Up to two passes: any node that fails the isup check is restarted
        # once and re-checked on the second pass.
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " didn't start!" )
                    main.ONOSbench.onosStop( node.ip_address )
                    main.ONOSbench.onosStart( node.ip_address )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        # NOTE(review): other steps use main.step(); main.log.step() looks
        # inconsistent here -- confirm the logger actually provides step().
        main.log.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        # Start one CLI session per controller in parallel.
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "App Ids check" )
        # Give the cluster time to settle before comparing app IDs across
        # nodes; presumably 60s covers app activation -- TODO confirm.
        time.sleep(60)
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            # Dump one node's app tables to help debug the mismatch.
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        if cliResults == main.FALSE:
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()
303
304 def CASE2( self, main ):
305 """
306 Assign devices to controllers
307 """
308 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700309 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700310 assert main, "main not defined"
311 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700312 assert main.CLIs, "main.CLIs not defined"
313 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700314 assert ONOS1Port, "ONOS1Port not defined"
315 assert ONOS2Port, "ONOS2Port not defined"
316 assert ONOS3Port, "ONOS3Port not defined"
317 assert ONOS4Port, "ONOS4Port not defined"
318 assert ONOS5Port, "ONOS5Port not defined"
319 assert ONOS6Port, "ONOS6Port not defined"
320 assert ONOS7Port, "ONOS7Port not defined"
321
322 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700323 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700324 "and check that an ONOS node becomes the " +\
325 "master of the device."
326 main.step( "Assign switches to controllers" )
327
328 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700329 for i in range( main.numCtrls ):
330 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700331 swList = []
332 for i in range( 1, 29 ):
333 swList.append( "s" + str( i ) )
334 main.Mininet1.assignSwController( sw=swList, ip=ipList )
335
336 mastershipCheck = main.TRUE
337 for i in range( 1, 29 ):
338 response = main.Mininet1.getSwController( "s" + str( i ) )
339 try:
340 main.log.info( str( response ) )
341 except Exception:
342 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700343 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700344 if re.search( "tcp:" + node.ip_address, response ):
345 mastershipCheck = mastershipCheck and main.TRUE
346 else:
347 main.log.error( "Error, node " + node.ip_address + " is " +
348 "not in the list of controllers s" +
349 str( i ) + " is connecting to." )
350 mastershipCheck = main.FALSE
351 utilities.assert_equals(
352 expect=main.TRUE,
353 actual=mastershipCheck,
354 onpass="Switch mastership assigned correctly",
355 onfail="Switches not assigned correctly to controllers" )
356
    def CASE21( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        # Preconditions established by CASE1; fail fast if any are missing.
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI commands in this case go through the first active node.
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluser, but will work with any sized
            # cluster
            # The "N % main.numCtrls" wraps each target controller index
            # into the actual cluster size, so smaller clusters still get a
            # valid (if less spread out) assignment.
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    # Device lookups for s8-s17 use dpid suffixes 3008-3017.
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    # Device lookups for s18-s27 use dpid suffixes 6018-6027.
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        # Verify each device's reported master is the controller we asked
        # for; ipList/deviceList are parallel lists built above.
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
478
479 def CASE3( self, main ):
480 """
481 Assign intents
482 """
483 import time
484 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700485 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700486 assert main, "main not defined"
487 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700488 assert main.CLIs, "main.CLIs not defined"
489 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700490 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700491 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700492 "assign predetermined host-to-host intents." +\
493 " After installation, check that the intent" +\
494 " is distributed to all nodes and the state" +\
495 " is INSTALLED"
496
497 # install onos-app-fwd
498 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700499 onosCli = main.CLIs[ main.activeNodes[0] ]
500 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700501 utilities.assert_equals( expect=main.TRUE, actual=installResults,
502 onpass="Install fwd successful",
503 onfail="Install fwd failed" )
504
505 main.step( "Check app ids" )
506 appCheck = main.TRUE
507 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700508 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700509 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700510 name="appToIDCheck-" + str( i ),
511 args=[] )
512 threads.append( t )
513 t.start()
514
515 for t in threads:
516 t.join()
517 appCheck = appCheck and t.result
518 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700519 main.log.warn( onosCli.apps() )
520 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700521 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
522 onpass="App Ids seem to be correct",
523 onfail="Something is wrong with app Ids" )
524
525 main.step( "Discovering Hosts( Via pingall for now )" )
526 # FIXME: Once we have a host discovery mechanism, use that instead
527 # REACTIVE FWD test
528 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700529 passMsg = "Reactive Pingall test passed"
530 time1 = time.time()
531 pingResult = main.Mininet1.pingall()
532 time2 = time.time()
533 if not pingResult:
534 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700535 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700536 passMsg += " on the second try"
537 utilities.assert_equals(
538 expect=main.TRUE,
539 actual=pingResult,
540 onpass= passMsg,
541 onfail="Reactive Pingall failed, " +
542 "one or more ping pairs failed" )
543 main.log.info( "Time for pingall: %2f seconds" %
544 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700545 # timeout for fwd flows
546 time.sleep( 11 )
547 # uninstall onos-app-fwd
548 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700549 node = main.activeNodes[0]
550 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700551 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
552 onpass="Uninstall fwd successful",
553 onfail="Uninstall fwd failed" )
554
555 main.step( "Check app ids" )
556 threads = []
557 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700558 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700559 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700560 name="appToIDCheck-" + str( i ),
561 args=[] )
562 threads.append( t )
563 t.start()
564
565 for t in threads:
566 t.join()
567 appCheck2 = appCheck2 and t.result
568 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700569 node = main.activeNodes[0]
570 main.log.warn( main.CLIs[node].apps() )
571 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700572 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
573 onpass="App Ids seem to be correct",
574 onfail="Something is wrong with app Ids" )
575
576 main.step( "Add host intents via cli" )
577 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700578 # TODO: move the host numbers to params
579 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700580 intentAddResult = True
581 hostResult = main.TRUE
582 for i in range( 8, 18 ):
583 main.log.info( "Adding host intent between h" + str( i ) +
584 " and h" + str( i + 10 ) )
585 host1 = "00:00:00:00:00:" + \
586 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
587 host2 = "00:00:00:00:00:" + \
588 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
589 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700590 host1Dict = onosCli.getHost( host1 )
591 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700592 host1Id = None
593 host2Id = None
594 if host1Dict and host2Dict:
595 host1Id = host1Dict.get( 'id', None )
596 host2Id = host2Dict.get( 'id', None )
597 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700598 nodeNum = ( i % len( main.activeNodes ) )
599 node = main.activeNodes[nodeNum]
600 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700601 if tmpId:
602 main.log.info( "Added intent with id: " + tmpId )
603 intentIds.append( tmpId )
604 else:
605 main.log.error( "addHostIntent returned: " +
606 repr( tmpId ) )
607 else:
608 main.log.error( "Error, getHost() failed for h" + str( i ) +
609 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700610 node = main.activeNodes[0]
611 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700612 main.log.warn( "Hosts output: " )
613 try:
614 main.log.warn( json.dumps( json.loads( hosts ),
615 sort_keys=True,
616 indent=4,
617 separators=( ',', ': ' ) ) )
618 except ( ValueError, TypeError ):
619 main.log.warn( repr( hosts ) )
620 hostResult = main.FALSE
621 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
622 onpass="Found a host id for each host",
623 onfail="Error looking up host ids" )
624
625 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700626 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700627 main.log.info( "Submitted intents: " + str( intentIds ) )
628 main.log.info( "Intents in ONOS: " + str( onosIds ) )
629 for intent in intentIds:
630 if intent in onosIds:
631 pass # intent submitted is in onos
632 else:
633 intentAddResult = False
634 if intentAddResult:
635 intentStop = time.time()
636 else:
637 intentStop = None
638 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700639 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700640 intentStates = []
641 installedCheck = True
642 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
643 count = 0
644 try:
645 for intent in json.loads( intents ):
646 state = intent.get( 'state', None )
647 if "INSTALLED" not in state:
648 installedCheck = False
649 intentId = intent.get( 'id', None )
650 intentStates.append( ( intentId, state ) )
651 except ( ValueError, TypeError ):
652 main.log.exception( "Error parsing intents" )
653 # add submitted intents not in the store
654 tmplist = [ i for i, s in intentStates ]
655 missingIntents = False
656 for i in intentIds:
657 if i not in tmplist:
658 intentStates.append( ( i, " - " ) )
659 missingIntents = True
660 intentStates.sort()
661 for i, s in intentStates:
662 count += 1
663 main.log.info( "%-6s%-15s%-15s" %
664 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700665 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700666 try:
667 missing = False
668 if leaders:
669 parsedLeaders = json.loads( leaders )
670 main.log.warn( json.dumps( parsedLeaders,
671 sort_keys=True,
672 indent=4,
673 separators=( ',', ': ' ) ) )
674 # check for all intent partitions
675 topics = []
676 for i in range( 14 ):
677 topics.append( "intent-partition-" + str( i ) )
678 main.log.debug( topics )
679 ONOStopics = [ j['topic'] for j in parsedLeaders ]
680 for topic in topics:
681 if topic not in ONOStopics:
682 main.log.error( "Error: " + topic +
683 " not in leaders" )
684 missing = True
685 else:
686 main.log.error( "leaders() returned None" )
687 except ( ValueError, TypeError ):
688 main.log.exception( "Error parsing leaders" )
689 main.log.error( repr( leaders ) )
690 # Check all nodes
691 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700692 for i in main.activeNodes:
693 response = main.CLIs[i].leaders( jsonFormat=False)
694 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700695 str( response ) )
696
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700697 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700698 try:
699 if partitions :
700 parsedPartitions = json.loads( partitions )
701 main.log.warn( json.dumps( parsedPartitions,
702 sort_keys=True,
703 indent=4,
704 separators=( ',', ': ' ) ) )
705 # TODO check for a leader in all paritions
706 # TODO check for consistency among nodes
707 else:
708 main.log.error( "partitions() returned None" )
709 except ( ValueError, TypeError ):
710 main.log.exception( "Error parsing partitions" )
711 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700712 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700713 try:
714 if pendingMap :
715 parsedPending = json.loads( pendingMap )
716 main.log.warn( json.dumps( parsedPending,
717 sort_keys=True,
718 indent=4,
719 separators=( ',', ': ' ) ) )
720 # TODO check something here?
721 else:
722 main.log.error( "pendingMap() returned None" )
723 except ( ValueError, TypeError ):
724 main.log.exception( "Error parsing pending map" )
725 main.log.error( repr( pendingMap ) )
726
727 intentAddResult = bool( intentAddResult and not missingIntents and
728 installedCheck )
729 if not intentAddResult:
730 main.log.error( "Error in pushing host intents to ONOS" )
731
732 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700733 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700734 correct = True
735 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700736 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700737 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700738 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700739 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700740 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700741 str( sorted( onosIds ) ) )
742 if sorted( ids ) != sorted( intentIds ):
743 main.log.warn( "Set of intent IDs doesn't match" )
744 correct = False
745 break
746 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700747 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700748 for intent in intents:
749 if intent[ 'state' ] != "INSTALLED":
750 main.log.warn( "Intent " + intent[ 'id' ] +
751 " is " + intent[ 'state' ] )
752 correct = False
753 break
754 if correct:
755 break
756 else:
757 time.sleep(1)
758 if not intentStop:
759 intentStop = time.time()
760 global gossipTime
761 gossipTime = intentStop - intentStart
762 main.log.info( "It took about " + str( gossipTime ) +
763 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700764 gossipPeriod = int( main.params['timers']['gossip'] )
765 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700766 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700767 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700768 onpass="ECM anti-entropy for intents worked within " +
769 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700770 onfail="Intent ECM anti-entropy took too long. " +
771 "Expected time:{}, Actual time:{}".format( maxGossipTime,
772 gossipTime ) )
773 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700774 intentAddResult = True
775
776 if not intentAddResult or "key" in pendingMap:
777 import time
778 installedCheck = True
779 main.log.info( "Sleeping 60 seconds to see if intents are found" )
780 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700781 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700782 main.log.info( "Submitted intents: " + str( intentIds ) )
783 main.log.info( "Intents in ONOS: " + str( onosIds ) )
784 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700785 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700786 intentStates = []
787 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
788 count = 0
789 try:
790 for intent in json.loads( intents ):
791 # Iter through intents of a node
792 state = intent.get( 'state', None )
793 if "INSTALLED" not in state:
794 installedCheck = False
795 intentId = intent.get( 'id', None )
796 intentStates.append( ( intentId, state ) )
797 except ( ValueError, TypeError ):
798 main.log.exception( "Error parsing intents" )
799 # add submitted intents not in the store
800 tmplist = [ i for i, s in intentStates ]
801 for i in intentIds:
802 if i not in tmplist:
803 intentStates.append( ( i, " - " ) )
804 intentStates.sort()
805 for i, s in intentStates:
806 count += 1
807 main.log.info( "%-6s%-15s%-15s" %
808 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700809 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700810 try:
811 missing = False
812 if leaders:
813 parsedLeaders = json.loads( leaders )
814 main.log.warn( json.dumps( parsedLeaders,
815 sort_keys=True,
816 indent=4,
817 separators=( ',', ': ' ) ) )
818 # check for all intent partitions
819 # check for election
820 topics = []
821 for i in range( 14 ):
822 topics.append( "intent-partition-" + str( i ) )
823 # FIXME: this should only be after we start the app
824 topics.append( "org.onosproject.election" )
825 main.log.debug( topics )
826 ONOStopics = [ j['topic'] for j in parsedLeaders ]
827 for topic in topics:
828 if topic not in ONOStopics:
829 main.log.error( "Error: " + topic +
830 " not in leaders" )
831 missing = True
832 else:
833 main.log.error( "leaders() returned None" )
834 except ( ValueError, TypeError ):
835 main.log.exception( "Error parsing leaders" )
836 main.log.error( repr( leaders ) )
837 # Check all nodes
838 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700839 for i in main.activeNodes:
840 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700841 response = node.leaders( jsonFormat=False)
842 main.log.warn( str( node.name ) + " leaders output: \n" +
843 str( response ) )
844
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700845 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700846 try:
847 if partitions :
848 parsedPartitions = json.loads( partitions )
849 main.log.warn( json.dumps( parsedPartitions,
850 sort_keys=True,
851 indent=4,
852 separators=( ',', ': ' ) ) )
853 # TODO check for a leader in all paritions
854 # TODO check for consistency among nodes
855 else:
856 main.log.error( "partitions() returned None" )
857 except ( ValueError, TypeError ):
858 main.log.exception( "Error parsing partitions" )
859 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700860 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700861 try:
862 if pendingMap :
863 parsedPending = json.loads( pendingMap )
864 main.log.warn( json.dumps( parsedPending,
865 sort_keys=True,
866 indent=4,
867 separators=( ',', ': ' ) ) )
868 # TODO check something here?
869 else:
870 main.log.error( "pendingMap() returned None" )
871 except ( ValueError, TypeError ):
872 main.log.exception( "Error parsing pending map" )
873 main.log.error( repr( pendingMap ) )
874
875 def CASE4( self, main ):
876 """
877 Ping across added host intents
878 """
879 import json
880 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700881 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700882 assert main, "main not defined"
883 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700884 assert main.CLIs, "main.CLIs not defined"
885 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700886 main.case( "Verify connectivity by sending traffic across Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700887 main.caseExplanation = "Ping across added host intents to check " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700888 "functionality and check the state of " +\
889 "the intent"
890 main.step( "Ping across added host intents" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700891 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700892 PingResult = main.TRUE
893 for i in range( 8, 18 ):
894 ping = main.Mininet1.pingHost( src="h" + str( i ),
895 target="h" + str( i + 10 ) )
896 PingResult = PingResult and ping
897 if ping == main.FALSE:
898 main.log.warn( "Ping failed between h" + str( i ) +
899 " and h" + str( i + 10 ) )
900 elif ping == main.TRUE:
901 main.log.info( "Ping test passed!" )
902 # Don't set PingResult or you'd override failures
903 if PingResult == main.FALSE:
904 main.log.error(
905 "Intents have not been installed correctly, pings failed." )
906 # TODO: pretty print
907 main.log.warn( "ONOS1 intents: " )
908 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700909 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700910 main.log.warn( json.dumps( json.loads( tmpIntents ),
911 sort_keys=True,
912 indent=4,
913 separators=( ',', ': ' ) ) )
914 except ( ValueError, TypeError ):
915 main.log.warn( repr( tmpIntents ) )
916 utilities.assert_equals(
917 expect=main.TRUE,
918 actual=PingResult,
919 onpass="Intents have been installed correctly and pings work",
920 onfail="Intents have not been installed correctly, pings failed." )
921
922 main.step( "Check Intent state" )
923 installedCheck = False
924 loopCount = 0
925 while not installedCheck and loopCount < 40:
926 installedCheck = True
927 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700928 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700929 intentStates = []
930 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
931 count = 0
932 # Iter through intents of a node
933 try:
934 for intent in json.loads( intents ):
935 state = intent.get( 'state', None )
936 if "INSTALLED" not in state:
937 installedCheck = False
938 intentId = intent.get( 'id', None )
939 intentStates.append( ( intentId, state ) )
940 except ( ValueError, TypeError ):
941 main.log.exception( "Error parsing intents." )
942 # Print states
943 intentStates.sort()
944 for i, s in intentStates:
945 count += 1
946 main.log.info( "%-6s%-15s%-15s" %
947 ( str( count ), str( i ), str( s ) ) )
948 if not installedCheck:
949 time.sleep( 1 )
950 loopCount += 1
951 utilities.assert_equals( expect=True, actual=installedCheck,
952 onpass="Intents are all INSTALLED",
953 onfail="Intents are not all in " +
954 "INSTALLED state" )
955
956 main.step( "Check leadership of topics" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700957 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700958 topicCheck = main.TRUE
959 try:
960 if leaders:
961 parsedLeaders = json.loads( leaders )
962 main.log.warn( json.dumps( parsedLeaders,
963 sort_keys=True,
964 indent=4,
965 separators=( ',', ': ' ) ) )
966 # check for all intent partitions
967 # check for election
968 # TODO: Look at Devices as topics now that it uses this system
969 topics = []
970 for i in range( 14 ):
971 topics.append( "intent-partition-" + str( i ) )
972 # FIXME: this should only be after we start the app
973 # FIXME: topics.append( "org.onosproject.election" )
974 # Print leaders output
975 main.log.debug( topics )
976 ONOStopics = [ j['topic'] for j in parsedLeaders ]
977 for topic in topics:
978 if topic not in ONOStopics:
979 main.log.error( "Error: " + topic +
980 " not in leaders" )
981 topicCheck = main.FALSE
982 else:
983 main.log.error( "leaders() returned None" )
984 topicCheck = main.FALSE
985 except ( ValueError, TypeError ):
986 topicCheck = main.FALSE
987 main.log.exception( "Error parsing leaders" )
988 main.log.error( repr( leaders ) )
989 # TODO: Check for a leader of these topics
990 # Check all nodes
991 if topicCheck:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700992 for i in main.activeNodes:
993 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700994 response = node.leaders( jsonFormat=False)
995 main.log.warn( str( node.name ) + " leaders output: \n" +
996 str( response ) )
997
998 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
999 onpass="intent Partitions is in leaders",
1000 onfail="Some topics were lost " )
1001 # Print partitions
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001002 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001003 try:
1004 if partitions :
1005 parsedPartitions = json.loads( partitions )
1006 main.log.warn( json.dumps( parsedPartitions,
1007 sort_keys=True,
1008 indent=4,
1009 separators=( ',', ': ' ) ) )
1010 # TODO check for a leader in all paritions
1011 # TODO check for consistency among nodes
1012 else:
1013 main.log.error( "partitions() returned None" )
1014 except ( ValueError, TypeError ):
1015 main.log.exception( "Error parsing partitions" )
1016 main.log.error( repr( partitions ) )
1017 # Print Pending Map
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001018 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001019 try:
1020 if pendingMap :
1021 parsedPending = json.loads( pendingMap )
1022 main.log.warn( json.dumps( parsedPending,
1023 sort_keys=True,
1024 indent=4,
1025 separators=( ',', ': ' ) ) )
1026 # TODO check something here?
1027 else:
1028 main.log.error( "pendingMap() returned None" )
1029 except ( ValueError, TypeError ):
1030 main.log.exception( "Error parsing pending map" )
1031 main.log.error( repr( pendingMap ) )
1032
1033 if not installedCheck:
1034 main.log.info( "Waiting 60 seconds to see if the state of " +
1035 "intents change" )
1036 time.sleep( 60 )
1037 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001038 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001039 intentStates = []
1040 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1041 count = 0
1042 # Iter through intents of a node
1043 try:
1044 for intent in json.loads( intents ):
1045 state = intent.get( 'state', None )
1046 if "INSTALLED" not in state:
1047 installedCheck = False
1048 intentId = intent.get( 'id', None )
1049 intentStates.append( ( intentId, state ) )
1050 except ( ValueError, TypeError ):
1051 main.log.exception( "Error parsing intents." )
1052 intentStates.sort()
1053 for i, s in intentStates:
1054 count += 1
1055 main.log.info( "%-6s%-15s%-15s" %
1056 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001057 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001058 try:
1059 missing = False
1060 if leaders:
1061 parsedLeaders = json.loads( leaders )
1062 main.log.warn( json.dumps( parsedLeaders,
1063 sort_keys=True,
1064 indent=4,
1065 separators=( ',', ': ' ) ) )
1066 # check for all intent partitions
1067 # check for election
1068 topics = []
1069 for i in range( 14 ):
1070 topics.append( "intent-partition-" + str( i ) )
1071 # FIXME: this should only be after we start the app
1072 topics.append( "org.onosproject.election" )
1073 main.log.debug( topics )
1074 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1075 for topic in topics:
1076 if topic not in ONOStopics:
1077 main.log.error( "Error: " + topic +
1078 " not in leaders" )
1079 missing = True
1080 else:
1081 main.log.error( "leaders() returned None" )
1082 except ( ValueError, TypeError ):
1083 main.log.exception( "Error parsing leaders" )
1084 main.log.error( repr( leaders ) )
1085 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001086 for i in main.activeNodes:
1087 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001088 response = node.leaders( jsonFormat=False)
1089 main.log.warn( str( node.name ) + " leaders output: \n" +
1090 str( response ) )
1091
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001092 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001093 try:
1094 if partitions :
1095 parsedPartitions = json.loads( partitions )
1096 main.log.warn( json.dumps( parsedPartitions,
1097 sort_keys=True,
1098 indent=4,
1099 separators=( ',', ': ' ) ) )
1100 # TODO check for a leader in all paritions
1101 # TODO check for consistency among nodes
1102 else:
1103 main.log.error( "partitions() returned None" )
1104 except ( ValueError, TypeError ):
1105 main.log.exception( "Error parsing partitions" )
1106 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001107 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001108 try:
1109 if pendingMap :
1110 parsedPending = json.loads( pendingMap )
1111 main.log.warn( json.dumps( parsedPending,
1112 sort_keys=True,
1113 indent=4,
1114 separators=( ',', ': ' ) ) )
1115 # TODO check something here?
1116 else:
1117 main.log.error( "pendingMap() returned None" )
1118 except ( ValueError, TypeError ):
1119 main.log.exception( "Error parsing pending map" )
1120 main.log.error( repr( pendingMap ) )
1121 # Print flowrules
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001122 node = main.activeNodes[0]
1123 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001124 main.step( "Wait a minute then ping again" )
1125 # the wait is above
1126 PingResult = main.TRUE
1127 for i in range( 8, 18 ):
1128 ping = main.Mininet1.pingHost( src="h" + str( i ),
1129 target="h" + str( i + 10 ) )
1130 PingResult = PingResult and ping
1131 if ping == main.FALSE:
1132 main.log.warn( "Ping failed between h" + str( i ) +
1133 " and h" + str( i + 10 ) )
1134 elif ping == main.TRUE:
1135 main.log.info( "Ping test passed!" )
1136 # Don't set PingResult or you'd override failures
1137 if PingResult == main.FALSE:
1138 main.log.error(
1139 "Intents have not been installed correctly, pings failed." )
1140 # TODO: pretty print
1141 main.log.warn( "ONOS1 intents: " )
1142 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001143 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001144 main.log.warn( json.dumps( json.loads( tmpIntents ),
1145 sort_keys=True,
1146 indent=4,
1147 separators=( ',', ': ' ) ) )
1148 except ( ValueError, TypeError ):
1149 main.log.warn( repr( tmpIntents ) )
1150 utilities.assert_equals(
1151 expect=main.TRUE,
1152 actual=PingResult,
1153 onpass="Intents have been installed correctly and pings work",
1154 onfail="Intents have not been installed correctly, pings failed." )
1155
1156 def CASE5( self, main ):
1157 """
1158 Reading state of ONOS
1159 """
1160 import json
1161 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001162 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001163 assert main, "main not defined"
1164 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001165 assert main.CLIs, "main.CLIs not defined"
1166 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001167
1168 main.case( "Setting up and gathering data for current state" )
1169 # The general idea for this test case is to pull the state of
1170 # ( intents,flows, topology,... ) from each ONOS node
1171 # We can then compare them with each other and also with past states
1172
1173 main.step( "Check that each switch has a master" )
1174 global mastershipState
1175 mastershipState = '[]'
1176
1177 # Assert that each device has a master
1178 rolesNotNull = main.TRUE
1179 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001180 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001181 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001182 name="rolesNotNull-" + str( i ),
1183 args=[] )
1184 threads.append( t )
1185 t.start()
1186
1187 for t in threads:
1188 t.join()
1189 rolesNotNull = rolesNotNull and t.result
1190 utilities.assert_equals(
1191 expect=main.TRUE,
1192 actual=rolesNotNull,
1193 onpass="Each device has a master",
1194 onfail="Some devices don't have a master assigned" )
1195
1196 main.step( "Get the Mastership of each switch from each controller" )
1197 ONOSMastership = []
1198 mastershipCheck = main.FALSE
1199 consistentMastership = True
1200 rolesResults = True
1201 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001202 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001203 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001204 name="roles-" + str( i ),
1205 args=[] )
1206 threads.append( t )
1207 t.start()
1208
1209 for t in threads:
1210 t.join()
1211 ONOSMastership.append( t.result )
1212
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001213 for i in range( len( ONOSMastership ) ):
1214 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001215 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001216 main.log.error( "Error in getting ONOS" + node + " roles" )
1217 main.log.warn( "ONOS" + node + " mastership response: " +
1218 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001219 rolesResults = False
1220 utilities.assert_equals(
1221 expect=True,
1222 actual=rolesResults,
1223 onpass="No error in reading roles output",
1224 onfail="Error in reading roles from ONOS" )
1225
1226 main.step( "Check for consistency in roles from each controller" )
1227 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1228 main.log.info(
1229 "Switch roles are consistent across all ONOS nodes" )
1230 else:
1231 consistentMastership = False
1232 utilities.assert_equals(
1233 expect=True,
1234 actual=consistentMastership,
1235 onpass="Switch roles are consistent across all ONOS nodes",
1236 onfail="ONOS nodes have different views of switch roles" )
1237
1238 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001239 for i in range( len( main.activeNodes ) ):
1240 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001241 try:
1242 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001243 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001244 json.dumps(
1245 json.loads( ONOSMastership[ i ] ),
1246 sort_keys=True,
1247 indent=4,
1248 separators=( ',', ': ' ) ) )
1249 except ( ValueError, TypeError ):
1250 main.log.warn( repr( ONOSMastership[ i ] ) )
1251 elif rolesResults and consistentMastership:
1252 mastershipCheck = main.TRUE
1253 mastershipState = ONOSMastership[ 0 ]
1254
1255 main.step( "Get the intents from each controller" )
1256 global intentState
1257 intentState = []
1258 ONOSIntents = []
1259 intentCheck = main.FALSE
1260 consistentIntents = True
1261 intentsResults = True
1262 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001263 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001264 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001265 name="intents-" + str( i ),
1266 args=[],
1267 kwargs={ 'jsonFormat': True } )
1268 threads.append( t )
1269 t.start()
1270
1271 for t in threads:
1272 t.join()
1273 ONOSIntents.append( t.result )
1274
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001275 for i in range( len( ONOSIntents ) ):
1276 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001277 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001278 main.log.error( "Error in getting ONOS" + node + " intents" )
1279 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001280 repr( ONOSIntents[ i ] ) )
1281 intentsResults = False
1282 utilities.assert_equals(
1283 expect=True,
1284 actual=intentsResults,
1285 onpass="No error in reading intents output",
1286 onfail="Error in reading intents from ONOS" )
1287
1288 main.step( "Check for consistency in Intents from each controller" )
1289 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1290 main.log.info( "Intents are consistent across all ONOS " +
1291 "nodes" )
1292 else:
1293 consistentIntents = False
1294 main.log.error( "Intents not consistent" )
1295 utilities.assert_equals(
1296 expect=True,
1297 actual=consistentIntents,
1298 onpass="Intents are consistent across all ONOS nodes",
1299 onfail="ONOS nodes have different views of intents" )
1300
1301 if intentsResults:
1302 # Try to make it easy to figure out what is happening
1303 #
1304 # Intent ONOS1 ONOS2 ...
1305 # 0x01 INSTALLED INSTALLING
1306 # ... ... ...
1307 # ... ... ...
1308 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001309 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001310 title += " " * 10 + "ONOS" + str( n + 1 )
1311 main.log.warn( title )
1312 # get all intent keys in the cluster
1313 keys = []
1314 for nodeStr in ONOSIntents:
1315 node = json.loads( nodeStr )
1316 for intent in node:
1317 keys.append( intent.get( 'id' ) )
1318 keys = set( keys )
1319 for key in keys:
1320 row = "%-13s" % key
1321 for nodeStr in ONOSIntents:
1322 node = json.loads( nodeStr )
1323 for intent in node:
1324 if intent.get( 'id', "Error" ) == key:
1325 row += "%-15s" % intent.get( 'state' )
1326 main.log.warn( row )
1327 # End table view
1328
1329 if intentsResults and not consistentIntents:
1330 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001331 n = str( main.activeNodes[-1] + 1 )
1332 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001333 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1334 sort_keys=True,
1335 indent=4,
1336 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001337 for i in range( len( ONOSIntents ) ):
1338 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001339 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001340 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001341 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1342 sort_keys=True,
1343 indent=4,
1344 separators=( ',', ': ' ) ) )
1345 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001346 main.log.debug( "ONOS" + node + " intents match ONOS" +
1347 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001348 elif intentsResults and consistentIntents:
1349 intentCheck = main.TRUE
1350 intentState = ONOSIntents[ 0 ]
1351
1352 main.step( "Get the flows from each controller" )
1353 global flowState
1354 flowState = []
1355 ONOSFlows = []
1356 ONOSFlowsJson = []
1357 flowCheck = main.FALSE
1358 consistentFlows = True
1359 flowsResults = True
1360 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001361 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001362 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001363 name="flows-" + str( i ),
1364 args=[],
1365 kwargs={ 'jsonFormat': True } )
1366 threads.append( t )
1367 t.start()
1368
1369 # NOTE: Flows command can take some time to run
1370 time.sleep(30)
1371 for t in threads:
1372 t.join()
1373 result = t.result
1374 ONOSFlows.append( result )
1375
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001376 for i in range( len( ONOSFlows ) ):
1377 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001378 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1379 main.log.error( "Error in getting ONOS" + num + " flows" )
1380 main.log.warn( "ONOS" + num + " flows response: " +
1381 repr( ONOSFlows[ i ] ) )
1382 flowsResults = False
1383 ONOSFlowsJson.append( None )
1384 else:
1385 try:
1386 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1387 except ( ValueError, TypeError ):
1388 # FIXME: change this to log.error?
1389 main.log.exception( "Error in parsing ONOS" + num +
1390 " response as json." )
1391 main.log.error( repr( ONOSFlows[ i ] ) )
1392 ONOSFlowsJson.append( None )
1393 flowsResults = False
1394 utilities.assert_equals(
1395 expect=True,
1396 actual=flowsResults,
1397 onpass="No error in reading flows output",
1398 onfail="Error in reading flows from ONOS" )
1399
1400 main.step( "Check for consistency in Flows from each controller" )
1401 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1402 if all( tmp ):
1403 main.log.info( "Flow count is consistent across all ONOS nodes" )
1404 else:
1405 consistentFlows = False
1406 utilities.assert_equals(
1407 expect=True,
1408 actual=consistentFlows,
1409 onpass="The flow count is consistent across all ONOS nodes",
1410 onfail="ONOS nodes have different flow counts" )
1411
1412 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001413 for i in range( len( ONOSFlows ) ):
1414 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001415 try:
1416 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001417 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001418 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1419 indent=4, separators=( ',', ': ' ) ) )
1420 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001421 main.log.warn( "ONOS" + node + " flows: " +
1422 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001423 elif flowsResults and consistentFlows:
1424 flowCheck = main.TRUE
1425 flowState = ONOSFlows[ 0 ]
1426
1427 main.step( "Get the OF Table entries" )
1428 global flows
1429 flows = []
1430 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001431 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001432 if flowCheck == main.FALSE:
1433 for table in flows:
1434 main.log.warn( table )
1435 # TODO: Compare switch flow tables with ONOS flow tables
1436
1437 main.step( "Start continuous pings" )
1438 main.Mininet2.pingLong(
1439 src=main.params[ 'PING' ][ 'source1' ],
1440 target=main.params[ 'PING' ][ 'target1' ],
1441 pingTime=500 )
1442 main.Mininet2.pingLong(
1443 src=main.params[ 'PING' ][ 'source2' ],
1444 target=main.params[ 'PING' ][ 'target2' ],
1445 pingTime=500 )
1446 main.Mininet2.pingLong(
1447 src=main.params[ 'PING' ][ 'source3' ],
1448 target=main.params[ 'PING' ][ 'target3' ],
1449 pingTime=500 )
1450 main.Mininet2.pingLong(
1451 src=main.params[ 'PING' ][ 'source4' ],
1452 target=main.params[ 'PING' ][ 'target4' ],
1453 pingTime=500 )
1454 main.Mininet2.pingLong(
1455 src=main.params[ 'PING' ][ 'source5' ],
1456 target=main.params[ 'PING' ][ 'target5' ],
1457 pingTime=500 )
1458 main.Mininet2.pingLong(
1459 src=main.params[ 'PING' ][ 'source6' ],
1460 target=main.params[ 'PING' ][ 'target6' ],
1461 pingTime=500 )
1462 main.Mininet2.pingLong(
1463 src=main.params[ 'PING' ][ 'source7' ],
1464 target=main.params[ 'PING' ][ 'target7' ],
1465 pingTime=500 )
1466 main.Mininet2.pingLong(
1467 src=main.params[ 'PING' ][ 'source8' ],
1468 target=main.params[ 'PING' ][ 'target8' ],
1469 pingTime=500 )
1470 main.Mininet2.pingLong(
1471 src=main.params[ 'PING' ][ 'source9' ],
1472 target=main.params[ 'PING' ][ 'target9' ],
1473 pingTime=500 )
1474 main.Mininet2.pingLong(
1475 src=main.params[ 'PING' ][ 'source10' ],
1476 target=main.params[ 'PING' ][ 'target10' ],
1477 pingTime=500 )
1478
1479 main.step( "Collecting topology information from ONOS" )
1480 devices = []
1481 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001482 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001483 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001484 name="devices-" + str( i ),
1485 args=[ ] )
1486 threads.append( t )
1487 t.start()
1488
1489 for t in threads:
1490 t.join()
1491 devices.append( t.result )
1492 hosts = []
1493 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001494 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001495 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001496 name="hosts-" + str( i ),
1497 args=[ ] )
1498 threads.append( t )
1499 t.start()
1500
1501 for t in threads:
1502 t.join()
1503 try:
1504 hosts.append( json.loads( t.result ) )
1505 except ( ValueError, TypeError ):
1506 # FIXME: better handling of this, print which node
1507 # Maybe use thread name?
1508 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001509 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001510 hosts.append( None )
1511
1512 ports = []
1513 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001514 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001515 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001516 name="ports-" + str( i ),
1517 args=[ ] )
1518 threads.append( t )
1519 t.start()
1520
1521 for t in threads:
1522 t.join()
1523 ports.append( t.result )
1524 links = []
1525 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001526 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001527 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001528 name="links-" + str( i ),
1529 args=[ ] )
1530 threads.append( t )
1531 t.start()
1532
1533 for t in threads:
1534 t.join()
1535 links.append( t.result )
1536 clusters = []
1537 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001538 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001539 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001540 name="clusters-" + str( i ),
1541 args=[ ] )
1542 threads.append( t )
1543 t.start()
1544
1545 for t in threads:
1546 t.join()
1547 clusters.append( t.result )
1548 # Compare json objects for hosts and dataplane clusters
1549
1550 # hosts
1551 main.step( "Host view is consistent across ONOS nodes" )
1552 consistentHostsResult = main.TRUE
1553 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001554 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001555 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001556 if hosts[ controller ] == hosts[ 0 ]:
1557 continue
1558 else: # hosts not consistent
1559 main.log.error( "hosts from ONOS" +
1560 controllerStr +
1561 " is inconsistent with ONOS1" )
1562 main.log.warn( repr( hosts[ controller ] ) )
1563 consistentHostsResult = main.FALSE
1564
1565 else:
1566 main.log.error( "Error in getting ONOS hosts from ONOS" +
1567 controllerStr )
1568 consistentHostsResult = main.FALSE
1569 main.log.warn( "ONOS" + controllerStr +
1570 " hosts response: " +
1571 repr( hosts[ controller ] ) )
1572 utilities.assert_equals(
1573 expect=main.TRUE,
1574 actual=consistentHostsResult,
1575 onpass="Hosts view is consistent across all ONOS nodes",
1576 onfail="ONOS nodes have different views of hosts" )
1577
1578 main.step( "Each host has an IP address" )
1579 ipResult = main.TRUE
1580 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001581 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001582 if hosts[ controller ]:
1583 for host in hosts[ controller ]:
1584 if not host.get( 'ipAddresses', [ ] ):
1585 main.log.error( "Error with host ips on controller" +
1586 controllerStr + ": " + str( host ) )
1587 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001588 utilities.assert_equals(
1589 expect=main.TRUE,
1590 actual=ipResult,
1591 onpass="The ips of the hosts aren't empty",
1592 onfail="The ip of at least one host is missing" )
1593
1594 # Strongly connected clusters of devices
1595 main.step( "Cluster view is consistent across ONOS nodes" )
1596 consistentClustersResult = main.TRUE
1597 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001598 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001599 if "Error" not in clusters[ controller ]:
1600 if clusters[ controller ] == clusters[ 0 ]:
1601 continue
1602 else: # clusters not consistent
1603 main.log.error( "clusters from ONOS" + controllerStr +
1604 " is inconsistent with ONOS1" )
1605 consistentClustersResult = main.FALSE
1606
1607 else:
1608 main.log.error( "Error in getting dataplane clusters " +
1609 "from ONOS" + controllerStr )
1610 consistentClustersResult = main.FALSE
1611 main.log.warn( "ONOS" + controllerStr +
1612 " clusters response: " +
1613 repr( clusters[ controller ] ) )
1614 utilities.assert_equals(
1615 expect=main.TRUE,
1616 actual=consistentClustersResult,
1617 onpass="Clusters view is consistent across all ONOS nodes",
1618 onfail="ONOS nodes have different views of clusters" )
1619 # there should always only be one cluster
1620 main.step( "Cluster view correct across ONOS nodes" )
1621 try:
1622 numClusters = len( json.loads( clusters[ 0 ] ) )
1623 except ( ValueError, TypeError ):
1624 main.log.exception( "Error parsing clusters[0]: " +
1625 repr( clusters[ 0 ] ) )
1626 clusterResults = main.FALSE
1627 if numClusters == 1:
1628 clusterResults = main.TRUE
1629 utilities.assert_equals(
1630 expect=1,
1631 actual=numClusters,
1632 onpass="ONOS shows 1 SCC",
1633 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1634
1635 main.step( "Comparing ONOS topology to MN" )
1636 devicesResults = main.TRUE
1637 linksResults = main.TRUE
1638 hostsResults = main.TRUE
1639 mnSwitches = main.Mininet1.getSwitches()
1640 mnLinks = main.Mininet1.getLinks()
1641 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001642 for controller in main.activeNodes:
1643 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001644 if devices[ controller ] and ports[ controller ] and\
1645 "Error" not in devices[ controller ] and\
1646 "Error" not in ports[ controller ]:
1647
1648 currentDevicesResult = main.Mininet1.compareSwitches(
1649 mnSwitches,
1650 json.loads( devices[ controller ] ),
1651 json.loads( ports[ controller ] ) )
1652 else:
1653 currentDevicesResult = main.FALSE
1654 utilities.assert_equals( expect=main.TRUE,
1655 actual=currentDevicesResult,
1656 onpass="ONOS" + controllerStr +
1657 " Switches view is correct",
1658 onfail="ONOS" + controllerStr +
1659 " Switches view is incorrect" )
1660 if links[ controller ] and "Error" not in links[ controller ]:
1661 currentLinksResult = main.Mininet1.compareLinks(
1662 mnSwitches, mnLinks,
1663 json.loads( links[ controller ] ) )
1664 else:
1665 currentLinksResult = main.FALSE
1666 utilities.assert_equals( expect=main.TRUE,
1667 actual=currentLinksResult,
1668 onpass="ONOS" + controllerStr +
1669 " links view is correct",
1670 onfail="ONOS" + controllerStr +
1671 " links view is incorrect" )
1672
Jon Hall657cdf62015-12-17 14:40:51 -08001673 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001674 currentHostsResult = main.Mininet1.compareHosts(
1675 mnHosts,
1676 hosts[ controller ] )
1677 else:
1678 currentHostsResult = main.FALSE
1679 utilities.assert_equals( expect=main.TRUE,
1680 actual=currentHostsResult,
1681 onpass="ONOS" + controllerStr +
1682 " hosts exist in Mininet",
1683 onfail="ONOS" + controllerStr +
1684 " hosts don't match Mininet" )
1685
1686 devicesResults = devicesResults and currentDevicesResult
1687 linksResults = linksResults and currentLinksResult
1688 hostsResults = hostsResults and currentHostsResult
1689
1690 main.step( "Device information is correct" )
1691 utilities.assert_equals(
1692 expect=main.TRUE,
1693 actual=devicesResults,
1694 onpass="Device information is correct",
1695 onfail="Device information is incorrect" )
1696
1697 main.step( "Links are correct" )
1698 utilities.assert_equals(
1699 expect=main.TRUE,
1700 actual=linksResults,
1701 onpass="Link are correct",
1702 onfail="Links are incorrect" )
1703
1704 main.step( "Hosts are correct" )
1705 utilities.assert_equals(
1706 expect=main.TRUE,
1707 actual=hostsResults,
1708 onpass="Hosts are correct",
1709 onfail="Hosts are incorrect" )
1710
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001711 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001712 """
1713 The Failure case.
1714 """
Jon Halle1a3b752015-07-22 13:02:46 -07001715 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001716 assert main, "main not defined"
1717 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001718 assert main.CLIs, "main.CLIs not defined"
1719 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001720 main.case( "Stop minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001721
1722 main.step( "Checking ONOS Logs for errors" )
1723 for node in main.nodes:
1724 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1725 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1726
Jon Hall3b489db2015-10-05 14:38:37 -07001727 n = len( main.nodes ) # Number of nodes
1728 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1729 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1730 if n > 3:
1731 main.kill.append( p - 1 )
1732 # NOTE: This only works for cluster sizes of 3,5, or 7.
1733
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001734 main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001735 killResults = main.TRUE
1736 for i in main.kill:
1737 killResults = killResults and\
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001738 main.ONOSbench.onosStop( main.nodes[i].ip_address )
1739 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001740 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001741 onpass="ONOS nodes stopped successfully",
1742 onfail="ONOS nodes NOT successfully stopped" )
1743
1744 def CASE62( self, main ):
1745 """
1746 The bring up stopped nodes
1747 """
1748 import time
1749 assert main.numCtrls, "main.numCtrls not defined"
1750 assert main, "main not defined"
1751 assert utilities.assert_equals, "utilities.assert_equals not defined"
1752 assert main.CLIs, "main.CLIs not defined"
1753 assert main.nodes, "main.nodes not defined"
1754 assert main.kill, "main.kill not defined"
1755 main.case( "Restart minority of ONOS nodes" )
1756
1757 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1758 startResults = main.TRUE
1759 restartTime = time.time()
1760 for i in main.kill:
1761 startResults = startResults and\
1762 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1763 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1764 onpass="ONOS nodes started successfully",
1765 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001766
1767 main.step( "Checking if ONOS is up yet" )
1768 count = 0
1769 onosIsupResult = main.FALSE
1770 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001771 onosIsupResult = main.TRUE
1772 for i in main.kill:
1773 onosIsupResult = onosIsupResult and\
1774 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001775 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001776 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1777 onpass="ONOS restarted successfully",
1778 onfail="ONOS restart NOT successful" )
1779
Jon Halle1a3b752015-07-22 13:02:46 -07001780 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001781 cliResults = main.TRUE
1782 for i in main.kill:
1783 cliResults = cliResults and\
1784 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001785 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001786 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1787 onpass="ONOS cli restarted",
1788 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001789 main.activeNodes.sort()
1790 try:
1791 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1792 "List of active nodes has duplicates, this likely indicates something was run out of order"
1793 except AssertionError:
1794 main.log.exception( "" )
1795 main.cleanup()
1796 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001797
1798 # Grab the time of restart so we chan check how long the gossip
1799 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001800 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001801 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001802 # TODO: MAke this configurable. Also, we are breaking the above timer
1803 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001804 node = main.activeNodes[0]
1805 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1806 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1807 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001808
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        Verifies, on the surviving (active) nodes, that:
          - every switch still has a master,
          - device roles are readable and consistent across nodes,
          - intents are readable, consistent across nodes, and unchanged
            from the snapshot taken before the failure,
          - the OpenFlow tables on the switches are unchanged,
          - leadership election still reports a single, live leader.

        NOTE(review): 'intentState' and 'flows' are not defined in this
        method; they appear to be names set by CASE5 and shared through
        TestON's common case scope — if CASE5 did not run, this case will
        raise a NameError. TODO confirm.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # Make sure main.kill exists even if the failure case was skipped,
        # so the leadership check below can still build its restarted list.
        try:
            main.kill
        except AttributeError:
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel, one thread per node.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # ONOSMastership is ordered like main.activeNodes; translate the
        # list index back to a 1-based node label for logging.
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes must return the exact same roles output as the first.
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's full roles output for debugging.
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE(review): sorted() here sorts the characters of the raw JSON
        # strings, so this compares outputs irrespective of line order, not
        # parsed intents — TODO confirm this is intentional.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # Print one row per intent id with each node's view of its state.
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states, e.g. {INSTALLED: 10}.
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        if intentState and intentState == ONOSIntents[ 0 ]:
            sameIntents = main.TRUE
            main.log.info( "Intents are consistent with before failure" )
        # TODO: possibly the states have changed? we may need to figure out
        #       what the acceptable states are
        elif len( intentState ) == len( ONOSIntents[ 0 ] ):
            # Same length but not byte-identical: fall back to comparing
            # the parsed intent objects individually.
            sameIntents = main.TRUE
            try:
                before = json.loads( intentState )
                after = json.loads( ONOSIntents[ 0 ] )
                for intent in before:
                    if intent not in after:
                        sameIntents = main.FALSE
                        main.log.debug( "Intent is not currently in ONOS " +
                                        "(at least in the same form):" )
                        main.log.debug( json.dumps( intent ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        if sameIntents == main.FALSE:
            # Dump the before/after intent sets so the diff can be inspected.
            try:
                main.log.debug( "ONOS intents before: " )
                main.log.debug( json.dumps( json.loads( intentState ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
                main.log.debug( "Current ONOS intents: " )
                main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        # NOTE(review): the switch count (28) and the pre-failure 'flows'
        # snapshot are assumed to come from an earlier case — TODO confirm.
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
            if FlowTables == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )

        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes that were stopped; the new leader must not be
        # one of them.
        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        # Every active node must agree on exactly one leader.
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2125
2126 def CASE8( self, main ):
2127 """
2128 Compare topo
2129 """
2130 import json
2131 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002132 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002133 assert main, "main not defined"
2134 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002135 assert main.CLIs, "main.CLIs not defined"
2136 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002137
2138 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002139 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002140 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002141 topoResult = main.FALSE
2142 elapsed = 0
2143 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002144 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002145 startTime = time.time()
2146 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002147 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002148 devicesResults = main.TRUE
2149 linksResults = main.TRUE
2150 hostsResults = main.TRUE
2151 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002152 count += 1
2153 cliStart = time.time()
2154 devices = []
2155 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002156 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002157 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002158 name="devices-" + str( i ),
2159 args=[ ] )
2160 threads.append( t )
2161 t.start()
2162
2163 for t in threads:
2164 t.join()
2165 devices.append( t.result )
2166 hosts = []
2167 ipResult = main.TRUE
2168 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002169 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002170 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002171 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002172 args=[ main.CLIs[i].hosts, [ None ] ],
2173 kwargs= { 'sleep': 5, 'attempts': 5,
2174 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002175 threads.append( t )
2176 t.start()
2177
2178 for t in threads:
2179 t.join()
2180 try:
2181 hosts.append( json.loads( t.result ) )
2182 except ( ValueError, TypeError ):
2183 main.log.exception( "Error parsing hosts results" )
2184 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002185 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002186 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002187 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002188 if hosts[ controller ]:
2189 for host in hosts[ controller ]:
2190 if host is None or host.get( 'ipAddresses', [] ) == []:
2191 main.log.error(
2192 "Error with host ipAddresses on controller" +
2193 controllerStr + ": " + str( host ) )
2194 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002195 ports = []
2196 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002197 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002198 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002199 name="ports-" + str( i ),
2200 args=[ ] )
2201 threads.append( t )
2202 t.start()
2203
2204 for t in threads:
2205 t.join()
2206 ports.append( t.result )
2207 links = []
2208 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002209 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002210 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002211 name="links-" + str( i ),
2212 args=[ ] )
2213 threads.append( t )
2214 t.start()
2215
2216 for t in threads:
2217 t.join()
2218 links.append( t.result )
2219 clusters = []
2220 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002221 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002222 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002223 name="clusters-" + str( i ),
2224 args=[ ] )
2225 threads.append( t )
2226 t.start()
2227
2228 for t in threads:
2229 t.join()
2230 clusters.append( t.result )
2231
2232 elapsed = time.time() - startTime
2233 cliTime = time.time() - cliStart
2234 print "Elapsed time: " + str( elapsed )
2235 print "CLI time: " + str( cliTime )
2236
2237 mnSwitches = main.Mininet1.getSwitches()
2238 mnLinks = main.Mininet1.getLinks()
2239 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002240 for controller in range( len( main.activeNodes ) ):
2241 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002242 if devices[ controller ] and ports[ controller ] and\
2243 "Error" not in devices[ controller ] and\
2244 "Error" not in ports[ controller ]:
2245
2246 currentDevicesResult = main.Mininet1.compareSwitches(
2247 mnSwitches,
2248 json.loads( devices[ controller ] ),
2249 json.loads( ports[ controller ] ) )
2250 else:
2251 currentDevicesResult = main.FALSE
2252 utilities.assert_equals( expect=main.TRUE,
2253 actual=currentDevicesResult,
2254 onpass="ONOS" + controllerStr +
2255 " Switches view is correct",
2256 onfail="ONOS" + controllerStr +
2257 " Switches view is incorrect" )
2258
2259 if links[ controller ] and "Error" not in links[ controller ]:
2260 currentLinksResult = main.Mininet1.compareLinks(
2261 mnSwitches, mnLinks,
2262 json.loads( links[ controller ] ) )
2263 else:
2264 currentLinksResult = main.FALSE
2265 utilities.assert_equals( expect=main.TRUE,
2266 actual=currentLinksResult,
2267 onpass="ONOS" + controllerStr +
2268 " links view is correct",
2269 onfail="ONOS" + controllerStr +
2270 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002271 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002272 currentHostsResult = main.Mininet1.compareHosts(
2273 mnHosts,
2274 hosts[ controller ] )
2275 else:
2276 currentHostsResult = main.FALSE
2277 utilities.assert_equals( expect=main.TRUE,
2278 actual=currentHostsResult,
2279 onpass="ONOS" + controllerStr +
2280 " hosts exist in Mininet",
2281 onfail="ONOS" + controllerStr +
2282 " hosts don't match Mininet" )
2283 # CHECKING HOST ATTACHMENT POINTS
2284 hostAttachment = True
2285 zeroHosts = False
2286 # FIXME: topo-HA/obelisk specific mappings:
2287 # key is mac and value is dpid
2288 mappings = {}
2289 for i in range( 1, 29 ): # hosts 1 through 28
2290 # set up correct variables:
2291 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2292 if i == 1:
2293 deviceId = "1000".zfill(16)
2294 elif i == 2:
2295 deviceId = "2000".zfill(16)
2296 elif i == 3:
2297 deviceId = "3000".zfill(16)
2298 elif i == 4:
2299 deviceId = "3004".zfill(16)
2300 elif i == 5:
2301 deviceId = "5000".zfill(16)
2302 elif i == 6:
2303 deviceId = "6000".zfill(16)
2304 elif i == 7:
2305 deviceId = "6007".zfill(16)
2306 elif i >= 8 and i <= 17:
2307 dpid = '3' + str( i ).zfill( 3 )
2308 deviceId = dpid.zfill(16)
2309 elif i >= 18 and i <= 27:
2310 dpid = '6' + str( i ).zfill( 3 )
2311 deviceId = dpid.zfill(16)
2312 elif i == 28:
2313 deviceId = "2800".zfill(16)
2314 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002315 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002316 if hosts[ controller ] == []:
2317 main.log.warn( "There are no hosts discovered" )
2318 zeroHosts = True
2319 else:
2320 for host in hosts[ controller ]:
2321 mac = None
2322 location = None
2323 device = None
2324 port = None
2325 try:
2326 mac = host.get( 'mac' )
2327 assert mac, "mac field could not be found for this host object"
2328
2329 location = host.get( 'location' )
2330 assert location, "location field could not be found for this host object"
2331
2332 # Trim the protocol identifier off deviceId
2333 device = str( location.get( 'elementId' ) ).split(':')[1]
2334 assert device, "elementId field could not be found for this host location object"
2335
2336 port = location.get( 'port' )
2337 assert port, "port field could not be found for this host location object"
2338
2339 # Now check if this matches where they should be
2340 if mac and device and port:
2341 if str( port ) != "1":
2342 main.log.error( "The attachment port is incorrect for " +
2343 "host " + str( mac ) +
2344 ". Expected: 1 Actual: " + str( port) )
2345 hostAttachment = False
2346 if device != mappings[ str( mac ) ]:
2347 main.log.error( "The attachment device is incorrect for " +
2348 "host " + str( mac ) +
2349 ". Expected: " + mappings[ str( mac ) ] +
2350 " Actual: " + device )
2351 hostAttachment = False
2352 else:
2353 hostAttachment = False
2354 except AssertionError:
2355 main.log.exception( "Json object not as expected" )
2356 main.log.error( repr( host ) )
2357 hostAttachment = False
2358 else:
2359 main.log.error( "No hosts json output or \"Error\"" +
2360 " in output. hosts = " +
2361 repr( hosts[ controller ] ) )
2362 if zeroHosts is False:
2363 hostAttachment = True
2364
2365 # END CHECKING HOST ATTACHMENT POINTS
2366 devicesResults = devicesResults and currentDevicesResult
2367 linksResults = linksResults and currentLinksResult
2368 hostsResults = hostsResults and currentHostsResult
2369 hostAttachmentResults = hostAttachmentResults and\
2370 hostAttachment
Jon Halle9b1fa32015-12-08 15:32:21 -08002371 topoResult = devicesResults and linksResults and\
2372 hostsResults and hostAttachmentResults
2373 utilities.assert_equals( expect=True,
2374 actual=topoResult,
2375 onpass="ONOS topology matches Mininet",
2376 onfail="ONOS topology don't match Mininet" )
2377 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002378
2379 # Compare json objects for hosts and dataplane clusters
2380
2381 # hosts
2382 main.step( "Hosts view is consistent across all ONOS nodes" )
2383 consistentHostsResult = main.TRUE
2384 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002385 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall657cdf62015-12-17 14:40:51 -08002386 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002387 if hosts[ controller ] == hosts[ 0 ]:
2388 continue
2389 else: # hosts not consistent
2390 main.log.error( "hosts from ONOS" + controllerStr +
2391 " is inconsistent with ONOS1" )
2392 main.log.warn( repr( hosts[ controller ] ) )
2393 consistentHostsResult = main.FALSE
2394
2395 else:
2396 main.log.error( "Error in getting ONOS hosts from ONOS" +
2397 controllerStr )
2398 consistentHostsResult = main.FALSE
2399 main.log.warn( "ONOS" + controllerStr +
2400 " hosts response: " +
2401 repr( hosts[ controller ] ) )
2402 utilities.assert_equals(
2403 expect=main.TRUE,
2404 actual=consistentHostsResult,
2405 onpass="Hosts view is consistent across all ONOS nodes",
2406 onfail="ONOS nodes have different views of hosts" )
2407
2408 main.step( "Hosts information is correct" )
2409 hostsResults = hostsResults and ipResult
2410 utilities.assert_equals(
2411 expect=main.TRUE,
2412 actual=hostsResults,
2413 onpass="Host information is correct",
2414 onfail="Host information is incorrect" )
2415
2416 main.step( "Host attachment points to the network" )
2417 utilities.assert_equals(
2418 expect=True,
2419 actual=hostAttachmentResults,
2420 onpass="Hosts are correctly attached to the network",
2421 onfail="ONOS did not correctly attach hosts to the network" )
2422
2423 # Strongly connected clusters of devices
2424 main.step( "Clusters view is consistent across all ONOS nodes" )
2425 consistentClustersResult = main.TRUE
2426 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002427 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002428 if "Error" not in clusters[ controller ]:
2429 if clusters[ controller ] == clusters[ 0 ]:
2430 continue
2431 else: # clusters not consistent
2432 main.log.error( "clusters from ONOS" +
2433 controllerStr +
2434 " is inconsistent with ONOS1" )
2435 consistentClustersResult = main.FALSE
2436
2437 else:
2438 main.log.error( "Error in getting dataplane clusters " +
2439 "from ONOS" + controllerStr )
2440 consistentClustersResult = main.FALSE
2441 main.log.warn( "ONOS" + controllerStr +
2442 " clusters response: " +
2443 repr( clusters[ controller ] ) )
2444 utilities.assert_equals(
2445 expect=main.TRUE,
2446 actual=consistentClustersResult,
2447 onpass="Clusters view is consistent across all ONOS nodes",
2448 onfail="ONOS nodes have different views of clusters" )
2449
2450 main.step( "There is only one SCC" )
2451 # there should always only be one cluster
2452 try:
2453 numClusters = len( json.loads( clusters[ 0 ] ) )
2454 except ( ValueError, TypeError ):
2455 main.log.exception( "Error parsing clusters[0]: " +
2456 repr( clusters[0] ) )
2457 clusterResults = main.FALSE
2458 if numClusters == 1:
2459 clusterResults = main.TRUE
2460 utilities.assert_equals(
2461 expect=1,
2462 actual=numClusters,
2463 onpass="ONOS shows 1 SCC",
2464 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2465
2466 topoResult = ( devicesResults and linksResults
2467 and hostsResults and consistentHostsResult
2468 and consistentClustersResult and clusterResults
2469 and ipResult and hostAttachmentResults )
2470
2471 topoResult = topoResult and int( count <= 2 )
2472 note = "note it takes about " + str( int( cliTime ) ) + \
2473 " seconds for the test to make all the cli calls to fetch " +\
2474 "the topology from each ONOS instance"
2475 main.log.info(
2476 "Very crass estimate for topology discovery/convergence( " +
2477 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2478 str( count ) + " tries" )
2479
2480 main.step( "Device information is correct" )
2481 utilities.assert_equals(
2482 expect=main.TRUE,
2483 actual=devicesResults,
2484 onpass="Device information is correct",
2485 onfail="Device information is incorrect" )
2486
2487 main.step( "Links are correct" )
2488 utilities.assert_equals(
2489 expect=main.TRUE,
2490 actual=linksResults,
2491 onpass="Link are correct",
2492 onfail="Links are incorrect" )
2493
2494 # FIXME: move this to an ONOS state case
2495 main.step( "Checking ONOS nodes" )
2496 nodesOutput = []
2497 nodeResults = main.TRUE
2498 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002499 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002500 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002501 name="nodes-" + str( i ),
2502 args=[ ] )
2503 threads.append( t )
2504 t.start()
2505
2506 for t in threads:
2507 t.join()
2508 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002509 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002510 ips.sort()
Jon Hall5cf14d52015-07-16 12:15:19 -07002511 for i in nodesOutput:
2512 try:
2513 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002514 activeIps = []
2515 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002516 for node in current:
Jon Halle9b1fa32015-12-08 15:32:21 -08002517 if node['state'] == 'ACTIVE':
2518 activeIps.append( node['ip'] )
2519 activeIps.sort()
2520 if ips == activeIps:
2521 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002522 except ( ValueError, TypeError ):
2523 main.log.error( "Error parsing nodes output" )
2524 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002525 currentResult = main.FALSE
2526 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002527 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2528 onpass="Nodes check successful",
2529 onfail="Nodes check NOT successful" )
2530
2531 def CASE9( self, main ):
2532 """
2533 Link s3-s28 down
2534 """
2535 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002536 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002537 assert main, "main not defined"
2538 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002539 assert main.CLIs, "main.CLIs not defined"
2540 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002541 # NOTE: You should probably run a topology check after this
2542
2543 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2544
2545 description = "Turn off a link to ensure that Link Discovery " +\
2546 "is working properly"
2547 main.case( description )
2548
2549 main.step( "Kill Link between s3 and s28" )
2550 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2551 main.log.info( "Waiting " + str( linkSleep ) +
2552 " seconds for link down to be discovered" )
2553 time.sleep( linkSleep )
2554 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2555 onpass="Link down successful",
2556 onfail="Failed to bring link down" )
2557 # TODO do some sort of check here
2558
2559 def CASE10( self, main ):
2560 """
2561 Link s3-s28 up
2562 """
2563 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002564 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002565 assert main, "main not defined"
2566 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002567 assert main.CLIs, "main.CLIs not defined"
2568 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002569 # NOTE: You should probably run a topology check after this
2570
2571 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2572
2573 description = "Restore a link to ensure that Link Discovery is " + \
2574 "working properly"
2575 main.case( description )
2576
2577 main.step( "Bring link between s3 and s28 back up" )
2578 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2579 main.log.info( "Waiting " + str( linkSleep ) +
2580 " seconds for link up to be discovered" )
2581 time.sleep( linkSleep )
2582 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2583 onpass="Link up successful",
2584 onfail="Failed to bring link up" )
2585 # TODO do some sort of check here
2586
2587 def CASE11( self, main ):
2588 """
2589 Switch Down
2590 """
2591 # NOTE: You should probably run a topology check after this
2592 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002593 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002594 assert main, "main not defined"
2595 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002596 assert main.CLIs, "main.CLIs not defined"
2597 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002598
2599 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2600
2601 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002602 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002603 main.case( description )
2604 switch = main.params[ 'kill' ][ 'switch' ]
2605 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2606
2607 # TODO: Make this switch parameterizable
2608 main.step( "Kill " + switch )
2609 main.log.info( "Deleting " + switch )
2610 main.Mininet1.delSwitch( switch )
2611 main.log.info( "Waiting " + str( switchSleep ) +
2612 " seconds for switch down to be discovered" )
2613 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002614 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002615 # Peek at the deleted switch
2616 main.log.warn( str( device ) )
2617 result = main.FALSE
2618 if device and device[ 'available' ] is False:
2619 result = main.TRUE
2620 utilities.assert_equals( expect=main.TRUE, actual=result,
2621 onpass="Kill switch successful",
2622 onfail="Failed to kill switch?" )
2623
2624 def CASE12( self, main ):
2625 """
2626 Switch Up
2627 """
2628 # NOTE: You should probably run a topology check after this
2629 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002630 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002631 assert main, "main not defined"
2632 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002633 assert main.CLIs, "main.CLIs not defined"
2634 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002635 assert ONOS1Port, "ONOS1Port not defined"
2636 assert ONOS2Port, "ONOS2Port not defined"
2637 assert ONOS3Port, "ONOS3Port not defined"
2638 assert ONOS4Port, "ONOS4Port not defined"
2639 assert ONOS5Port, "ONOS5Port not defined"
2640 assert ONOS6Port, "ONOS6Port not defined"
2641 assert ONOS7Port, "ONOS7Port not defined"
2642
2643 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2644 switch = main.params[ 'kill' ][ 'switch' ]
2645 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2646 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002647 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002648 description = "Adding a switch to ensure it is discovered correctly"
2649 main.case( description )
2650
2651 main.step( "Add back " + switch )
2652 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2653 for peer in links:
2654 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002655 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002656 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2657 main.log.info( "Waiting " + str( switchSleep ) +
2658 " seconds for switch up to be discovered" )
2659 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002660 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002661 # Peek at the deleted switch
2662 main.log.warn( str( device ) )
2663 result = main.FALSE
2664 if device and device[ 'available' ]:
2665 result = main.TRUE
2666 utilities.assert_equals( expect=main.TRUE, actual=result,
2667 onpass="add switch successful",
2668 onfail="Failed to add switch?" )
2669
2670 def CASE13( self, main ):
2671 """
2672 Clean up
2673 """
2674 import os
2675 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002676 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002677 assert main, "main not defined"
2678 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002679 assert main.CLIs, "main.CLIs not defined"
2680 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002681
2682 # printing colors to terminal
2683 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2684 'blue': '\033[94m', 'green': '\033[92m',
2685 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2686 main.case( "Test Cleanup" )
2687 main.step( "Killing tcpdumps" )
2688 main.Mininet2.stopTcpdump()
2689
2690 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002691 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002692 main.step( "Copying MN pcap and ONOS log files to test station" )
2693 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2694 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002695 # NOTE: MN Pcap file is being saved to logdir.
2696 # We scp this file as MN and TestON aren't necessarily the same vm
2697
2698 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002699 # TODO: Load these from params
2700 # NOTE: must end in /
2701 logFolder = "/opt/onos/log/"
2702 logFiles = [ "karaf.log", "karaf.log.1" ]
2703 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002704 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002705 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002706 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002707 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2708 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002709 # std*.log's
2710 # NOTE: must end in /
2711 logFolder = "/opt/onos/var/"
2712 logFiles = [ "stderr.log", "stdout.log" ]
2713 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002714 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002715 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002716 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002717 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2718 logFolder + f, dstName )
2719 else:
2720 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002721
2722 main.step( "Stopping Mininet" )
2723 mnResult = main.Mininet1.stopNet()
2724 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2725 onpass="Mininet stopped",
2726 onfail="MN cleanup NOT successful" )
2727
2728 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002729 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002730 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2731 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002732
2733 try:
2734 timerLog = open( main.logdir + "/Timers.csv", 'w')
2735 # Overwrite with empty line and close
2736 labels = "Gossip Intents, Restart"
2737 data = str( gossipTime ) + ", " + str( main.restartTime )
2738 timerLog.write( labels + "\n" + data )
2739 timerLog.close()
2740 except NameError, e:
2741 main.log.exception(e)
2742
2743 def CASE14( self, main ):
2744 """
2745 start election app on all onos nodes
2746 """
Jon Halle1a3b752015-07-22 13:02:46 -07002747 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002748 assert main, "main not defined"
2749 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002750 assert main.CLIs, "main.CLIs not defined"
2751 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002752
2753 main.case("Start Leadership Election app")
2754 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002755 onosCli = main.CLIs[ main.activeNodes[0] ]
2756 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002757 utilities.assert_equals(
2758 expect=main.TRUE,
2759 actual=appResult,
2760 onpass="Election app installed",
2761 onfail="Something went wrong with installing Leadership election" )
2762
2763 main.step( "Run for election on each node" )
2764 leaderResult = main.TRUE
2765 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002766 for i in main.activeNodes:
2767 main.CLIs[i].electionTestRun()
2768 for i in main.activeNodes:
2769 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002770 leader = cli.electionTestLeader()
2771 if leader is None or leader == main.FALSE:
2772 main.log.error( cli.name + ": Leader for the election app " +
2773 "should be an ONOS node, instead got '" +
2774 str( leader ) + "'" )
2775 leaderResult = main.FALSE
2776 leaders.append( leader )
2777 utilities.assert_equals(
2778 expect=main.TRUE,
2779 actual=leaderResult,
2780 onpass="Successfully ran for leadership",
2781 onfail="Failed to run for leadership" )
2782
2783 main.step( "Check that each node shows the same leader" )
2784 sameLeader = main.TRUE
2785 if len( set( leaders ) ) != 1:
2786 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002787 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002788 str( leaders ) )
2789 utilities.assert_equals(
2790 expect=main.TRUE,
2791 actual=sameLeader,
2792 onpass="Leadership is consistent for the election topic",
2793 onfail="Nodes have different leaders" )
2794
2795 def CASE15( self, main ):
2796 """
2797 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002798 15.1 Run election on each node
2799 15.2 Check that each node has the same leaders and candidates
2800 15.3 Find current leader and withdraw
2801 15.4 Check that a new node was elected leader
2802 15.5 Check that that new leader was the candidate of old leader
2803 15.6 Run for election on old leader
2804 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2805 15.8 Make sure that the old leader was added to the candidate list
2806
2807 old and new variable prefixes refer to data from before vs after
2808 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002809 """
2810 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002811 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002812 assert main, "main not defined"
2813 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002814 assert main.CLIs, "main.CLIs not defined"
2815 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002816
Jon Hall5cf14d52015-07-16 12:15:19 -07002817 description = "Check that Leadership Election is still functional"
2818 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002819 # NOTE: Need to re-run since being a canidate is not persistant
2820 # TODO: add check for "Command not found:" in the driver, this
2821 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002822
acsmars71adceb2015-08-31 15:09:26 -07002823 oldLeaders = [] # leaders by node before withdrawl from candidates
2824 newLeaders = [] # leaders by node after withdrawl from candidates
2825 oldAllCandidates = [] # list of lists of each nodes' candidates before
2826 newAllCandidates = [] # list of lists of each nodes' candidates after
2827 oldCandidates = [] # list of candidates from node 0 before withdrawl
2828 newCandidates = [] # list of candidates from node 0 after withdrawl
2829 oldLeader = '' # the old leader from oldLeaders, None if not same
2830 newLeader = '' # the new leaders fron newLoeaders, None if not same
2831 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2832 expectNoLeader = False # True when there is only one leader
2833 if main.numCtrls == 1:
2834 expectNoLeader = True
2835
2836 main.step( "Run for election on each node" )
2837 electionResult = main.TRUE
2838
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002839 for i in main.activeNodes: # run test election on each node
2840 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002841 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002842 utilities.assert_equals(
2843 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002844 actual=electionResult,
2845 onpass="All nodes successfully ran for leadership",
2846 onfail="At least one node failed to run for leadership" )
2847
acsmars3a72bde2015-09-02 14:16:22 -07002848 if electionResult == main.FALSE:
2849 main.log.error(
2850 "Skipping Test Case because Election Test App isn't loaded" )
2851 main.skipCase()
2852
acsmars71adceb2015-08-31 15:09:26 -07002853 main.step( "Check that each node shows the same leader and candidates" )
2854 sameResult = main.TRUE
2855 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002856 for i in main.activeNodes:
2857 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002858 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2859 oldAllCandidates.append( node )
2860 oldLeaders.append( node[ 0 ] )
2861 oldCandidates = oldAllCandidates[ 0 ]
2862
2863 # Check that each node has the same leader. Defines oldLeader
2864 if len( set( oldLeaders ) ) != 1:
2865 sameResult = main.FALSE
2866 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2867 oldLeader = None
2868 else:
2869 oldLeader = oldLeaders[ 0 ]
2870
2871 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002872 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002873 for candidates in oldAllCandidates:
2874 if set( candidates ) != set( oldCandidates ):
2875 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002876 candidateDiscrepancy = True
2877
2878 if candidateDiscrepancy:
2879 failMessage += " and candidates"
2880
acsmars71adceb2015-08-31 15:09:26 -07002881 utilities.assert_equals(
2882 expect=main.TRUE,
2883 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002884 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002885 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002886
2887 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002888 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002889 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002890 if oldLeader is None:
2891 main.log.error( "Leadership isn't consistent." )
2892 withdrawResult = main.FALSE
2893 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002894 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002895 if oldLeader == main.nodes[ i ].ip_address:
2896 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002897 break
2898 else: # FOR/ELSE statement
2899 main.log.error( "Leader election, could not find current leader" )
2900 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002901 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002902 utilities.assert_equals(
2903 expect=main.TRUE,
2904 actual=withdrawResult,
2905 onpass="Node was withdrawn from election",
2906 onfail="Node was not withdrawn from election" )
2907
acsmars71adceb2015-08-31 15:09:26 -07002908 main.step( "Check that a new node was elected leader" )
2909
Jon Hall5cf14d52015-07-16 12:15:19 -07002910 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002911 newLeaderResult = main.TRUE
2912 failMessage = "Nodes have different leaders"
2913
2914 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002915 for i in main.activeNodes:
2916 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002917 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2918 # elections might no have finished yet
2919 if node[ 0 ] == 'none' and not expectNoLeader:
2920 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2921 "sure elections are complete." )
2922 time.sleep(5)
2923 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2924 # election still isn't done or there is a problem
2925 if node[ 0 ] == 'none':
2926 main.log.error( "No leader was elected on at least 1 node" )
2927 newLeaderResult = main.FALSE
2928 newAllCandidates.append( node )
2929 newLeaders.append( node[ 0 ] )
2930 newCandidates = newAllCandidates[ 0 ]
2931
2932 # Check that each node has the same leader. Defines newLeader
2933 if len( set( newLeaders ) ) != 1:
2934 newLeaderResult = main.FALSE
2935 main.log.error( "Nodes have different leaders: " +
2936 str( newLeaders ) )
2937 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002938 else:
acsmars71adceb2015-08-31 15:09:26 -07002939 newLeader = newLeaders[ 0 ]
2940
2941 # Check that each node's candidate list is the same
2942 for candidates in newAllCandidates:
2943 if set( candidates ) != set( newCandidates ):
2944 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002945 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002946
2947 # Check that the new leader is not the older leader, which was withdrawn
2948 if newLeader == oldLeader:
2949 newLeaderResult = main.FALSE
2950 main.log.error( "All nodes still see old leader: " + oldLeader +
2951 " as the current leader" )
2952
Jon Hall5cf14d52015-07-16 12:15:19 -07002953 utilities.assert_equals(
2954 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002955 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002956 onpass="Leadership election passed",
2957 onfail="Something went wrong with Leadership election" )
2958
acsmars71adceb2015-08-31 15:09:26 -07002959 main.step( "Check that that new leader was the candidate of old leader")
2960 # candidates[ 2 ] should be come the top candidate after withdrawl
2961 correctCandidateResult = main.TRUE
2962 if expectNoLeader:
2963 if newLeader == 'none':
2964 main.log.info( "No leader expected. None found. Pass" )
2965 correctCandidateResult = main.TRUE
2966 else:
2967 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2968 correctCandidateResult = main.FALSE
2969 elif newLeader != oldCandidates[ 2 ]:
2970 correctCandidateResult = main.FALSE
2971 main.log.error( "Candidate " + newLeader + " was elected. " +
2972 oldCandidates[ 2 ] + " should have had priority." )
2973
2974 utilities.assert_equals(
2975 expect=main.TRUE,
2976 actual=correctCandidateResult,
2977 onpass="Correct Candidate Elected",
2978 onfail="Incorrect Candidate Elected" )
2979
Jon Hall5cf14d52015-07-16 12:15:19 -07002980 main.step( "Run for election on old leader( just so everyone " +
2981 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002982 if oldLeaderCLI is not None:
2983 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002984 else:
acsmars71adceb2015-08-31 15:09:26 -07002985 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002986 runResult = main.FALSE
2987 utilities.assert_equals(
2988 expect=main.TRUE,
2989 actual=runResult,
2990 onpass="App re-ran for election",
2991 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07002992 main.step(
2993 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002994 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07002995 positionResult = main.TRUE
2996 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
2997
2998 # Reset and reuse the new candidate and leaders lists
2999 newAllCandidates = []
3000 newCandidates = []
3001 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003002 for i in main.activeNodes:
3003 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003004 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3005 if oldLeader not in node: # election might no have finished yet
3006 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3007 "be sure elections are complete" )
3008 time.sleep(5)
3009 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3010 if oldLeader not in node: # election still isn't done, errors
3011 main.log.error(
3012 "Old leader was not elected on at least one node" )
3013 positionResult = main.FALSE
3014 newAllCandidates.append( node )
3015 newLeaders.append( node[ 0 ] )
3016 newCandidates = newAllCandidates[ 0 ]
3017
3018 # Check that each node has the same leader. Defines newLeader
3019 if len( set( newLeaders ) ) != 1:
3020 positionResult = main.FALSE
3021 main.log.error( "Nodes have different leaders: " +
3022 str( newLeaders ) )
3023 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003024 else:
acsmars71adceb2015-08-31 15:09:26 -07003025 newLeader = newLeaders[ 0 ]
3026
3027 # Check that each node's candidate list is the same
3028 for candidates in newAllCandidates:
3029 if set( candidates ) != set( newCandidates ):
3030 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003031 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003032
3033 # Check that the re-elected node is last on the candidate List
3034 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003035 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003036 str( newCandidates ) )
3037 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003038
3039 utilities.assert_equals(
3040 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003041 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003042 onpass="Old leader successfully re-ran for election",
3043 onfail="Something went wrong with Leadership election after " +
3044 "the old leader re-ran for election" )
3045
3046 def CASE16( self, main ):
3047 """
3048 Install Distributed Primitives app
3049 """
3050 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003051 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003052 assert main, "main not defined"
3053 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003054 assert main.CLIs, "main.CLIs not defined"
3055 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003056
3057 # Variables for the distributed primitives tests
3058 global pCounterName
3059 global iCounterName
3060 global pCounterValue
3061 global iCounterValue
3062 global onosSet
3063 global onosSetName
3064 pCounterName = "TestON-Partitions"
3065 iCounterName = "TestON-inMemory"
3066 pCounterValue = 0
3067 iCounterValue = 0
3068 onosSet = set([])
3069 onosSetName = "TestON-set"
3070
3071 description = "Install Primitives app"
3072 main.case( description )
3073 main.step( "Install Primitives app" )
3074 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003075 node = main.activeNodes[0]
3076 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003077 utilities.assert_equals( expect=main.TRUE,
3078 actual=appResults,
3079 onpass="Primitives app activated",
3080 onfail="Primitives app not activated" )
3081 time.sleep( 5 ) # To allow all nodes to activate
3082
3083 def CASE17( self, main ):
3084 """
3085 Check for basic functionality with distributed primitives
3086 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003087 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003088 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003089 assert main, "main not defined"
3090 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003091 assert main.CLIs, "main.CLIs not defined"
3092 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003093 assert pCounterName, "pCounterName not defined"
3094 assert iCounterName, "iCounterName not defined"
3095 assert onosSetName, "onosSetName not defined"
3096 # NOTE: assert fails if value is 0/None/Empty/False
3097 try:
3098 pCounterValue
3099 except NameError:
3100 main.log.error( "pCounterValue not defined, setting to 0" )
3101 pCounterValue = 0
3102 try:
3103 iCounterValue
3104 except NameError:
3105 main.log.error( "iCounterValue not defined, setting to 0" )
3106 iCounterValue = 0
3107 try:
3108 onosSet
3109 except NameError:
3110 main.log.error( "onosSet not defined, setting to empty Set" )
3111 onosSet = set([])
3112 # Variables for the distributed primitives tests. These are local only
3113 addValue = "a"
3114 addAllValue = "a b c d e f"
3115 retainValue = "c d e f"
3116
3117 description = "Check for basic functionality with distributed " +\
3118 "primitives"
3119 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003120 main.caseExplanation = "Test the methods of the distributed " +\
3121 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003122 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003123 # Partitioned counters
3124 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003125 pCounters = []
3126 threads = []
3127 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003128 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003129 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3130 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003131 args=[ pCounterName ] )
3132 pCounterValue += 1
3133 addedPValues.append( pCounterValue )
3134 threads.append( t )
3135 t.start()
3136
3137 for t in threads:
3138 t.join()
3139 pCounters.append( t.result )
3140 # Check that counter incremented numController times
3141 pCounterResults = True
3142 for i in addedPValues:
3143 tmpResult = i in pCounters
3144 pCounterResults = pCounterResults and tmpResult
3145 if not tmpResult:
3146 main.log.error( str( i ) + " is not in partitioned "
3147 "counter incremented results" )
3148 utilities.assert_equals( expect=True,
3149 actual=pCounterResults,
3150 onpass="Default counter incremented",
3151 onfail="Error incrementing default" +
3152 " counter" )
3153
Jon Halle1a3b752015-07-22 13:02:46 -07003154 main.step( "Get then Increment a default counter on each node" )
3155 pCounters = []
3156 threads = []
3157 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003158 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003159 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3160 name="counterGetAndAdd-" + str( i ),
3161 args=[ pCounterName ] )
3162 addedPValues.append( pCounterValue )
3163 pCounterValue += 1
3164 threads.append( t )
3165 t.start()
3166
3167 for t in threads:
3168 t.join()
3169 pCounters.append( t.result )
3170 # Check that counter incremented numController times
3171 pCounterResults = True
3172 for i in addedPValues:
3173 tmpResult = i in pCounters
3174 pCounterResults = pCounterResults and tmpResult
3175 if not tmpResult:
3176 main.log.error( str( i ) + " is not in partitioned "
3177 "counter incremented results" )
3178 utilities.assert_equals( expect=True,
3179 actual=pCounterResults,
3180 onpass="Default counter incremented",
3181 onfail="Error incrementing default" +
3182 " counter" )
3183
3184 main.step( "Counters we added have the correct values" )
3185 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3186 utilities.assert_equals( expect=main.TRUE,
3187 actual=incrementCheck,
3188 onpass="Added counters are correct",
3189 onfail="Added counters are incorrect" )
3190
3191 main.step( "Add -8 to then get a default counter on each node" )
3192 pCounters = []
3193 threads = []
3194 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003195 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003196 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3197 name="counterIncrement-" + str( i ),
3198 args=[ pCounterName ],
3199 kwargs={ "delta": -8 } )
3200 pCounterValue += -8
3201 addedPValues.append( pCounterValue )
3202 threads.append( t )
3203 t.start()
3204
3205 for t in threads:
3206 t.join()
3207 pCounters.append( t.result )
3208 # Check that counter incremented numController times
3209 pCounterResults = True
3210 for i in addedPValues:
3211 tmpResult = i in pCounters
3212 pCounterResults = pCounterResults and tmpResult
3213 if not tmpResult:
3214 main.log.error( str( i ) + " is not in partitioned "
3215 "counter incremented results" )
3216 utilities.assert_equals( expect=True,
3217 actual=pCounterResults,
3218 onpass="Default counter incremented",
3219 onfail="Error incrementing default" +
3220 " counter" )
3221
3222 main.step( "Add 5 to then get a default counter on each node" )
3223 pCounters = []
3224 threads = []
3225 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003226 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003227 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3228 name="counterIncrement-" + str( i ),
3229 args=[ pCounterName ],
3230 kwargs={ "delta": 5 } )
3231 pCounterValue += 5
3232 addedPValues.append( pCounterValue )
3233 threads.append( t )
3234 t.start()
3235
3236 for t in threads:
3237 t.join()
3238 pCounters.append( t.result )
3239 # Check that counter incremented numController times
3240 pCounterResults = True
3241 for i in addedPValues:
3242 tmpResult = i in pCounters
3243 pCounterResults = pCounterResults and tmpResult
3244 if not tmpResult:
3245 main.log.error( str( i ) + " is not in partitioned "
3246 "counter incremented results" )
3247 utilities.assert_equals( expect=True,
3248 actual=pCounterResults,
3249 onpass="Default counter incremented",
3250 onfail="Error incrementing default" +
3251 " counter" )
3252
3253 main.step( "Get then add 5 to a default counter on each node" )
3254 pCounters = []
3255 threads = []
3256 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003257 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003258 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3259 name="counterIncrement-" + str( i ),
3260 args=[ pCounterName ],
3261 kwargs={ "delta": 5 } )
3262 addedPValues.append( pCounterValue )
3263 pCounterValue += 5
3264 threads.append( t )
3265 t.start()
3266
3267 for t in threads:
3268 t.join()
3269 pCounters.append( t.result )
3270 # Check that counter incremented numController times
3271 pCounterResults = True
3272 for i in addedPValues:
3273 tmpResult = i in pCounters
3274 pCounterResults = pCounterResults and tmpResult
3275 if not tmpResult:
3276 main.log.error( str( i ) + " is not in partitioned "
3277 "counter incremented results" )
3278 utilities.assert_equals( expect=True,
3279 actual=pCounterResults,
3280 onpass="Default counter incremented",
3281 onfail="Error incrementing default" +
3282 " counter" )
3283
3284 main.step( "Counters we added have the correct values" )
3285 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3286 utilities.assert_equals( expect=main.TRUE,
3287 actual=incrementCheck,
3288 onpass="Added counters are correct",
3289 onfail="Added counters are incorrect" )
3290
3291 # In-Memory counters
3292 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003293 iCounters = []
3294 addedIValues = []
3295 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003296 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003297 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003298 name="icounterIncrement-" + str( i ),
3299 args=[ iCounterName ],
3300 kwargs={ "inMemory": True } )
3301 iCounterValue += 1
3302 addedIValues.append( iCounterValue )
3303 threads.append( t )
3304 t.start()
3305
3306 for t in threads:
3307 t.join()
3308 iCounters.append( t.result )
3309 # Check that counter incremented numController times
3310 iCounterResults = True
3311 for i in addedIValues:
3312 tmpResult = i in iCounters
3313 iCounterResults = iCounterResults and tmpResult
3314 if not tmpResult:
3315 main.log.error( str( i ) + " is not in the in-memory "
3316 "counter incremented results" )
3317 utilities.assert_equals( expect=True,
3318 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003319 onpass="In-memory counter incremented",
3320 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003321 " counter" )
3322
Jon Halle1a3b752015-07-22 13:02:46 -07003323 main.step( "Get then Increment a in-memory counter on each node" )
3324 iCounters = []
3325 threads = []
3326 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003327 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003328 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3329 name="counterGetAndAdd-" + str( i ),
3330 args=[ iCounterName ],
3331 kwargs={ "inMemory": True } )
3332 addedIValues.append( iCounterValue )
3333 iCounterValue += 1
3334 threads.append( t )
3335 t.start()
3336
3337 for t in threads:
3338 t.join()
3339 iCounters.append( t.result )
3340 # Check that counter incremented numController times
3341 iCounterResults = True
3342 for i in addedIValues:
3343 tmpResult = i in iCounters
3344 iCounterResults = iCounterResults and tmpResult
3345 if not tmpResult:
3346 main.log.error( str( i ) + " is not in in-memory "
3347 "counter incremented results" )
3348 utilities.assert_equals( expect=True,
3349 actual=iCounterResults,
3350 onpass="In-memory counter incremented",
3351 onfail="Error incrementing in-memory" +
3352 " counter" )
3353
3354 main.step( "Counters we added have the correct values" )
3355 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3356 utilities.assert_equals( expect=main.TRUE,
3357 actual=incrementCheck,
3358 onpass="Added counters are correct",
3359 onfail="Added counters are incorrect" )
3360
3361 main.step( "Add -8 to then get a in-memory counter on each node" )
3362 iCounters = []
3363 threads = []
3364 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003365 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003366 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3367 name="counterIncrement-" + str( i ),
3368 args=[ iCounterName ],
3369 kwargs={ "delta": -8, "inMemory": True } )
3370 iCounterValue += -8
3371 addedIValues.append( iCounterValue )
3372 threads.append( t )
3373 t.start()
3374
3375 for t in threads:
3376 t.join()
3377 iCounters.append( t.result )
3378 # Check that counter incremented numController times
3379 iCounterResults = True
3380 for i in addedIValues:
3381 tmpResult = i in iCounters
3382 iCounterResults = iCounterResults and tmpResult
3383 if not tmpResult:
3384 main.log.error( str( i ) + " is not in in-memory "
3385 "counter incremented results" )
3386 utilities.assert_equals( expect=True,
3387 actual=pCounterResults,
3388 onpass="In-memory counter incremented",
3389 onfail="Error incrementing in-memory" +
3390 " counter" )
3391
3392 main.step( "Add 5 to then get a in-memory counter on each node" )
3393 iCounters = []
3394 threads = []
3395 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003396 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003397 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3398 name="counterIncrement-" + str( i ),
3399 args=[ iCounterName ],
3400 kwargs={ "delta": 5, "inMemory": True } )
3401 iCounterValue += 5
3402 addedIValues.append( iCounterValue )
3403 threads.append( t )
3404 t.start()
3405
3406 for t in threads:
3407 t.join()
3408 iCounters.append( t.result )
3409 # Check that counter incremented numController times
3410 iCounterResults = True
3411 for i in addedIValues:
3412 tmpResult = i in iCounters
3413 iCounterResults = iCounterResults and tmpResult
3414 if not tmpResult:
3415 main.log.error( str( i ) + " is not in in-memory "
3416 "counter incremented results" )
3417 utilities.assert_equals( expect=True,
3418 actual=pCounterResults,
3419 onpass="In-memory counter incremented",
3420 onfail="Error incrementing in-memory" +
3421 " counter" )
3422
3423 main.step( "Get then add 5 to a in-memory counter on each node" )
3424 iCounters = []
3425 threads = []
3426 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003427 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003428 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3429 name="counterIncrement-" + str( i ),
3430 args=[ iCounterName ],
3431 kwargs={ "delta": 5, "inMemory": True } )
3432 addedIValues.append( iCounterValue )
3433 iCounterValue += 5
3434 threads.append( t )
3435 t.start()
3436
3437 for t in threads:
3438 t.join()
3439 iCounters.append( t.result )
3440 # Check that counter incremented numController times
3441 iCounterResults = True
3442 for i in addedIValues:
3443 tmpResult = i in iCounters
3444 iCounterResults = iCounterResults and tmpResult
3445 if not tmpResult:
3446 main.log.error( str( i ) + " is not in in-memory "
3447 "counter incremented results" )
3448 utilities.assert_equals( expect=True,
3449 actual=iCounterResults,
3450 onpass="In-memory counter incremented",
3451 onfail="Error incrementing in-memory" +
3452 " counter" )
3453
3454 main.step( "Counters we added have the correct values" )
3455 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3456 utilities.assert_equals( expect=main.TRUE,
3457 actual=incrementCheck,
3458 onpass="Added counters are correct",
3459 onfail="Added counters are incorrect" )
3460
Jon Hall5cf14d52015-07-16 12:15:19 -07003461 main.step( "Check counters are consistant across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003462 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003463 utilities.assert_equals( expect=main.TRUE,
3464 actual=consistentCounterResults,
3465 onpass="ONOS counters are consistent " +
3466 "across nodes",
3467 onfail="ONOS Counters are inconsistent " +
3468 "across nodes" )
3469
3470 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003471 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3472 incrementCheck = incrementCheck and \
3473 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003474 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003475 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003476 onpass="Added counters are correct",
3477 onfail="Added counters are incorrect" )
3478 # DISTRIBUTED SETS
3479 main.step( "Distributed Set get" )
3480 size = len( onosSet )
3481 getResponses = []
3482 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003483 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003484 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003485 name="setTestGet-" + str( i ),
3486 args=[ onosSetName ] )
3487 threads.append( t )
3488 t.start()
3489 for t in threads:
3490 t.join()
3491 getResponses.append( t.result )
3492
3493 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003494 for i in range( len( main.activeNodes ) ):
3495 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003496 if isinstance( getResponses[ i ], list):
3497 current = set( getResponses[ i ] )
3498 if len( current ) == len( getResponses[ i ] ):
3499 # no repeats
3500 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003501 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003502 " has incorrect view" +
3503 " of set " + onosSetName + ":\n" +
3504 str( getResponses[ i ] ) )
3505 main.log.debug( "Expected: " + str( onosSet ) )
3506 main.log.debug( "Actual: " + str( current ) )
3507 getResults = main.FALSE
3508 else:
3509 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003510 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003511 " has repeat elements in" +
3512 " set " + onosSetName + ":\n" +
3513 str( getResponses[ i ] ) )
3514 getResults = main.FALSE
3515 elif getResponses[ i ] == main.ERROR:
3516 getResults = main.FALSE
3517 utilities.assert_equals( expect=main.TRUE,
3518 actual=getResults,
3519 onpass="Set elements are correct",
3520 onfail="Set elements are incorrect" )
3521
3522 main.step( "Distributed Set size" )
3523 sizeResponses = []
3524 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003525 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003526 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003527 name="setTestSize-" + str( i ),
3528 args=[ onosSetName ] )
3529 threads.append( t )
3530 t.start()
3531 for t in threads:
3532 t.join()
3533 sizeResponses.append( t.result )
3534
3535 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003536 for i in range( len( main.activeNodes ) ):
3537 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003538 if size != sizeResponses[ i ]:
3539 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003540 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003541 " expected a size of " + str( size ) +
3542 " for set " + onosSetName +
3543 " but got " + str( sizeResponses[ i ] ) )
3544 utilities.assert_equals( expect=main.TRUE,
3545 actual=sizeResults,
3546 onpass="Set sizes are correct",
3547 onfail="Set sizes are incorrect" )
3548
3549 main.step( "Distributed Set add()" )
3550 onosSet.add( addValue )
3551 addResponses = []
3552 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003553 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003554 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003555 name="setTestAdd-" + str( i ),
3556 args=[ onosSetName, addValue ] )
3557 threads.append( t )
3558 t.start()
3559 for t in threads:
3560 t.join()
3561 addResponses.append( t.result )
3562
3563 # main.TRUE = successfully changed the set
3564 # main.FALSE = action resulted in no change in set
3565 # main.ERROR - Some error in executing the function
3566 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003567 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003568 if addResponses[ i ] == main.TRUE:
3569 # All is well
3570 pass
3571 elif addResponses[ i ] == main.FALSE:
3572 # Already in set, probably fine
3573 pass
3574 elif addResponses[ i ] == main.ERROR:
3575 # Error in execution
3576 addResults = main.FALSE
3577 else:
3578 # unexpected result
3579 addResults = main.FALSE
3580 if addResults != main.TRUE:
3581 main.log.error( "Error executing set add" )
3582
3583 # Check if set is still correct
3584 size = len( onosSet )
3585 getResponses = []
3586 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003587 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003588 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003589 name="setTestGet-" + str( i ),
3590 args=[ onosSetName ] )
3591 threads.append( t )
3592 t.start()
3593 for t in threads:
3594 t.join()
3595 getResponses.append( t.result )
3596 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003597 for i in range( len( main.activeNodes ) ):
3598 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003599 if isinstance( getResponses[ i ], list):
3600 current = set( getResponses[ i ] )
3601 if len( current ) == len( getResponses[ i ] ):
3602 # no repeats
3603 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003604 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003605 " of set " + onosSetName + ":\n" +
3606 str( getResponses[ i ] ) )
3607 main.log.debug( "Expected: " + str( onosSet ) )
3608 main.log.debug( "Actual: " + str( current ) )
3609 getResults = main.FALSE
3610 else:
3611 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003612 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003613 " set " + onosSetName + ":\n" +
3614 str( getResponses[ i ] ) )
3615 getResults = main.FALSE
3616 elif getResponses[ i ] == main.ERROR:
3617 getResults = main.FALSE
3618 sizeResponses = []
3619 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003620 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003621 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003622 name="setTestSize-" + str( i ),
3623 args=[ onosSetName ] )
3624 threads.append( t )
3625 t.start()
3626 for t in threads:
3627 t.join()
3628 sizeResponses.append( t.result )
3629 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003630 for i in range( len( main.activeNodes ) ):
3631 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003632 if size != sizeResponses[ i ]:
3633 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003634 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003635 " expected a size of " + str( size ) +
3636 " for set " + onosSetName +
3637 " but got " + str( sizeResponses[ i ] ) )
3638 addResults = addResults and getResults and sizeResults
3639 utilities.assert_equals( expect=main.TRUE,
3640 actual=addResults,
3641 onpass="Set add correct",
3642 onfail="Set add was incorrect" )
3643
3644 main.step( "Distributed Set addAll()" )
3645 onosSet.update( addAllValue.split() )
3646 addResponses = []
3647 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003648 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003649 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003650 name="setTestAddAll-" + str( i ),
3651 args=[ onosSetName, addAllValue ] )
3652 threads.append( t )
3653 t.start()
3654 for t in threads:
3655 t.join()
3656 addResponses.append( t.result )
3657
3658 # main.TRUE = successfully changed the set
3659 # main.FALSE = action resulted in no change in set
3660 # main.ERROR - Some error in executing the function
3661 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003662 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003663 if addResponses[ i ] == main.TRUE:
3664 # All is well
3665 pass
3666 elif addResponses[ i ] == main.FALSE:
3667 # Already in set, probably fine
3668 pass
3669 elif addResponses[ i ] == main.ERROR:
3670 # Error in execution
3671 addAllResults = main.FALSE
3672 else:
3673 # unexpected result
3674 addAllResults = main.FALSE
3675 if addAllResults != main.TRUE:
3676 main.log.error( "Error executing set addAll" )
3677
3678 # Check if set is still correct
3679 size = len( onosSet )
3680 getResponses = []
3681 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003682 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003683 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003684 name="setTestGet-" + str( i ),
3685 args=[ onosSetName ] )
3686 threads.append( t )
3687 t.start()
3688 for t in threads:
3689 t.join()
3690 getResponses.append( t.result )
3691 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003692 for i in range( len( main.activeNodes ) ):
3693 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003694 if isinstance( getResponses[ i ], list):
3695 current = set( getResponses[ i ] )
3696 if len( current ) == len( getResponses[ i ] ):
3697 # no repeats
3698 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003699 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003700 " has incorrect view" +
3701 " of set " + onosSetName + ":\n" +
3702 str( getResponses[ i ] ) )
3703 main.log.debug( "Expected: " + str( onosSet ) )
3704 main.log.debug( "Actual: " + str( current ) )
3705 getResults = main.FALSE
3706 else:
3707 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003708 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003709 " has repeat elements in" +
3710 " set " + onosSetName + ":\n" +
3711 str( getResponses[ i ] ) )
3712 getResults = main.FALSE
3713 elif getResponses[ i ] == main.ERROR:
3714 getResults = main.FALSE
3715 sizeResponses = []
3716 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003717 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003718 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003719 name="setTestSize-" + str( i ),
3720 args=[ onosSetName ] )
3721 threads.append( t )
3722 t.start()
3723 for t in threads:
3724 t.join()
3725 sizeResponses.append( t.result )
3726 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003727 for i in range( len( main.activeNodes ) ):
3728 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003729 if size != sizeResponses[ i ]:
3730 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003731 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003732 " expected a size of " + str( size ) +
3733 " for set " + onosSetName +
3734 " but got " + str( sizeResponses[ i ] ) )
3735 addAllResults = addAllResults and getResults and sizeResults
3736 utilities.assert_equals( expect=main.TRUE,
3737 actual=addAllResults,
3738 onpass="Set addAll correct",
3739 onfail="Set addAll was incorrect" )
3740
3741 main.step( "Distributed Set contains()" )
3742 containsResponses = []
3743 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003744 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003745 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003746 name="setContains-" + str( i ),
3747 args=[ onosSetName ],
3748 kwargs={ "values": addValue } )
3749 threads.append( t )
3750 t.start()
3751 for t in threads:
3752 t.join()
3753 # NOTE: This is the tuple
3754 containsResponses.append( t.result )
3755
3756 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003757 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003758 if containsResponses[ i ] == main.ERROR:
3759 containsResults = main.FALSE
3760 else:
3761 containsResults = containsResults and\
3762 containsResponses[ i ][ 1 ]
3763 utilities.assert_equals( expect=main.TRUE,
3764 actual=containsResults,
3765 onpass="Set contains is functional",
3766 onfail="Set contains failed" )
3767
3768 main.step( "Distributed Set containsAll()" )
3769 containsAllResponses = []
3770 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003771 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003772 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003773 name="setContainsAll-" + str( i ),
3774 args=[ onosSetName ],
3775 kwargs={ "values": addAllValue } )
3776 threads.append( t )
3777 t.start()
3778 for t in threads:
3779 t.join()
3780 # NOTE: This is the tuple
3781 containsAllResponses.append( t.result )
3782
3783 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003784 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003785 if containsResponses[ i ] == main.ERROR:
3786 containsResults = main.FALSE
3787 else:
3788 containsResults = containsResults and\
3789 containsResponses[ i ][ 1 ]
3790 utilities.assert_equals( expect=main.TRUE,
3791 actual=containsAllResults,
3792 onpass="Set containsAll is functional",
3793 onfail="Set containsAll failed" )
3794
3795 main.step( "Distributed Set remove()" )
3796 onosSet.remove( addValue )
3797 removeResponses = []
3798 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003799 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003800 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003801 name="setTestRemove-" + str( i ),
3802 args=[ onosSetName, addValue ] )
3803 threads.append( t )
3804 t.start()
3805 for t in threads:
3806 t.join()
3807 removeResponses.append( t.result )
3808
3809 # main.TRUE = successfully changed the set
3810 # main.FALSE = action resulted in no change in set
3811 # main.ERROR - Some error in executing the function
3812 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003813 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003814 if removeResponses[ i ] == main.TRUE:
3815 # All is well
3816 pass
3817 elif removeResponses[ i ] == main.FALSE:
3818 # not in set, probably fine
3819 pass
3820 elif removeResponses[ i ] == main.ERROR:
3821 # Error in execution
3822 removeResults = main.FALSE
3823 else:
3824 # unexpected result
3825 removeResults = main.FALSE
3826 if removeResults != main.TRUE:
3827 main.log.error( "Error executing set remove" )
3828
3829 # Check if set is still correct
3830 size = len( onosSet )
3831 getResponses = []
3832 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003833 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003834 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003835 name="setTestGet-" + str( i ),
3836 args=[ onosSetName ] )
3837 threads.append( t )
3838 t.start()
3839 for t in threads:
3840 t.join()
3841 getResponses.append( t.result )
3842 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003843 for i in range( len( main.activeNodes ) ):
3844 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003845 if isinstance( getResponses[ i ], list):
3846 current = set( getResponses[ i ] )
3847 if len( current ) == len( getResponses[ i ] ):
3848 # no repeats
3849 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003850 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003851 " has incorrect view" +
3852 " of set " + onosSetName + ":\n" +
3853 str( getResponses[ i ] ) )
3854 main.log.debug( "Expected: " + str( onosSet ) )
3855 main.log.debug( "Actual: " + str( current ) )
3856 getResults = main.FALSE
3857 else:
3858 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003859 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003860 " has repeat elements in" +
3861 " set " + onosSetName + ":\n" +
3862 str( getResponses[ i ] ) )
3863 getResults = main.FALSE
3864 elif getResponses[ i ] == main.ERROR:
3865 getResults = main.FALSE
3866 sizeResponses = []
3867 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003868 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003869 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003870 name="setTestSize-" + str( i ),
3871 args=[ onosSetName ] )
3872 threads.append( t )
3873 t.start()
3874 for t in threads:
3875 t.join()
3876 sizeResponses.append( t.result )
3877 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003878 for i in range( len( main.activeNodes ) ):
3879 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003880 if size != sizeResponses[ i ]:
3881 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003882 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003883 " expected a size of " + str( size ) +
3884 " for set " + onosSetName +
3885 " but got " + str( sizeResponses[ i ] ) )
3886 removeResults = removeResults and getResults and sizeResults
3887 utilities.assert_equals( expect=main.TRUE,
3888 actual=removeResults,
3889 onpass="Set remove correct",
3890 onfail="Set remove was incorrect" )
3891
3892 main.step( "Distributed Set removeAll()" )
3893 onosSet.difference_update( addAllValue.split() )
3894 removeAllResponses = []
3895 threads = []
3896 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003897 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003898 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003899 name="setTestRemoveAll-" + str( i ),
3900 args=[ onosSetName, addAllValue ] )
3901 threads.append( t )
3902 t.start()
3903 for t in threads:
3904 t.join()
3905 removeAllResponses.append( t.result )
3906 except Exception, e:
3907 main.log.exception(e)
3908
3909 # main.TRUE = successfully changed the set
3910 # main.FALSE = action resulted in no change in set
3911 # main.ERROR - Some error in executing the function
3912 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003913 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003914 if removeAllResponses[ i ] == main.TRUE:
3915 # All is well
3916 pass
3917 elif removeAllResponses[ i ] == main.FALSE:
3918 # not in set, probably fine
3919 pass
3920 elif removeAllResponses[ i ] == main.ERROR:
3921 # Error in execution
3922 removeAllResults = main.FALSE
3923 else:
3924 # unexpected result
3925 removeAllResults = main.FALSE
3926 if removeAllResults != main.TRUE:
3927 main.log.error( "Error executing set removeAll" )
3928
3929 # Check if set is still correct
3930 size = len( onosSet )
3931 getResponses = []
3932 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003933 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003934 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003935 name="setTestGet-" + str( i ),
3936 args=[ onosSetName ] )
3937 threads.append( t )
3938 t.start()
3939 for t in threads:
3940 t.join()
3941 getResponses.append( t.result )
3942 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003943 for i in range( len( main.activeNodes ) ):
3944 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003945 if isinstance( getResponses[ i ], list):
3946 current = set( getResponses[ i ] )
3947 if len( current ) == len( getResponses[ i ] ):
3948 # no repeats
3949 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003950 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003951 " has incorrect view" +
3952 " of set " + onosSetName + ":\n" +
3953 str( getResponses[ i ] ) )
3954 main.log.debug( "Expected: " + str( onosSet ) )
3955 main.log.debug( "Actual: " + str( current ) )
3956 getResults = main.FALSE
3957 else:
3958 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003959 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003960 " has repeat elements in" +
3961 " set " + onosSetName + ":\n" +
3962 str( getResponses[ i ] ) )
3963 getResults = main.FALSE
3964 elif getResponses[ i ] == main.ERROR:
3965 getResults = main.FALSE
3966 sizeResponses = []
3967 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003968 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003969 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003970 name="setTestSize-" + str( i ),
3971 args=[ onosSetName ] )
3972 threads.append( t )
3973 t.start()
3974 for t in threads:
3975 t.join()
3976 sizeResponses.append( t.result )
3977 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003978 for i in range( len( main.activeNodes ) ):
3979 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003980 if size != sizeResponses[ i ]:
3981 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003982 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003983 " expected a size of " + str( size ) +
3984 " for set " + onosSetName +
3985 " but got " + str( sizeResponses[ i ] ) )
3986 removeAllResults = removeAllResults and getResults and sizeResults
3987 utilities.assert_equals( expect=main.TRUE,
3988 actual=removeAllResults,
3989 onpass="Set removeAll correct",
3990 onfail="Set removeAll was incorrect" )
3991
3992 main.step( "Distributed Set addAll()" )
3993 onosSet.update( addAllValue.split() )
3994 addResponses = []
3995 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003996 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003997 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003998 name="setTestAddAll-" + str( i ),
3999 args=[ onosSetName, addAllValue ] )
4000 threads.append( t )
4001 t.start()
4002 for t in threads:
4003 t.join()
4004 addResponses.append( t.result )
4005
4006 # main.TRUE = successfully changed the set
4007 # main.FALSE = action resulted in no change in set
4008 # main.ERROR - Some error in executing the function
4009 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004010 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004011 if addResponses[ i ] == main.TRUE:
4012 # All is well
4013 pass
4014 elif addResponses[ i ] == main.FALSE:
4015 # Already in set, probably fine
4016 pass
4017 elif addResponses[ i ] == main.ERROR:
4018 # Error in execution
4019 addAllResults = main.FALSE
4020 else:
4021 # unexpected result
4022 addAllResults = main.FALSE
4023 if addAllResults != main.TRUE:
4024 main.log.error( "Error executing set addAll" )
4025
4026 # Check if set is still correct
4027 size = len( onosSet )
4028 getResponses = []
4029 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004030 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004031 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004032 name="setTestGet-" + str( i ),
4033 args=[ onosSetName ] )
4034 threads.append( t )
4035 t.start()
4036 for t in threads:
4037 t.join()
4038 getResponses.append( t.result )
4039 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004040 for i in range( len( main.activeNodes ) ):
4041 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004042 if isinstance( getResponses[ i ], list):
4043 current = set( getResponses[ i ] )
4044 if len( current ) == len( getResponses[ i ] ):
4045 # no repeats
4046 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004047 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004048 " has incorrect view" +
4049 " of set " + onosSetName + ":\n" +
4050 str( getResponses[ i ] ) )
4051 main.log.debug( "Expected: " + str( onosSet ) )
4052 main.log.debug( "Actual: " + str( current ) )
4053 getResults = main.FALSE
4054 else:
4055 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004056 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004057 " has repeat elements in" +
4058 " set " + onosSetName + ":\n" +
4059 str( getResponses[ i ] ) )
4060 getResults = main.FALSE
4061 elif getResponses[ i ] == main.ERROR:
4062 getResults = main.FALSE
4063 sizeResponses = []
4064 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004065 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004066 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004067 name="setTestSize-" + str( i ),
4068 args=[ onosSetName ] )
4069 threads.append( t )
4070 t.start()
4071 for t in threads:
4072 t.join()
4073 sizeResponses.append( t.result )
4074 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004075 for i in range( len( main.activeNodes ) ):
4076 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004077 if size != sizeResponses[ i ]:
4078 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004079 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004080 " expected a size of " + str( size ) +
4081 " for set " + onosSetName +
4082 " but got " + str( sizeResponses[ i ] ) )
4083 addAllResults = addAllResults and getResults and sizeResults
4084 utilities.assert_equals( expect=main.TRUE,
4085 actual=addAllResults,
4086 onpass="Set addAll correct",
4087 onfail="Set addAll was incorrect" )
4088
4089 main.step( "Distributed Set clear()" )
4090 onosSet.clear()
4091 clearResponses = []
4092 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004093 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004094 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004095 name="setTestClear-" + str( i ),
4096 args=[ onosSetName, " "], # Values doesn't matter
4097 kwargs={ "clear": True } )
4098 threads.append( t )
4099 t.start()
4100 for t in threads:
4101 t.join()
4102 clearResponses.append( t.result )
4103
4104 # main.TRUE = successfully changed the set
4105 # main.FALSE = action resulted in no change in set
4106 # main.ERROR - Some error in executing the function
4107 clearResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004108 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004109 if clearResponses[ i ] == main.TRUE:
4110 # All is well
4111 pass
4112 elif clearResponses[ i ] == main.FALSE:
4113 # Nothing set, probably fine
4114 pass
4115 elif clearResponses[ i ] == main.ERROR:
4116 # Error in execution
4117 clearResults = main.FALSE
4118 else:
4119 # unexpected result
4120 clearResults = main.FALSE
4121 if clearResults != main.TRUE:
4122 main.log.error( "Error executing set clear" )
4123
4124 # Check if set is still correct
4125 size = len( onosSet )
4126 getResponses = []
4127 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004128 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004129 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004130 name="setTestGet-" + str( i ),
4131 args=[ onosSetName ] )
4132 threads.append( t )
4133 t.start()
4134 for t in threads:
4135 t.join()
4136 getResponses.append( t.result )
4137 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004138 for i in range( len( main.activeNodes ) ):
4139 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004140 if isinstance( getResponses[ i ], list):
4141 current = set( getResponses[ i ] )
4142 if len( current ) == len( getResponses[ i ] ):
4143 # no repeats
4144 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004145 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004146 " has incorrect view" +
4147 " of set " + onosSetName + ":\n" +
4148 str( getResponses[ i ] ) )
4149 main.log.debug( "Expected: " + str( onosSet ) )
4150 main.log.debug( "Actual: " + str( current ) )
4151 getResults = main.FALSE
4152 else:
4153 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004154 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004155 " has repeat elements in" +
4156 " set " + onosSetName + ":\n" +
4157 str( getResponses[ i ] ) )
4158 getResults = main.FALSE
4159 elif getResponses[ i ] == main.ERROR:
4160 getResults = main.FALSE
4161 sizeResponses = []
4162 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004163 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004164 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004165 name="setTestSize-" + str( i ),
4166 args=[ onosSetName ] )
4167 threads.append( t )
4168 t.start()
4169 for t in threads:
4170 t.join()
4171 sizeResponses.append( t.result )
4172 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004173 for i in range( len( main.activeNodes ) ):
4174 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004175 if size != sizeResponses[ i ]:
4176 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004177 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004178 " expected a size of " + str( size ) +
4179 " for set " + onosSetName +
4180 " but got " + str( sizeResponses[ i ] ) )
4181 clearResults = clearResults and getResults and sizeResults
4182 utilities.assert_equals( expect=main.TRUE,
4183 actual=clearResults,
4184 onpass="Set clear correct",
4185 onfail="Set clear was incorrect" )
4186
4187 main.step( "Distributed Set addAll()" )
4188 onosSet.update( addAllValue.split() )
4189 addResponses = []
4190 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004191 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004192 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004193 name="setTestAddAll-" + str( i ),
4194 args=[ onosSetName, addAllValue ] )
4195 threads.append( t )
4196 t.start()
4197 for t in threads:
4198 t.join()
4199 addResponses.append( t.result )
4200
4201 # main.TRUE = successfully changed the set
4202 # main.FALSE = action resulted in no change in set
4203 # main.ERROR - Some error in executing the function
4204 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004205 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004206 if addResponses[ i ] == main.TRUE:
4207 # All is well
4208 pass
4209 elif addResponses[ i ] == main.FALSE:
4210 # Already in set, probably fine
4211 pass
4212 elif addResponses[ i ] == main.ERROR:
4213 # Error in execution
4214 addAllResults = main.FALSE
4215 else:
4216 # unexpected result
4217 addAllResults = main.FALSE
4218 if addAllResults != main.TRUE:
4219 main.log.error( "Error executing set addAll" )
4220
4221 # Check if set is still correct
4222 size = len( onosSet )
4223 getResponses = []
4224 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004225 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004226 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004227 name="setTestGet-" + str( i ),
4228 args=[ onosSetName ] )
4229 threads.append( t )
4230 t.start()
4231 for t in threads:
4232 t.join()
4233 getResponses.append( t.result )
4234 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004235 for i in range( len( main.activeNodes ) ):
4236 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004237 if isinstance( getResponses[ i ], list):
4238 current = set( getResponses[ i ] )
4239 if len( current ) == len( getResponses[ i ] ):
4240 # no repeats
4241 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004242 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004243 " has incorrect view" +
4244 " of set " + onosSetName + ":\n" +
4245 str( getResponses[ i ] ) )
4246 main.log.debug( "Expected: " + str( onosSet ) )
4247 main.log.debug( "Actual: " + str( current ) )
4248 getResults = main.FALSE
4249 else:
4250 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004251 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004252 " has repeat elements in" +
4253 " set " + onosSetName + ":\n" +
4254 str( getResponses[ i ] ) )
4255 getResults = main.FALSE
4256 elif getResponses[ i ] == main.ERROR:
4257 getResults = main.FALSE
4258 sizeResponses = []
4259 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004260 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004261 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004262 name="setTestSize-" + str( i ),
4263 args=[ onosSetName ] )
4264 threads.append( t )
4265 t.start()
4266 for t in threads:
4267 t.join()
4268 sizeResponses.append( t.result )
4269 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004270 for i in range( len( main.activeNodes ) ):
4271 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004272 if size != sizeResponses[ i ]:
4273 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004274 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004275 " expected a size of " + str( size ) +
4276 " for set " + onosSetName +
4277 " but got " + str( sizeResponses[ i ] ) )
4278 addAllResults = addAllResults and getResults and sizeResults
4279 utilities.assert_equals( expect=main.TRUE,
4280 actual=addAllResults,
4281 onpass="Set addAll correct",
4282 onfail="Set addAll was incorrect" )
4283
4284 main.step( "Distributed Set retain()" )
4285 onosSet.intersection_update( retainValue.split() )
4286 retainResponses = []
4287 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004288 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004289 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004290 name="setTestRetain-" + str( i ),
4291 args=[ onosSetName, retainValue ],
4292 kwargs={ "retain": True } )
4293 threads.append( t )
4294 t.start()
4295 for t in threads:
4296 t.join()
4297 retainResponses.append( t.result )
4298
4299 # main.TRUE = successfully changed the set
4300 # main.FALSE = action resulted in no change in set
4301 # main.ERROR - Some error in executing the function
4302 retainResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004303 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004304 if retainResponses[ i ] == main.TRUE:
4305 # All is well
4306 pass
4307 elif retainResponses[ i ] == main.FALSE:
4308 # Already in set, probably fine
4309 pass
4310 elif retainResponses[ i ] == main.ERROR:
4311 # Error in execution
4312 retainResults = main.FALSE
4313 else:
4314 # unexpected result
4315 retainResults = main.FALSE
4316 if retainResults != main.TRUE:
4317 main.log.error( "Error executing set retain" )
4318
4319 # Check if set is still correct
4320 size = len( onosSet )
4321 getResponses = []
4322 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004323 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004324 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004325 name="setTestGet-" + str( i ),
4326 args=[ onosSetName ] )
4327 threads.append( t )
4328 t.start()
4329 for t in threads:
4330 t.join()
4331 getResponses.append( t.result )
4332 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004333 for i in range( len( main.activeNodes ) ):
4334 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004335 if isinstance( getResponses[ i ], list):
4336 current = set( getResponses[ i ] )
4337 if len( current ) == len( getResponses[ i ] ):
4338 # no repeats
4339 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004340 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004341 " has incorrect view" +
4342 " of set " + onosSetName + ":\n" +
4343 str( getResponses[ i ] ) )
4344 main.log.debug( "Expected: " + str( onosSet ) )
4345 main.log.debug( "Actual: " + str( current ) )
4346 getResults = main.FALSE
4347 else:
4348 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004349 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004350 " has repeat elements in" +
4351 " set " + onosSetName + ":\n" +
4352 str( getResponses[ i ] ) )
4353 getResults = main.FALSE
4354 elif getResponses[ i ] == main.ERROR:
4355 getResults = main.FALSE
4356 sizeResponses = []
4357 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004358 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004359 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004360 name="setTestSize-" + str( i ),
4361 args=[ onosSetName ] )
4362 threads.append( t )
4363 t.start()
4364 for t in threads:
4365 t.join()
4366 sizeResponses.append( t.result )
4367 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004368 for i in range( len( main.activeNodes ) ):
4369 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004370 if size != sizeResponses[ i ]:
4371 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004372 main.log.error( "ONOS" + node + " expected a size of " +
Jon Hall5cf14d52015-07-16 12:15:19 -07004373 str( size ) + " for set " + onosSetName +
4374 " but got " + str( sizeResponses[ i ] ) )
4375 retainResults = retainResults and getResults and sizeResults
4376 utilities.assert_equals( expect=main.TRUE,
4377 actual=retainResults,
4378 onpass="Set retain correct",
4379 onfail="Set retain was incorrect" )
4380
Jon Hall2a5002c2015-08-21 16:49:11 -07004381 # Transactional maps
4382 main.step( "Partitioned Transactional maps put" )
4383 tMapValue = "Testing"
4384 numKeys = 100
4385 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004386 node = main.activeNodes[0]
4387 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall2a5002c2015-08-21 16:49:11 -07004388 if len( putResponses ) == 100:
4389 for i in putResponses:
4390 if putResponses[ i ][ 'value' ] != tMapValue:
4391 putResult = False
4392 else:
4393 putResult = False
4394 if not putResult:
4395 main.log.debug( "Put response values: " + str( putResponses ) )
4396 utilities.assert_equals( expect=True,
4397 actual=putResult,
4398 onpass="Partitioned Transactional Map put successful",
4399 onfail="Partitioned Transactional Map put values are incorrect" )
4400
4401 main.step( "Partitioned Transactional maps get" )
4402 getCheck = True
4403 for n in range( 1, numKeys + 1 ):
4404 getResponses = []
4405 threads = []
4406 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004407 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004408 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4409 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004410 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004411 threads.append( t )
4412 t.start()
4413 for t in threads:
4414 t.join()
4415 getResponses.append( t.result )
4416 for node in getResponses:
4417 if node != tMapValue:
4418 valueCheck = False
4419 if not valueCheck:
4420 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4421 main.log.warn( getResponses )
4422 getCheck = getCheck and valueCheck
4423 utilities.assert_equals( expect=True,
4424 actual=getCheck,
4425 onpass="Partitioned Transactional Map get values were correct",
4426 onfail="Partitioned Transactional Map values incorrect" )
4427
4428 main.step( "In-memory Transactional maps put" )
4429 tMapValue = "Testing"
4430 numKeys = 100
4431 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004432 node = main.activeNodes[0]
4433 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
Jon Hall2a5002c2015-08-21 16:49:11 -07004434 if len( putResponses ) == 100:
4435 for i in putResponses:
4436 if putResponses[ i ][ 'value' ] != tMapValue:
4437 putResult = False
4438 else:
4439 putResult = False
4440 if not putResult:
4441 main.log.debug( "Put response values: " + str( putResponses ) )
4442 utilities.assert_equals( expect=True,
4443 actual=putResult,
4444 onpass="In-Memory Transactional Map put successful",
4445 onfail="In-Memory Transactional Map put values are incorrect" )
4446
4447 main.step( "In-Memory Transactional maps get" )
4448 getCheck = True
4449 for n in range( 1, numKeys + 1 ):
4450 getResponses = []
4451 threads = []
4452 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004453 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004454 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4455 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004456 args=[ "Key" + str( n ) ],
Jon Hall2a5002c2015-08-21 16:49:11 -07004457 kwargs={ "inMemory": True } )
4458 threads.append( t )
4459 t.start()
4460 for t in threads:
4461 t.join()
4462 getResponses.append( t.result )
4463 for node in getResponses:
4464 if node != tMapValue:
4465 valueCheck = False
4466 if not valueCheck:
4467 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4468 main.log.warn( getResponses )
4469 getCheck = getCheck and valueCheck
4470 utilities.assert_equals( expect=True,
4471 actual=getCheck,
4472 onpass="In-Memory Transactional Map get values were correct",
4473 onfail="In-Memory Transactional Map values incorrect" )