blob: cb5032bc71402d0ef492ad455cbfe068b7d73130 [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
    a minority of its nodes restarting

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
26
27
class HAstopNodes:
    """
    ONOS High Availability test: stop a minority of the ONOS cluster nodes
    and verify that the cluster continues to function and recovers.
    Each CASE* method is a TestON test case driven by the .params file.
    """

    def __init__( self ):
        # TestON requires a default attribute on test classes.
        self.default = ''
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hall3b489db2015-10-05 14:38:37 -070052 import pexpect
Jon Hallf3d16e72015-12-16 17:45:08 -080053 import time
Jon Hallb3ed8ed2015-10-28 16:43:55 -070054 main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
Jon Hall5cf14d52015-07-16 12:15:19 -070055 "initialization" )
56 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070057 main.caseExplanation = "Setup the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070058 "installing ONOS, starting Mininet and ONOS" +\
59 "cli sessions."
60 # TODO: save all the timers and output them for plotting
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
Jon Halle1a3b752015-07-22 13:02:46 -070069 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070070 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070071 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070074 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
81
82 # FIXME: just get controller port from params?
83 # TODO: do we really need all these?
84 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
85 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
86 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
87 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
88 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
89 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
90 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
91
Jon Halle1a3b752015-07-22 13:02:46 -070092 try:
93 fileName = "Counters"
94 # TODO: Maybe make a library folder somewhere?
95 path = main.params[ 'imports' ][ 'path' ]
96 main.Counters = imp.load_source( fileName,
97 path + fileName + ".py" )
98 except Exception as e:
99 main.log.exception( e )
100 main.cleanup()
101 main.exit()
102
103 main.CLIs = []
104 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700105 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700106 for i in range( 1, main.numCtrls + 1 ):
107 try:
108 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
109 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
110 ipList.append( main.nodes[ -1 ].ip_address )
111 except AttributeError:
112 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700113
114 main.step( "Create cell file" )
115 cellAppString = main.params[ 'ENV' ][ 'appString' ]
116 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
117 main.Mininet1.ip_address,
118 cellAppString, ipList )
119 main.step( "Applying cell variable to environment" )
120 cellResult = main.ONOSbench.setCell( cellName )
121 verifyResult = main.ONOSbench.verifyCell()
122
123 # FIXME:this is short term fix
124 main.log.info( "Removing raft logs" )
125 main.ONOSbench.onosRemoveRaftLogs()
126
127 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700128 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700129 main.ONOSbench.onosUninstall( node.ip_address )
130
131 # Make sure ONOS is DEAD
132 main.log.info( "Killing any ONOS processes" )
133 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700134 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700135 killed = main.ONOSbench.onosKill( node.ip_address )
136 killResults = killResults and killed
137
138 cleanInstallResult = main.TRUE
139 gitPullResult = main.TRUE
140
141 main.step( "Starting Mininet" )
142 # scp topo file to mininet
143 # TODO: move to params?
144 topoName = "obelisk.py"
145 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700146 main.ONOSbench.scp( main.Mininet1,
147 filePath + topoName,
148 main.Mininet1.home,
149 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700150 mnResult = main.Mininet1.startNet( )
151 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
152 onpass="Mininet Started",
153 onfail="Error starting Mininet" )
154
155 main.step( "Git checkout and pull " + gitBranch )
156 if PULLCODE:
157 main.ONOSbench.gitCheckout( gitBranch )
158 gitPullResult = main.ONOSbench.gitPull()
159 # values of 1 or 3 are good
160 utilities.assert_lesser( expect=0, actual=gitPullResult,
161 onpass="Git pull successful",
162 onfail="Git pull failed" )
163 main.ONOSbench.getVersion( report=True )
164
165 main.step( "Using mvn clean install" )
166 cleanInstallResult = main.TRUE
167 if PULLCODE and gitPullResult == main.TRUE:
168 cleanInstallResult = main.ONOSbench.cleanInstall()
169 else:
170 main.log.warn( "Did not pull new code so skipping mvn " +
171 "clean install" )
172 utilities.assert_equals( expect=main.TRUE,
173 actual=cleanInstallResult,
174 onpass="MCI successful",
175 onfail="MCI failed" )
176 # GRAPHS
177 # NOTE: important params here:
178 # job = name of Jenkins job
179 # Plot Name = Plot-HA, only can be used if multiple plots
180 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700181 job = "HAstopNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700182 plotName = "Plot-HA"
183 graphs = '<ac:structured-macro ac:name="html">\n'
184 graphs += '<ac:plain-text-body><![CDATA[\n'
185 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
186 '/plot/' + plotName + '/getPlot?index=0' +\
187 '&width=500&height=300"' +\
188 'noborder="0" width="500" height="300" scrolling="yes" ' +\
189 'seamless="seamless"></iframe>\n'
190 graphs += ']]></ac:plain-text-body>\n'
191 graphs += '</ac:structured-macro>\n'
192 main.log.wiki(graphs)
193
194 main.step( "Creating ONOS package" )
Jon Hall3b489db2015-10-05 14:38:37 -0700195 # copy gen-partions file to ONOS
196 # NOTE: this assumes TestON and ONOS are on the same machine
197 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
198 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
199 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
200 main.ONOSbench.ip_address,
201 srcFile,
202 dstDir,
203 pwd=main.ONOSbench.pwd,
204 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700205 packageResult = main.ONOSbench.onosPackage()
206 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
207 onpass="ONOS package successful",
208 onfail="ONOS package failed" )
209
210 main.step( "Installing ONOS package" )
211 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700212 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700213 tmpResult = main.ONOSbench.onosInstall( options="-f",
214 node=node.ip_address )
215 onosInstallResult = onosInstallResult and tmpResult
216 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
217 onpass="ONOS install successful",
218 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700219 # clean up gen-partitions file
220 try:
221 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
222 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
223 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
224 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
225 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
226 str( main.ONOSbench.handle.before ) )
227 except ( pexpect.TIMEOUT, pexpect.EOF ):
228 main.log.exception( "ONOSbench: pexpect exception found:" +
229 main.ONOSbench.handle.before )
230 main.cleanup()
231 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700232
233 main.step( "Checking if ONOS is up yet" )
234 for i in range( 2 ):
235 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700236 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700237 started = main.ONOSbench.isup( node.ip_address )
238 if not started:
239 main.log.error( node.name + " didn't start!" )
240 main.ONOSbench.onosStop( node.ip_address )
241 main.ONOSbench.onosStart( node.ip_address )
242 onosIsupResult = onosIsupResult and started
243 if onosIsupResult == main.TRUE:
244 break
245 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
246 onpass="ONOS startup successful",
247 onfail="ONOS startup failed" )
248
249 main.log.step( "Starting ONOS CLI sessions" )
250 cliResults = main.TRUE
251 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700252 for i in range( main.numCtrls ):
253 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700254 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700255 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700256 threads.append( t )
257 t.start()
258
259 for t in threads:
260 t.join()
261 cliResults = cliResults and t.result
262 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
263 onpass="ONOS cli startup successful",
264 onfail="ONOS cli startup failed" )
265
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700266 # Create a list of active nodes for use when some nodes are stopped
267 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
268
Jon Hall5cf14d52015-07-16 12:15:19 -0700269 if main.params[ 'tcpdump' ].lower() == "true":
270 main.step( "Start Packet Capture MN" )
271 main.Mininet2.startTcpdump(
272 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
273 + "-MN.pcap",
274 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
275 port=main.params[ 'MNtcpdump' ][ 'port' ] )
276
277 main.step( "App Ids check" )
Jon Hallf3d16e72015-12-16 17:45:08 -0800278 time.sleep(60)
Jon Hall5cf14d52015-07-16 12:15:19 -0700279 appCheck = main.TRUE
280 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700281 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700282 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700283 name="appToIDCheck-" + str( i ),
284 args=[] )
285 threads.append( t )
286 t.start()
287
288 for t in threads:
289 t.join()
290 appCheck = appCheck and t.result
291 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700292 node = main.activeNodes[0]
293 main.log.warn( main.CLIs[node].apps() )
294 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700295 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
296 onpass="App Ids seem to be correct",
297 onfail="Something is wrong with app Ids" )
298
299 if cliResults == main.FALSE:
300 main.log.error( "Failed to start ONOS, stopping test" )
301 main.cleanup()
302 main.exit()
303
304 def CASE2( self, main ):
305 """
306 Assign devices to controllers
307 """
308 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700309 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700310 assert main, "main not defined"
311 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700312 assert main.CLIs, "main.CLIs not defined"
313 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700314 assert ONOS1Port, "ONOS1Port not defined"
315 assert ONOS2Port, "ONOS2Port not defined"
316 assert ONOS3Port, "ONOS3Port not defined"
317 assert ONOS4Port, "ONOS4Port not defined"
318 assert ONOS5Port, "ONOS5Port not defined"
319 assert ONOS6Port, "ONOS6Port not defined"
320 assert ONOS7Port, "ONOS7Port not defined"
321
322 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700323 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700324 "and check that an ONOS node becomes the " +\
325 "master of the device."
326 main.step( "Assign switches to controllers" )
327
328 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700329 for i in range( main.numCtrls ):
330 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700331 swList = []
332 for i in range( 1, 29 ):
333 swList.append( "s" + str( i ) )
334 main.Mininet1.assignSwController( sw=swList, ip=ipList )
335
336 mastershipCheck = main.TRUE
337 for i in range( 1, 29 ):
338 response = main.Mininet1.getSwController( "s" + str( i ) )
339 try:
340 main.log.info( str( response ) )
341 except Exception:
342 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700343 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700344 if re.search( "tcp:" + node.ip_address, response ):
345 mastershipCheck = mastershipCheck and main.TRUE
346 else:
347 main.log.error( "Error, node " + node.ip_address + " is " +
348 "not in the list of controllers s" +
349 str( i ) + " is connecting to." )
350 mastershipCheck = main.FALSE
351 utilities.assert_equals(
352 expect=main.TRUE,
353 actual=mastershipCheck,
354 onpass="Switch mastership assigned correctly",
355 onfail="Switches not assigned correctly to controllers" )
356
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        For each of the 28 obelisk switches, pick a specific controller
        (index modulo main.numCtrls so any cluster size works), then use the
        ONOS 'device-role' CLI to make that controller the device's master.
        Finally re-read the roles to confirm the assignments took effect.
        """
        import time
        # Sanity-check that CASE1 has populated the environment
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                                "device. Then manually assign" +\
                                " mastership to specific ONOS nodes using" +\
                                " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # Run all CLI commands through the first active node
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = controller index, ip = that controller's address,
                # deviceId = ONOS id of the switch (looked up by dpid suffix)
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    # dpids for s8-s17 are 3008..3017
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    # dpids for s18-s27 are 6018..6027
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment: the requested controller must now be master
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
478
479 def CASE3( self, main ):
480 """
481 Assign intents
482 """
483 import time
484 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700485 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700486 assert main, "main not defined"
487 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700488 assert main.CLIs, "main.CLIs not defined"
489 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700490 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700491 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700492 "assign predetermined host-to-host intents." +\
493 " After installation, check that the intent" +\
494 " is distributed to all nodes and the state" +\
495 " is INSTALLED"
496
497 # install onos-app-fwd
498 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700499 onosCli = main.CLIs[ main.activeNodes[0] ]
500 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700501 utilities.assert_equals( expect=main.TRUE, actual=installResults,
502 onpass="Install fwd successful",
503 onfail="Install fwd failed" )
504
505 main.step( "Check app ids" )
506 appCheck = main.TRUE
507 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700508 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700509 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700510 name="appToIDCheck-" + str( i ),
511 args=[] )
512 threads.append( t )
513 t.start()
514
515 for t in threads:
516 t.join()
517 appCheck = appCheck and t.result
518 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700519 main.log.warn( onosCli.apps() )
520 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700521 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
522 onpass="App Ids seem to be correct",
523 onfail="Something is wrong with app Ids" )
524
525 main.step( "Discovering Hosts( Via pingall for now )" )
526 # FIXME: Once we have a host discovery mechanism, use that instead
527 # REACTIVE FWD test
528 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700529 passMsg = "Reactive Pingall test passed"
530 time1 = time.time()
531 pingResult = main.Mininet1.pingall()
532 time2 = time.time()
533 if not pingResult:
534 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700535 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700536 passMsg += " on the second try"
537 utilities.assert_equals(
538 expect=main.TRUE,
539 actual=pingResult,
540 onpass= passMsg,
541 onfail="Reactive Pingall failed, " +
542 "one or more ping pairs failed" )
543 main.log.info( "Time for pingall: %2f seconds" %
544 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700545 # timeout for fwd flows
546 time.sleep( 11 )
547 # uninstall onos-app-fwd
548 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700549 node = main.activeNodes[0]
550 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700551 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
552 onpass="Uninstall fwd successful",
553 onfail="Uninstall fwd failed" )
554
555 main.step( "Check app ids" )
556 threads = []
557 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700558 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700559 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700560 name="appToIDCheck-" + str( i ),
561 args=[] )
562 threads.append( t )
563 t.start()
564
565 for t in threads:
566 t.join()
567 appCheck2 = appCheck2 and t.result
568 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700569 node = main.activeNodes[0]
570 main.log.warn( main.CLIs[node].apps() )
571 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700572 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
573 onpass="App Ids seem to be correct",
574 onfail="Something is wrong with app Ids" )
575
576 main.step( "Add host intents via cli" )
577 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700578 # TODO: move the host numbers to params
579 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700580 intentAddResult = True
581 hostResult = main.TRUE
582 for i in range( 8, 18 ):
583 main.log.info( "Adding host intent between h" + str( i ) +
584 " and h" + str( i + 10 ) )
585 host1 = "00:00:00:00:00:" + \
586 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
587 host2 = "00:00:00:00:00:" + \
588 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
589 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700590 host1Dict = onosCli.getHost( host1 )
591 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700592 host1Id = None
593 host2Id = None
594 if host1Dict and host2Dict:
595 host1Id = host1Dict.get( 'id', None )
596 host2Id = host2Dict.get( 'id', None )
597 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700598 nodeNum = ( i % len( main.activeNodes ) )
599 node = main.activeNodes[nodeNum]
600 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700601 if tmpId:
602 main.log.info( "Added intent with id: " + tmpId )
603 intentIds.append( tmpId )
604 else:
605 main.log.error( "addHostIntent returned: " +
606 repr( tmpId ) )
607 else:
608 main.log.error( "Error, getHost() failed for h" + str( i ) +
609 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700610 node = main.activeNodes[0]
611 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700612 main.log.warn( "Hosts output: " )
613 try:
614 main.log.warn( json.dumps( json.loads( hosts ),
615 sort_keys=True,
616 indent=4,
617 separators=( ',', ': ' ) ) )
618 except ( ValueError, TypeError ):
619 main.log.warn( repr( hosts ) )
620 hostResult = main.FALSE
621 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
622 onpass="Found a host id for each host",
623 onfail="Error looking up host ids" )
624
625 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700626 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700627 main.log.info( "Submitted intents: " + str( intentIds ) )
628 main.log.info( "Intents in ONOS: " + str( onosIds ) )
629 for intent in intentIds:
630 if intent in onosIds:
631 pass # intent submitted is in onos
632 else:
633 intentAddResult = False
634 if intentAddResult:
635 intentStop = time.time()
636 else:
637 intentStop = None
638 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700639 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700640 intentStates = []
641 installedCheck = True
642 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
643 count = 0
644 try:
645 for intent in json.loads( intents ):
646 state = intent.get( 'state', None )
647 if "INSTALLED" not in state:
648 installedCheck = False
649 intentId = intent.get( 'id', None )
650 intentStates.append( ( intentId, state ) )
651 except ( ValueError, TypeError ):
652 main.log.exception( "Error parsing intents" )
653 # add submitted intents not in the store
654 tmplist = [ i for i, s in intentStates ]
655 missingIntents = False
656 for i in intentIds:
657 if i not in tmplist:
658 intentStates.append( ( i, " - " ) )
659 missingIntents = True
660 intentStates.sort()
661 for i, s in intentStates:
662 count += 1
663 main.log.info( "%-6s%-15s%-15s" %
664 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700665 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700666 try:
667 missing = False
668 if leaders:
669 parsedLeaders = json.loads( leaders )
670 main.log.warn( json.dumps( parsedLeaders,
671 sort_keys=True,
672 indent=4,
673 separators=( ',', ': ' ) ) )
674 # check for all intent partitions
675 topics = []
676 for i in range( 14 ):
677 topics.append( "intent-partition-" + str( i ) )
678 main.log.debug( topics )
679 ONOStopics = [ j['topic'] for j in parsedLeaders ]
680 for topic in topics:
681 if topic not in ONOStopics:
682 main.log.error( "Error: " + topic +
683 " not in leaders" )
684 missing = True
685 else:
686 main.log.error( "leaders() returned None" )
687 except ( ValueError, TypeError ):
688 main.log.exception( "Error parsing leaders" )
689 main.log.error( repr( leaders ) )
690 # Check all nodes
691 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700692 for i in main.activeNodes:
693 response = main.CLIs[i].leaders( jsonFormat=False)
694 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700695 str( response ) )
696
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700697 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700698 try:
699 if partitions :
700 parsedPartitions = json.loads( partitions )
701 main.log.warn( json.dumps( parsedPartitions,
702 sort_keys=True,
703 indent=4,
704 separators=( ',', ': ' ) ) )
705 # TODO check for a leader in all paritions
706 # TODO check for consistency among nodes
707 else:
708 main.log.error( "partitions() returned None" )
709 except ( ValueError, TypeError ):
710 main.log.exception( "Error parsing partitions" )
711 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700712 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700713 try:
714 if pendingMap :
715 parsedPending = json.loads( pendingMap )
716 main.log.warn( json.dumps( parsedPending,
717 sort_keys=True,
718 indent=4,
719 separators=( ',', ': ' ) ) )
720 # TODO check something here?
721 else:
722 main.log.error( "pendingMap() returned None" )
723 except ( ValueError, TypeError ):
724 main.log.exception( "Error parsing pending map" )
725 main.log.error( repr( pendingMap ) )
726
727 intentAddResult = bool( intentAddResult and not missingIntents and
728 installedCheck )
729 if not intentAddResult:
730 main.log.error( "Error in pushing host intents to ONOS" )
731
732 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700733 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700734 correct = True
735 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700736 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700737 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700738 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700739 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700740 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700741 str( sorted( onosIds ) ) )
742 if sorted( ids ) != sorted( intentIds ):
743 main.log.warn( "Set of intent IDs doesn't match" )
744 correct = False
745 break
746 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700747 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700748 for intent in intents:
749 if intent[ 'state' ] != "INSTALLED":
750 main.log.warn( "Intent " + intent[ 'id' ] +
751 " is " + intent[ 'state' ] )
752 correct = False
753 break
754 if correct:
755 break
756 else:
757 time.sleep(1)
758 if not intentStop:
759 intentStop = time.time()
760 global gossipTime
761 gossipTime = intentStop - intentStart
762 main.log.info( "It took about " + str( gossipTime ) +
763 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700764 gossipPeriod = int( main.params['timers']['gossip'] )
765 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700766 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700767 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700768 onpass="ECM anti-entropy for intents worked within " +
769 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700770 onfail="Intent ECM anti-entropy took too long. " +
771 "Expected time:{}, Actual time:{}".format( maxGossipTime,
772 gossipTime ) )
773 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700774 intentAddResult = True
775
776 if not intentAddResult or "key" in pendingMap:
777 import time
778 installedCheck = True
779 main.log.info( "Sleeping 60 seconds to see if intents are found" )
780 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700781 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700782 main.log.info( "Submitted intents: " + str( intentIds ) )
783 main.log.info( "Intents in ONOS: " + str( onosIds ) )
784 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700785 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700786 intentStates = []
787 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
788 count = 0
789 try:
790 for intent in json.loads( intents ):
791 # Iter through intents of a node
792 state = intent.get( 'state', None )
793 if "INSTALLED" not in state:
794 installedCheck = False
795 intentId = intent.get( 'id', None )
796 intentStates.append( ( intentId, state ) )
797 except ( ValueError, TypeError ):
798 main.log.exception( "Error parsing intents" )
799 # add submitted intents not in the store
800 tmplist = [ i for i, s in intentStates ]
801 for i in intentIds:
802 if i not in tmplist:
803 intentStates.append( ( i, " - " ) )
804 intentStates.sort()
805 for i, s in intentStates:
806 count += 1
807 main.log.info( "%-6s%-15s%-15s" %
808 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700809 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700810 try:
811 missing = False
812 if leaders:
813 parsedLeaders = json.loads( leaders )
814 main.log.warn( json.dumps( parsedLeaders,
815 sort_keys=True,
816 indent=4,
817 separators=( ',', ': ' ) ) )
818 # check for all intent partitions
819 # check for election
820 topics = []
821 for i in range( 14 ):
822 topics.append( "intent-partition-" + str( i ) )
823 # FIXME: this should only be after we start the app
824 topics.append( "org.onosproject.election" )
825 main.log.debug( topics )
826 ONOStopics = [ j['topic'] for j in parsedLeaders ]
827 for topic in topics:
828 if topic not in ONOStopics:
829 main.log.error( "Error: " + topic +
830 " not in leaders" )
831 missing = True
832 else:
833 main.log.error( "leaders() returned None" )
834 except ( ValueError, TypeError ):
835 main.log.exception( "Error parsing leaders" )
836 main.log.error( repr( leaders ) )
837 # Check all nodes
838 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700839 for i in main.activeNodes:
840 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700841 response = node.leaders( jsonFormat=False)
842 main.log.warn( str( node.name ) + " leaders output: \n" +
843 str( response ) )
844
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700845 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700846 try:
847 if partitions :
848 parsedPartitions = json.loads( partitions )
849 main.log.warn( json.dumps( parsedPartitions,
850 sort_keys=True,
851 indent=4,
852 separators=( ',', ': ' ) ) )
853 # TODO check for a leader in all paritions
854 # TODO check for consistency among nodes
855 else:
856 main.log.error( "partitions() returned None" )
857 except ( ValueError, TypeError ):
858 main.log.exception( "Error parsing partitions" )
859 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700860 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700861 try:
862 if pendingMap :
863 parsedPending = json.loads( pendingMap )
864 main.log.warn( json.dumps( parsedPending,
865 sort_keys=True,
866 indent=4,
867 separators=( ',', ': ' ) ) )
868 # TODO check something here?
869 else:
870 main.log.error( "pendingMap() returned None" )
871 except ( ValueError, TypeError ):
872 main.log.exception( "Error parsing pending map" )
873 main.log.error( repr( pendingMap ) )
874
875 def CASE4( self, main ):
876 """
877 Ping across added host intents
878 """
879 import json
880 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700881 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700882 assert main, "main not defined"
883 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700884 assert main.CLIs, "main.CLIs not defined"
885 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700886 main.case( "Verify connectivity by sending traffic across Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700887 main.caseExplanation = "Ping across added host intents to check " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700888 "functionality and check the state of " +\
889 "the intent"
890 main.step( "Ping across added host intents" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700891 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700892 PingResult = main.TRUE
893 for i in range( 8, 18 ):
894 ping = main.Mininet1.pingHost( src="h" + str( i ),
895 target="h" + str( i + 10 ) )
896 PingResult = PingResult and ping
897 if ping == main.FALSE:
898 main.log.warn( "Ping failed between h" + str( i ) +
899 " and h" + str( i + 10 ) )
900 elif ping == main.TRUE:
901 main.log.info( "Ping test passed!" )
902 # Don't set PingResult or you'd override failures
903 if PingResult == main.FALSE:
904 main.log.error(
905 "Intents have not been installed correctly, pings failed." )
906 # TODO: pretty print
907 main.log.warn( "ONOS1 intents: " )
908 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700909 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700910 main.log.warn( json.dumps( json.loads( tmpIntents ),
911 sort_keys=True,
912 indent=4,
913 separators=( ',', ': ' ) ) )
914 except ( ValueError, TypeError ):
915 main.log.warn( repr( tmpIntents ) )
916 utilities.assert_equals(
917 expect=main.TRUE,
918 actual=PingResult,
919 onpass="Intents have been installed correctly and pings work",
920 onfail="Intents have not been installed correctly, pings failed." )
921
922 main.step( "Check Intent state" )
923 installedCheck = False
924 loopCount = 0
925 while not installedCheck and loopCount < 40:
926 installedCheck = True
927 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700928 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700929 intentStates = []
930 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
931 count = 0
932 # Iter through intents of a node
933 try:
934 for intent in json.loads( intents ):
935 state = intent.get( 'state', None )
936 if "INSTALLED" not in state:
937 installedCheck = False
938 intentId = intent.get( 'id', None )
939 intentStates.append( ( intentId, state ) )
940 except ( ValueError, TypeError ):
941 main.log.exception( "Error parsing intents." )
942 # Print states
943 intentStates.sort()
944 for i, s in intentStates:
945 count += 1
946 main.log.info( "%-6s%-15s%-15s" %
947 ( str( count ), str( i ), str( s ) ) )
948 if not installedCheck:
949 time.sleep( 1 )
950 loopCount += 1
951 utilities.assert_equals( expect=True, actual=installedCheck,
952 onpass="Intents are all INSTALLED",
953 onfail="Intents are not all in " +
954 "INSTALLED state" )
955
956 main.step( "Check leadership of topics" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700957 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700958 topicCheck = main.TRUE
959 try:
960 if leaders:
961 parsedLeaders = json.loads( leaders )
962 main.log.warn( json.dumps( parsedLeaders,
963 sort_keys=True,
964 indent=4,
965 separators=( ',', ': ' ) ) )
966 # check for all intent partitions
967 # check for election
968 # TODO: Look at Devices as topics now that it uses this system
969 topics = []
970 for i in range( 14 ):
971 topics.append( "intent-partition-" + str( i ) )
972 # FIXME: this should only be after we start the app
973 # FIXME: topics.append( "org.onosproject.election" )
974 # Print leaders output
975 main.log.debug( topics )
976 ONOStopics = [ j['topic'] for j in parsedLeaders ]
977 for topic in topics:
978 if topic not in ONOStopics:
979 main.log.error( "Error: " + topic +
980 " not in leaders" )
981 topicCheck = main.FALSE
982 else:
983 main.log.error( "leaders() returned None" )
984 topicCheck = main.FALSE
985 except ( ValueError, TypeError ):
986 topicCheck = main.FALSE
987 main.log.exception( "Error parsing leaders" )
988 main.log.error( repr( leaders ) )
989 # TODO: Check for a leader of these topics
990 # Check all nodes
991 if topicCheck:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700992 for i in main.activeNodes:
993 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700994 response = node.leaders( jsonFormat=False)
995 main.log.warn( str( node.name ) + " leaders output: \n" +
996 str( response ) )
997
998 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
999 onpass="intent Partitions is in leaders",
1000 onfail="Some topics were lost " )
1001 # Print partitions
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001002 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001003 try:
1004 if partitions :
1005 parsedPartitions = json.loads( partitions )
1006 main.log.warn( json.dumps( parsedPartitions,
1007 sort_keys=True,
1008 indent=4,
1009 separators=( ',', ': ' ) ) )
1010 # TODO check for a leader in all paritions
1011 # TODO check for consistency among nodes
1012 else:
1013 main.log.error( "partitions() returned None" )
1014 except ( ValueError, TypeError ):
1015 main.log.exception( "Error parsing partitions" )
1016 main.log.error( repr( partitions ) )
1017 # Print Pending Map
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001018 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001019 try:
1020 if pendingMap :
1021 parsedPending = json.loads( pendingMap )
1022 main.log.warn( json.dumps( parsedPending,
1023 sort_keys=True,
1024 indent=4,
1025 separators=( ',', ': ' ) ) )
1026 # TODO check something here?
1027 else:
1028 main.log.error( "pendingMap() returned None" )
1029 except ( ValueError, TypeError ):
1030 main.log.exception( "Error parsing pending map" )
1031 main.log.error( repr( pendingMap ) )
1032
1033 if not installedCheck:
1034 main.log.info( "Waiting 60 seconds to see if the state of " +
1035 "intents change" )
1036 time.sleep( 60 )
1037 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001038 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001039 intentStates = []
1040 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1041 count = 0
1042 # Iter through intents of a node
1043 try:
1044 for intent in json.loads( intents ):
1045 state = intent.get( 'state', None )
1046 if "INSTALLED" not in state:
1047 installedCheck = False
1048 intentId = intent.get( 'id', None )
1049 intentStates.append( ( intentId, state ) )
1050 except ( ValueError, TypeError ):
1051 main.log.exception( "Error parsing intents." )
1052 intentStates.sort()
1053 for i, s in intentStates:
1054 count += 1
1055 main.log.info( "%-6s%-15s%-15s" %
1056 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001057 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001058 try:
1059 missing = False
1060 if leaders:
1061 parsedLeaders = json.loads( leaders )
1062 main.log.warn( json.dumps( parsedLeaders,
1063 sort_keys=True,
1064 indent=4,
1065 separators=( ',', ': ' ) ) )
1066 # check for all intent partitions
1067 # check for election
1068 topics = []
1069 for i in range( 14 ):
1070 topics.append( "intent-partition-" + str( i ) )
1071 # FIXME: this should only be after we start the app
1072 topics.append( "org.onosproject.election" )
1073 main.log.debug( topics )
1074 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1075 for topic in topics:
1076 if topic not in ONOStopics:
1077 main.log.error( "Error: " + topic +
1078 " not in leaders" )
1079 missing = True
1080 else:
1081 main.log.error( "leaders() returned None" )
1082 except ( ValueError, TypeError ):
1083 main.log.exception( "Error parsing leaders" )
1084 main.log.error( repr( leaders ) )
1085 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001086 for i in main.activeNodes:
1087 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001088 response = node.leaders( jsonFormat=False)
1089 main.log.warn( str( node.name ) + " leaders output: \n" +
1090 str( response ) )
1091
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001092 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001093 try:
1094 if partitions :
1095 parsedPartitions = json.loads( partitions )
1096 main.log.warn( json.dumps( parsedPartitions,
1097 sort_keys=True,
1098 indent=4,
1099 separators=( ',', ': ' ) ) )
1100 # TODO check for a leader in all paritions
1101 # TODO check for consistency among nodes
1102 else:
1103 main.log.error( "partitions() returned None" )
1104 except ( ValueError, TypeError ):
1105 main.log.exception( "Error parsing partitions" )
1106 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001107 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001108 try:
1109 if pendingMap :
1110 parsedPending = json.loads( pendingMap )
1111 main.log.warn( json.dumps( parsedPending,
1112 sort_keys=True,
1113 indent=4,
1114 separators=( ',', ': ' ) ) )
1115 # TODO check something here?
1116 else:
1117 main.log.error( "pendingMap() returned None" )
1118 except ( ValueError, TypeError ):
1119 main.log.exception( "Error parsing pending map" )
1120 main.log.error( repr( pendingMap ) )
1121 # Print flowrules
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001122 node = main.activeNodes[0]
1123 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001124 main.step( "Wait a minute then ping again" )
1125 # the wait is above
1126 PingResult = main.TRUE
1127 for i in range( 8, 18 ):
1128 ping = main.Mininet1.pingHost( src="h" + str( i ),
1129 target="h" + str( i + 10 ) )
1130 PingResult = PingResult and ping
1131 if ping == main.FALSE:
1132 main.log.warn( "Ping failed between h" + str( i ) +
1133 " and h" + str( i + 10 ) )
1134 elif ping == main.TRUE:
1135 main.log.info( "Ping test passed!" )
1136 # Don't set PingResult or you'd override failures
1137 if PingResult == main.FALSE:
1138 main.log.error(
1139 "Intents have not been installed correctly, pings failed." )
1140 # TODO: pretty print
1141 main.log.warn( "ONOS1 intents: " )
1142 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001143 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001144 main.log.warn( json.dumps( json.loads( tmpIntents ),
1145 sort_keys=True,
1146 indent=4,
1147 separators=( ',', ': ' ) ) )
1148 except ( ValueError, TypeError ):
1149 main.log.warn( repr( tmpIntents ) )
1150 utilities.assert_equals(
1151 expect=main.TRUE,
1152 actual=PingResult,
1153 onpass="Intents have been installed correctly and pings work",
1154 onfail="Intents have not been installed correctly, pings failed." )
1155
1156 def CASE5( self, main ):
1157 """
1158 Reading state of ONOS
1159 """
1160 import json
1161 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001162 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001163 assert main, "main not defined"
1164 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001165 assert main.CLIs, "main.CLIs not defined"
1166 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001167
1168 main.case( "Setting up and gathering data for current state" )
1169 # The general idea for this test case is to pull the state of
1170 # ( intents,flows, topology,... ) from each ONOS node
1171 # We can then compare them with each other and also with past states
1172
1173 main.step( "Check that each switch has a master" )
1174 global mastershipState
1175 mastershipState = '[]'
1176
1177 # Assert that each device has a master
1178 rolesNotNull = main.TRUE
1179 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001180 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001181 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001182 name="rolesNotNull-" + str( i ),
1183 args=[] )
1184 threads.append( t )
1185 t.start()
1186
1187 for t in threads:
1188 t.join()
1189 rolesNotNull = rolesNotNull and t.result
1190 utilities.assert_equals(
1191 expect=main.TRUE,
1192 actual=rolesNotNull,
1193 onpass="Each device has a master",
1194 onfail="Some devices don't have a master assigned" )
1195
1196 main.step( "Get the Mastership of each switch from each controller" )
1197 ONOSMastership = []
1198 mastershipCheck = main.FALSE
1199 consistentMastership = True
1200 rolesResults = True
1201 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001202 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001203 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001204 name="roles-" + str( i ),
1205 args=[] )
1206 threads.append( t )
1207 t.start()
1208
1209 for t in threads:
1210 t.join()
1211 ONOSMastership.append( t.result )
1212
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001213 for i in range( len( ONOSMastership ) ):
1214 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001215 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001216 main.log.error( "Error in getting ONOS" + node + " roles" )
1217 main.log.warn( "ONOS" + node + " mastership response: " +
1218 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001219 rolesResults = False
1220 utilities.assert_equals(
1221 expect=True,
1222 actual=rolesResults,
1223 onpass="No error in reading roles output",
1224 onfail="Error in reading roles from ONOS" )
1225
1226 main.step( "Check for consistency in roles from each controller" )
1227 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1228 main.log.info(
1229 "Switch roles are consistent across all ONOS nodes" )
1230 else:
1231 consistentMastership = False
1232 utilities.assert_equals(
1233 expect=True,
1234 actual=consistentMastership,
1235 onpass="Switch roles are consistent across all ONOS nodes",
1236 onfail="ONOS nodes have different views of switch roles" )
1237
1238 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001239 for i in range( len( main.activeNodes ) ):
1240 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001241 try:
1242 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001243 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001244 json.dumps(
1245 json.loads( ONOSMastership[ i ] ),
1246 sort_keys=True,
1247 indent=4,
1248 separators=( ',', ': ' ) ) )
1249 except ( ValueError, TypeError ):
1250 main.log.warn( repr( ONOSMastership[ i ] ) )
1251 elif rolesResults and consistentMastership:
1252 mastershipCheck = main.TRUE
1253 mastershipState = ONOSMastership[ 0 ]
1254
1255 main.step( "Get the intents from each controller" )
1256 global intentState
1257 intentState = []
1258 ONOSIntents = []
1259 intentCheck = main.FALSE
1260 consistentIntents = True
1261 intentsResults = True
1262 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001263 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001264 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001265 name="intents-" + str( i ),
1266 args=[],
1267 kwargs={ 'jsonFormat': True } )
1268 threads.append( t )
1269 t.start()
1270
1271 for t in threads:
1272 t.join()
1273 ONOSIntents.append( t.result )
1274
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001275 for i in range( len( ONOSIntents ) ):
1276 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001277 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001278 main.log.error( "Error in getting ONOS" + node + " intents" )
1279 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001280 repr( ONOSIntents[ i ] ) )
1281 intentsResults = False
1282 utilities.assert_equals(
1283 expect=True,
1284 actual=intentsResults,
1285 onpass="No error in reading intents output",
1286 onfail="Error in reading intents from ONOS" )
1287
1288 main.step( "Check for consistency in Intents from each controller" )
1289 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1290 main.log.info( "Intents are consistent across all ONOS " +
1291 "nodes" )
1292 else:
1293 consistentIntents = False
1294 main.log.error( "Intents not consistent" )
1295 utilities.assert_equals(
1296 expect=True,
1297 actual=consistentIntents,
1298 onpass="Intents are consistent across all ONOS nodes",
1299 onfail="ONOS nodes have different views of intents" )
1300
1301 if intentsResults:
1302 # Try to make it easy to figure out what is happening
1303 #
1304 # Intent ONOS1 ONOS2 ...
1305 # 0x01 INSTALLED INSTALLING
1306 # ... ... ...
1307 # ... ... ...
1308 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001309 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001310 title += " " * 10 + "ONOS" + str( n + 1 )
1311 main.log.warn( title )
1312 # get all intent keys in the cluster
1313 keys = []
1314 for nodeStr in ONOSIntents:
1315 node = json.loads( nodeStr )
1316 for intent in node:
1317 keys.append( intent.get( 'id' ) )
1318 keys = set( keys )
1319 for key in keys:
1320 row = "%-13s" % key
1321 for nodeStr in ONOSIntents:
1322 node = json.loads( nodeStr )
1323 for intent in node:
1324 if intent.get( 'id', "Error" ) == key:
1325 row += "%-15s" % intent.get( 'state' )
1326 main.log.warn( row )
1327 # End table view
1328
1329 if intentsResults and not consistentIntents:
1330 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001331 n = str( main.activeNodes[-1] + 1 )
1332 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001333 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1334 sort_keys=True,
1335 indent=4,
1336 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001337 for i in range( len( ONOSIntents ) ):
1338 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001339 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001340 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001341 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1342 sort_keys=True,
1343 indent=4,
1344 separators=( ',', ': ' ) ) )
1345 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001346 main.log.debug( "ONOS" + node + " intents match ONOS" +
1347 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001348 elif intentsResults and consistentIntents:
1349 intentCheck = main.TRUE
1350 intentState = ONOSIntents[ 0 ]
1351
1352 main.step( "Get the flows from each controller" )
1353 global flowState
1354 flowState = []
1355 ONOSFlows = []
1356 ONOSFlowsJson = []
1357 flowCheck = main.FALSE
1358 consistentFlows = True
1359 flowsResults = True
1360 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001361 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001362 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001363 name="flows-" + str( i ),
1364 args=[],
1365 kwargs={ 'jsonFormat': True } )
1366 threads.append( t )
1367 t.start()
1368
1369 # NOTE: Flows command can take some time to run
1370 time.sleep(30)
1371 for t in threads:
1372 t.join()
1373 result = t.result
1374 ONOSFlows.append( result )
1375
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001376 for i in range( len( ONOSFlows ) ):
1377 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001378 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1379 main.log.error( "Error in getting ONOS" + num + " flows" )
1380 main.log.warn( "ONOS" + num + " flows response: " +
1381 repr( ONOSFlows[ i ] ) )
1382 flowsResults = False
1383 ONOSFlowsJson.append( None )
1384 else:
1385 try:
1386 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1387 except ( ValueError, TypeError ):
1388 # FIXME: change this to log.error?
1389 main.log.exception( "Error in parsing ONOS" + num +
1390 " response as json." )
1391 main.log.error( repr( ONOSFlows[ i ] ) )
1392 ONOSFlowsJson.append( None )
1393 flowsResults = False
1394 utilities.assert_equals(
1395 expect=True,
1396 actual=flowsResults,
1397 onpass="No error in reading flows output",
1398 onfail="Error in reading flows from ONOS" )
1399
1400 main.step( "Check for consistency in Flows from each controller" )
1401 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1402 if all( tmp ):
1403 main.log.info( "Flow count is consistent across all ONOS nodes" )
1404 else:
1405 consistentFlows = False
1406 utilities.assert_equals(
1407 expect=True,
1408 actual=consistentFlows,
1409 onpass="The flow count is consistent across all ONOS nodes",
1410 onfail="ONOS nodes have different flow counts" )
1411
1412 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001413 for i in range( len( ONOSFlows ) ):
1414 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001415 try:
1416 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001417 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001418 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1419 indent=4, separators=( ',', ': ' ) ) )
1420 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001421 main.log.warn( "ONOS" + node + " flows: " +
1422 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001423 elif flowsResults and consistentFlows:
1424 flowCheck = main.TRUE
1425 flowState = ONOSFlows[ 0 ]
1426
1427 main.step( "Get the OF Table entries" )
1428 global flows
1429 flows = []
1430 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001431 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001432 if flowCheck == main.FALSE:
1433 for table in flows:
1434 main.log.warn( table )
1435 # TODO: Compare switch flow tables with ONOS flow tables
1436
1437 main.step( "Start continuous pings" )
1438 main.Mininet2.pingLong(
1439 src=main.params[ 'PING' ][ 'source1' ],
1440 target=main.params[ 'PING' ][ 'target1' ],
1441 pingTime=500 )
1442 main.Mininet2.pingLong(
1443 src=main.params[ 'PING' ][ 'source2' ],
1444 target=main.params[ 'PING' ][ 'target2' ],
1445 pingTime=500 )
1446 main.Mininet2.pingLong(
1447 src=main.params[ 'PING' ][ 'source3' ],
1448 target=main.params[ 'PING' ][ 'target3' ],
1449 pingTime=500 )
1450 main.Mininet2.pingLong(
1451 src=main.params[ 'PING' ][ 'source4' ],
1452 target=main.params[ 'PING' ][ 'target4' ],
1453 pingTime=500 )
1454 main.Mininet2.pingLong(
1455 src=main.params[ 'PING' ][ 'source5' ],
1456 target=main.params[ 'PING' ][ 'target5' ],
1457 pingTime=500 )
1458 main.Mininet2.pingLong(
1459 src=main.params[ 'PING' ][ 'source6' ],
1460 target=main.params[ 'PING' ][ 'target6' ],
1461 pingTime=500 )
1462 main.Mininet2.pingLong(
1463 src=main.params[ 'PING' ][ 'source7' ],
1464 target=main.params[ 'PING' ][ 'target7' ],
1465 pingTime=500 )
1466 main.Mininet2.pingLong(
1467 src=main.params[ 'PING' ][ 'source8' ],
1468 target=main.params[ 'PING' ][ 'target8' ],
1469 pingTime=500 )
1470 main.Mininet2.pingLong(
1471 src=main.params[ 'PING' ][ 'source9' ],
1472 target=main.params[ 'PING' ][ 'target9' ],
1473 pingTime=500 )
1474 main.Mininet2.pingLong(
1475 src=main.params[ 'PING' ][ 'source10' ],
1476 target=main.params[ 'PING' ][ 'target10' ],
1477 pingTime=500 )
1478
1479 main.step( "Collecting topology information from ONOS" )
1480 devices = []
1481 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001482 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001483 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001484 name="devices-" + str( i ),
1485 args=[ ] )
1486 threads.append( t )
1487 t.start()
1488
1489 for t in threads:
1490 t.join()
1491 devices.append( t.result )
1492 hosts = []
1493 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001494 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001495 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001496 name="hosts-" + str( i ),
1497 args=[ ] )
1498 threads.append( t )
1499 t.start()
1500
1501 for t in threads:
1502 t.join()
1503 try:
1504 hosts.append( json.loads( t.result ) )
1505 except ( ValueError, TypeError ):
1506 # FIXME: better handling of this, print which node
1507 # Maybe use thread name?
1508 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001509 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001510 hosts.append( None )
1511
1512 ports = []
1513 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001514 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001515 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001516 name="ports-" + str( i ),
1517 args=[ ] )
1518 threads.append( t )
1519 t.start()
1520
1521 for t in threads:
1522 t.join()
1523 ports.append( t.result )
1524 links = []
1525 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001526 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001527 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001528 name="links-" + str( i ),
1529 args=[ ] )
1530 threads.append( t )
1531 t.start()
1532
1533 for t in threads:
1534 t.join()
1535 links.append( t.result )
1536 clusters = []
1537 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001538 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001539 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001540 name="clusters-" + str( i ),
1541 args=[ ] )
1542 threads.append( t )
1543 t.start()
1544
1545 for t in threads:
1546 t.join()
1547 clusters.append( t.result )
1548 # Compare json objects for hosts and dataplane clusters
1549
1550 # hosts
1551 main.step( "Host view is consistent across ONOS nodes" )
1552 consistentHostsResult = main.TRUE
1553 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001554 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001555 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001556 if hosts[ controller ] == hosts[ 0 ]:
1557 continue
1558 else: # hosts not consistent
1559 main.log.error( "hosts from ONOS" +
1560 controllerStr +
1561 " is inconsistent with ONOS1" )
1562 main.log.warn( repr( hosts[ controller ] ) )
1563 consistentHostsResult = main.FALSE
1564
1565 else:
1566 main.log.error( "Error in getting ONOS hosts from ONOS" +
1567 controllerStr )
1568 consistentHostsResult = main.FALSE
1569 main.log.warn( "ONOS" + controllerStr +
1570 " hosts response: " +
1571 repr( hosts[ controller ] ) )
1572 utilities.assert_equals(
1573 expect=main.TRUE,
1574 actual=consistentHostsResult,
1575 onpass="Hosts view is consistent across all ONOS nodes",
1576 onfail="ONOS nodes have different views of hosts" )
1577
1578 main.step( "Each host has an IP address" )
1579 ipResult = main.TRUE
1580 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001581 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001582 if hosts[ controller ]:
1583 for host in hosts[ controller ]:
1584 if not host.get( 'ipAddresses', [ ] ):
1585 main.log.error( "Error with host ips on controller" +
1586 controllerStr + ": " + str( host ) )
1587 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001588 utilities.assert_equals(
1589 expect=main.TRUE,
1590 actual=ipResult,
1591 onpass="The ips of the hosts aren't empty",
1592 onfail="The ip of at least one host is missing" )
1593
1594 # Strongly connected clusters of devices
1595 main.step( "Cluster view is consistent across ONOS nodes" )
1596 consistentClustersResult = main.TRUE
1597 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001598 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001599 if "Error" not in clusters[ controller ]:
1600 if clusters[ controller ] == clusters[ 0 ]:
1601 continue
1602 else: # clusters not consistent
1603 main.log.error( "clusters from ONOS" + controllerStr +
1604 " is inconsistent with ONOS1" )
1605 consistentClustersResult = main.FALSE
1606
1607 else:
1608 main.log.error( "Error in getting dataplane clusters " +
1609 "from ONOS" + controllerStr )
1610 consistentClustersResult = main.FALSE
1611 main.log.warn( "ONOS" + controllerStr +
1612 " clusters response: " +
1613 repr( clusters[ controller ] ) )
1614 utilities.assert_equals(
1615 expect=main.TRUE,
1616 actual=consistentClustersResult,
1617 onpass="Clusters view is consistent across all ONOS nodes",
1618 onfail="ONOS nodes have different views of clusters" )
1619 # there should always only be one cluster
1620 main.step( "Cluster view correct across ONOS nodes" )
1621 try:
1622 numClusters = len( json.loads( clusters[ 0 ] ) )
1623 except ( ValueError, TypeError ):
1624 main.log.exception( "Error parsing clusters[0]: " +
1625 repr( clusters[ 0 ] ) )
1626 clusterResults = main.FALSE
1627 if numClusters == 1:
1628 clusterResults = main.TRUE
1629 utilities.assert_equals(
1630 expect=1,
1631 actual=numClusters,
1632 onpass="ONOS shows 1 SCC",
1633 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1634
1635 main.step( "Comparing ONOS topology to MN" )
1636 devicesResults = main.TRUE
1637 linksResults = main.TRUE
1638 hostsResults = main.TRUE
1639 mnSwitches = main.Mininet1.getSwitches()
1640 mnLinks = main.Mininet1.getLinks()
1641 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001642 for controller in main.activeNodes:
1643 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001644 if devices[ controller ] and ports[ controller ] and\
1645 "Error" not in devices[ controller ] and\
1646 "Error" not in ports[ controller ]:
1647
1648 currentDevicesResult = main.Mininet1.compareSwitches(
1649 mnSwitches,
1650 json.loads( devices[ controller ] ),
1651 json.loads( ports[ controller ] ) )
1652 else:
1653 currentDevicesResult = main.FALSE
1654 utilities.assert_equals( expect=main.TRUE,
1655 actual=currentDevicesResult,
1656 onpass="ONOS" + controllerStr +
1657 " Switches view is correct",
1658 onfail="ONOS" + controllerStr +
1659 " Switches view is incorrect" )
1660 if links[ controller ] and "Error" not in links[ controller ]:
1661 currentLinksResult = main.Mininet1.compareLinks(
1662 mnSwitches, mnLinks,
1663 json.loads( links[ controller ] ) )
1664 else:
1665 currentLinksResult = main.FALSE
1666 utilities.assert_equals( expect=main.TRUE,
1667 actual=currentLinksResult,
1668 onpass="ONOS" + controllerStr +
1669 " links view is correct",
1670 onfail="ONOS" + controllerStr +
1671 " links view is incorrect" )
1672
Jon Hall657cdf62015-12-17 14:40:51 -08001673 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001674 currentHostsResult = main.Mininet1.compareHosts(
1675 mnHosts,
1676 hosts[ controller ] )
1677 else:
1678 currentHostsResult = main.FALSE
1679 utilities.assert_equals( expect=main.TRUE,
1680 actual=currentHostsResult,
1681 onpass="ONOS" + controllerStr +
1682 " hosts exist in Mininet",
1683 onfail="ONOS" + controllerStr +
1684 " hosts don't match Mininet" )
1685
1686 devicesResults = devicesResults and currentDevicesResult
1687 linksResults = linksResults and currentLinksResult
1688 hostsResults = hostsResults and currentHostsResult
1689
1690 main.step( "Device information is correct" )
1691 utilities.assert_equals(
1692 expect=main.TRUE,
1693 actual=devicesResults,
1694 onpass="Device information is correct",
1695 onfail="Device information is incorrect" )
1696
1697 main.step( "Links are correct" )
1698 utilities.assert_equals(
1699 expect=main.TRUE,
1700 actual=linksResults,
1701 onpass="Link are correct",
1702 onfail="Links are incorrect" )
1703
1704 main.step( "Hosts are correct" )
1705 utilities.assert_equals(
1706 expect=main.TRUE,
1707 actual=hostsResults,
1708 onpass="Hosts are correct",
1709 onfail="Hosts are incorrect" )
1710
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001711 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001712 """
1713 The Failure case.
1714 """
Jon Halle1a3b752015-07-22 13:02:46 -07001715 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001716 assert main, "main not defined"
1717 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001718 assert main.CLIs, "main.CLIs not defined"
1719 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001720 main.case( "Stop minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001721
1722 main.step( "Checking ONOS Logs for errors" )
1723 for node in main.nodes:
1724 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1725 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1726
Jon Hall3b489db2015-10-05 14:38:37 -07001727 n = len( main.nodes ) # Number of nodes
1728 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1729 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1730 if n > 3:
1731 main.kill.append( p - 1 )
1732 # NOTE: This only works for cluster sizes of 3,5, or 7.
1733
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001734 main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001735 killResults = main.TRUE
1736 for i in main.kill:
1737 killResults = killResults and\
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001738 main.ONOSbench.onosStop( main.nodes[i].ip_address )
1739 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001740 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001741 onpass="ONOS nodes stopped successfully",
1742 onfail="ONOS nodes NOT successfully stopped" )
1743
1744 def CASE62( self, main ):
1745 """
1746 The bring up stopped nodes
1747 """
1748 import time
1749 assert main.numCtrls, "main.numCtrls not defined"
1750 assert main, "main not defined"
1751 assert utilities.assert_equals, "utilities.assert_equals not defined"
1752 assert main.CLIs, "main.CLIs not defined"
1753 assert main.nodes, "main.nodes not defined"
1754 assert main.kill, "main.kill not defined"
1755 main.case( "Restart minority of ONOS nodes" )
1756
1757 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1758 startResults = main.TRUE
1759 restartTime = time.time()
1760 for i in main.kill:
1761 startResults = startResults and\
1762 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1763 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1764 onpass="ONOS nodes started successfully",
1765 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001766
1767 main.step( "Checking if ONOS is up yet" )
1768 count = 0
1769 onosIsupResult = main.FALSE
1770 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001771 onosIsupResult = main.TRUE
1772 for i in main.kill:
1773 onosIsupResult = onosIsupResult and\
1774 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001775 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001776 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1777 onpass="ONOS restarted successfully",
1778 onfail="ONOS restart NOT successful" )
1779
Jon Halle1a3b752015-07-22 13:02:46 -07001780 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001781 cliResults = main.TRUE
1782 for i in main.kill:
1783 cliResults = cliResults and\
1784 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001785 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001786 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1787 onpass="ONOS cli restarted",
1788 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001789 main.activeNodes.sort()
1790 try:
1791 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1792 "List of active nodes has duplicates, this likely indicates something was run out of order"
1793 except AssertionError:
1794 main.log.exception( "" )
1795 main.cleanup()
1796 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001797
1798 # Grab the time of restart so we chan check how long the gossip
1799 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001800 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001801 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001802 # TODO: MAke this configurable. Also, we are breaking the above timer
1803 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001804 node = main.activeNodes[0]
1805 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1806 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1807 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001808
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        Verifies, across the nodes still in main.activeNodes, that:
          - every switch still has a master
          - device roles are readable and consistent across nodes
          - intents are readable, consistent across nodes, and unchanged
            from before the failure
          - OpenFlow tables are unchanged from before the failure
          - leadership election still has a single, live leader

        NOTE(review): relies on ``intentState`` and ``flows`` being left
        over from CASE5 (TestON shares variable scope between cases) —
        confirm CASE5 ran before this case, otherwise these names are
        undefined.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.kill may be missing if CASE61 was skipped; default to no
        # killed nodes so the leadership check below still works
        try:
            main.kill
        except AttributeError:
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel, one thread per node
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output is empty or contains "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes should report the same mastership view as node 0
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # Dump each node's roles only when readable but inconsistent
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        # Flag any node whose intents output is empty or contains "Error"
        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE(review): comparing sorted() of the raw JSON strings compares
        # sorted characters, not parsed intents — order differences are
        # tolerated but so are some real differences; confirm intended
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # Print one row per intent id with each node's reported state
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states for debugging
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        if intentState and intentState == ONOSIntents[ 0 ]:
            sameIntents = main.TRUE
            main.log.info( "Intents are consistent with before failure" )
        # TODO: possibly the states have changed? we may need to figure out
        #       what the acceptable states are
        elif len( intentState ) == len( ONOSIntents[ 0 ] ):
            # Same JSON length but different text: compare parsed intents
            sameIntents = main.TRUE
            try:
                before = json.loads( intentState )
                after = json.loads( ONOSIntents[ 0 ] )
                for intent in before:
                    if intent not in after:
                        sameIntents = main.FALSE
                        main.log.debug( "Intent is not currently in ONOS " +
                                        "(at least in the same form):" )
                        main.log.debug( json.dumps( intent ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        if sameIntents == main.FALSE:
            # Dump before/after intents for post-mortem comparison
            try:
                main.log.debug( "ONOS intents before: " )
                main.log.debug( json.dumps( json.loads( intentState ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
                main.log.debug( "Current ONOS intents: " )
                main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        # NOTE(review): hard-codes 28 switches and OF 1.3 — matches the
        # obelisk topology used elsewhere in this test; confirm if reused
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
            if FlowTables == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )

        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        # Stop the background ping started in an earlier case
        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes that were stopped; the leader must not be one
        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        # Every active node must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2125
2126 def CASE8( self, main ):
2127 """
2128 Compare topo
2129 """
2130 import json
2131 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002132 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002133 assert main, "main not defined"
2134 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002135 assert main.CLIs, "main.CLIs not defined"
2136 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002137
2138 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002139 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002140 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002141 topoResult = main.FALSE
2142 elapsed = 0
2143 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002144 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002145 startTime = time.time()
2146 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002147 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002148 devicesResults = main.TRUE
2149 linksResults = main.TRUE
2150 hostsResults = main.TRUE
2151 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002152 count += 1
2153 cliStart = time.time()
2154 devices = []
2155 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002156 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002157 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002158 name="devices-" + str( i ),
2159 args=[ ] )
2160 threads.append( t )
2161 t.start()
2162
2163 for t in threads:
2164 t.join()
2165 devices.append( t.result )
2166 hosts = []
2167 ipResult = main.TRUE
2168 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002169 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002170 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07002171 name="hosts-" + str( i ),
2172 args=[ ] )
2173 threads.append( t )
2174 t.start()
2175
2176 for t in threads:
2177 t.join()
2178 try:
2179 hosts.append( json.loads( t.result ) )
2180 except ( ValueError, TypeError ):
2181 main.log.exception( "Error parsing hosts results" )
2182 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002183 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002184 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002185 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002186 if hosts[ controller ]:
2187 for host in hosts[ controller ]:
2188 if host is None or host.get( 'ipAddresses', [] ) == []:
2189 main.log.error(
2190 "Error with host ipAddresses on controller" +
2191 controllerStr + ": " + str( host ) )
2192 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002193 ports = []
2194 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002195 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002196 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002197 name="ports-" + str( i ),
2198 args=[ ] )
2199 threads.append( t )
2200 t.start()
2201
2202 for t in threads:
2203 t.join()
2204 ports.append( t.result )
2205 links = []
2206 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002207 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002208 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002209 name="links-" + str( i ),
2210 args=[ ] )
2211 threads.append( t )
2212 t.start()
2213
2214 for t in threads:
2215 t.join()
2216 links.append( t.result )
2217 clusters = []
2218 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002219 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002220 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002221 name="clusters-" + str( i ),
2222 args=[ ] )
2223 threads.append( t )
2224 t.start()
2225
2226 for t in threads:
2227 t.join()
2228 clusters.append( t.result )
2229
2230 elapsed = time.time() - startTime
2231 cliTime = time.time() - cliStart
2232 print "Elapsed time: " + str( elapsed )
2233 print "CLI time: " + str( cliTime )
2234
2235 mnSwitches = main.Mininet1.getSwitches()
2236 mnLinks = main.Mininet1.getLinks()
2237 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002238 for controller in range( len( main.activeNodes ) ):
2239 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002240 if devices[ controller ] and ports[ controller ] and\
2241 "Error" not in devices[ controller ] and\
2242 "Error" not in ports[ controller ]:
2243
2244 currentDevicesResult = main.Mininet1.compareSwitches(
2245 mnSwitches,
2246 json.loads( devices[ controller ] ),
2247 json.loads( ports[ controller ] ) )
2248 else:
2249 currentDevicesResult = main.FALSE
2250 utilities.assert_equals( expect=main.TRUE,
2251 actual=currentDevicesResult,
2252 onpass="ONOS" + controllerStr +
2253 " Switches view is correct",
2254 onfail="ONOS" + controllerStr +
2255 " Switches view is incorrect" )
2256
2257 if links[ controller ] and "Error" not in links[ controller ]:
2258 currentLinksResult = main.Mininet1.compareLinks(
2259 mnSwitches, mnLinks,
2260 json.loads( links[ controller ] ) )
2261 else:
2262 currentLinksResult = main.FALSE
2263 utilities.assert_equals( expect=main.TRUE,
2264 actual=currentLinksResult,
2265 onpass="ONOS" + controllerStr +
2266 " links view is correct",
2267 onfail="ONOS" + controllerStr +
2268 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002269 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002270 currentHostsResult = main.Mininet1.compareHosts(
2271 mnHosts,
2272 hosts[ controller ] )
2273 else:
2274 currentHostsResult = main.FALSE
2275 utilities.assert_equals( expect=main.TRUE,
2276 actual=currentHostsResult,
2277 onpass="ONOS" + controllerStr +
2278 " hosts exist in Mininet",
2279 onfail="ONOS" + controllerStr +
2280 " hosts don't match Mininet" )
2281 # CHECKING HOST ATTACHMENT POINTS
2282 hostAttachment = True
2283 zeroHosts = False
2284 # FIXME: topo-HA/obelisk specific mappings:
2285 # key is mac and value is dpid
2286 mappings = {}
2287 for i in range( 1, 29 ): # hosts 1 through 28
2288 # set up correct variables:
2289 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2290 if i == 1:
2291 deviceId = "1000".zfill(16)
2292 elif i == 2:
2293 deviceId = "2000".zfill(16)
2294 elif i == 3:
2295 deviceId = "3000".zfill(16)
2296 elif i == 4:
2297 deviceId = "3004".zfill(16)
2298 elif i == 5:
2299 deviceId = "5000".zfill(16)
2300 elif i == 6:
2301 deviceId = "6000".zfill(16)
2302 elif i == 7:
2303 deviceId = "6007".zfill(16)
2304 elif i >= 8 and i <= 17:
2305 dpid = '3' + str( i ).zfill( 3 )
2306 deviceId = dpid.zfill(16)
2307 elif i >= 18 and i <= 27:
2308 dpid = '6' + str( i ).zfill( 3 )
2309 deviceId = dpid.zfill(16)
2310 elif i == 28:
2311 deviceId = "2800".zfill(16)
2312 mappings[ macId ] = deviceId
Jon Hall657cdf62015-12-17 14:40:51 -08002313 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002314 if hosts[ controller ] == []:
2315 main.log.warn( "There are no hosts discovered" )
2316 zeroHosts = True
2317 else:
2318 for host in hosts[ controller ]:
2319 mac = None
2320 location = None
2321 device = None
2322 port = None
2323 try:
2324 mac = host.get( 'mac' )
2325 assert mac, "mac field could not be found for this host object"
2326
2327 location = host.get( 'location' )
2328 assert location, "location field could not be found for this host object"
2329
2330 # Trim the protocol identifier off deviceId
2331 device = str( location.get( 'elementId' ) ).split(':')[1]
2332 assert device, "elementId field could not be found for this host location object"
2333
2334 port = location.get( 'port' )
2335 assert port, "port field could not be found for this host location object"
2336
2337 # Now check if this matches where they should be
2338 if mac and device and port:
2339 if str( port ) != "1":
2340 main.log.error( "The attachment port is incorrect for " +
2341 "host " + str( mac ) +
2342 ". Expected: 1 Actual: " + str( port) )
2343 hostAttachment = False
2344 if device != mappings[ str( mac ) ]:
2345 main.log.error( "The attachment device is incorrect for " +
2346 "host " + str( mac ) +
2347 ". Expected: " + mappings[ str( mac ) ] +
2348 " Actual: " + device )
2349 hostAttachment = False
2350 else:
2351 hostAttachment = False
2352 except AssertionError:
2353 main.log.exception( "Json object not as expected" )
2354 main.log.error( repr( host ) )
2355 hostAttachment = False
2356 else:
2357 main.log.error( "No hosts json output or \"Error\"" +
2358 " in output. hosts = " +
2359 repr( hosts[ controller ] ) )
2360 if zeroHosts is False:
2361 hostAttachment = True
2362
2363 # END CHECKING HOST ATTACHMENT POINTS
2364 devicesResults = devicesResults and currentDevicesResult
2365 linksResults = linksResults and currentLinksResult
2366 hostsResults = hostsResults and currentHostsResult
2367 hostAttachmentResults = hostAttachmentResults and\
2368 hostAttachment
Jon Halle9b1fa32015-12-08 15:32:21 -08002369 topoResult = devicesResults and linksResults and\
2370 hostsResults and hostAttachmentResults
2371 utilities.assert_equals( expect=True,
2372 actual=topoResult,
2373 onpass="ONOS topology matches Mininet",
2374 onfail="ONOS topology don't match Mininet" )
2375 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002376
2377 # Compare json objects for hosts and dataplane clusters
2378
2379 # hosts
2380 main.step( "Hosts view is consistent across all ONOS nodes" )
2381 consistentHostsResult = main.TRUE
2382 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002383 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall657cdf62015-12-17 14:40:51 -08002384 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002385 if hosts[ controller ] == hosts[ 0 ]:
2386 continue
2387 else: # hosts not consistent
2388 main.log.error( "hosts from ONOS" + controllerStr +
2389 " is inconsistent with ONOS1" )
2390 main.log.warn( repr( hosts[ controller ] ) )
2391 consistentHostsResult = main.FALSE
2392
2393 else:
2394 main.log.error( "Error in getting ONOS hosts from ONOS" +
2395 controllerStr )
2396 consistentHostsResult = main.FALSE
2397 main.log.warn( "ONOS" + controllerStr +
2398 " hosts response: " +
2399 repr( hosts[ controller ] ) )
2400 utilities.assert_equals(
2401 expect=main.TRUE,
2402 actual=consistentHostsResult,
2403 onpass="Hosts view is consistent across all ONOS nodes",
2404 onfail="ONOS nodes have different views of hosts" )
2405
2406 main.step( "Hosts information is correct" )
2407 hostsResults = hostsResults and ipResult
2408 utilities.assert_equals(
2409 expect=main.TRUE,
2410 actual=hostsResults,
2411 onpass="Host information is correct",
2412 onfail="Host information is incorrect" )
2413
2414 main.step( "Host attachment points to the network" )
2415 utilities.assert_equals(
2416 expect=True,
2417 actual=hostAttachmentResults,
2418 onpass="Hosts are correctly attached to the network",
2419 onfail="ONOS did not correctly attach hosts to the network" )
2420
2421 # Strongly connected clusters of devices
2422 main.step( "Clusters view is consistent across all ONOS nodes" )
2423 consistentClustersResult = main.TRUE
2424 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002425 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002426 if "Error" not in clusters[ controller ]:
2427 if clusters[ controller ] == clusters[ 0 ]:
2428 continue
2429 else: # clusters not consistent
2430 main.log.error( "clusters from ONOS" +
2431 controllerStr +
2432 " is inconsistent with ONOS1" )
2433 consistentClustersResult = main.FALSE
2434
2435 else:
2436 main.log.error( "Error in getting dataplane clusters " +
2437 "from ONOS" + controllerStr )
2438 consistentClustersResult = main.FALSE
2439 main.log.warn( "ONOS" + controllerStr +
2440 " clusters response: " +
2441 repr( clusters[ controller ] ) )
2442 utilities.assert_equals(
2443 expect=main.TRUE,
2444 actual=consistentClustersResult,
2445 onpass="Clusters view is consistent across all ONOS nodes",
2446 onfail="ONOS nodes have different views of clusters" )
2447
2448 main.step( "There is only one SCC" )
2449 # there should always only be one cluster
2450 try:
2451 numClusters = len( json.loads( clusters[ 0 ] ) )
2452 except ( ValueError, TypeError ):
2453 main.log.exception( "Error parsing clusters[0]: " +
2454 repr( clusters[0] ) )
2455 clusterResults = main.FALSE
2456 if numClusters == 1:
2457 clusterResults = main.TRUE
2458 utilities.assert_equals(
2459 expect=1,
2460 actual=numClusters,
2461 onpass="ONOS shows 1 SCC",
2462 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2463
2464 topoResult = ( devicesResults and linksResults
2465 and hostsResults and consistentHostsResult
2466 and consistentClustersResult and clusterResults
2467 and ipResult and hostAttachmentResults )
2468
2469 topoResult = topoResult and int( count <= 2 )
2470 note = "note it takes about " + str( int( cliTime ) ) + \
2471 " seconds for the test to make all the cli calls to fetch " +\
2472 "the topology from each ONOS instance"
2473 main.log.info(
2474 "Very crass estimate for topology discovery/convergence( " +
2475 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2476 str( count ) + " tries" )
2477
2478 main.step( "Device information is correct" )
2479 utilities.assert_equals(
2480 expect=main.TRUE,
2481 actual=devicesResults,
2482 onpass="Device information is correct",
2483 onfail="Device information is incorrect" )
2484
2485 main.step( "Links are correct" )
2486 utilities.assert_equals(
2487 expect=main.TRUE,
2488 actual=linksResults,
2489 onpass="Link are correct",
2490 onfail="Links are incorrect" )
2491
2492 # FIXME: move this to an ONOS state case
2493 main.step( "Checking ONOS nodes" )
2494 nodesOutput = []
2495 nodeResults = main.TRUE
2496 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002497 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002498 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002499 name="nodes-" + str( i ),
2500 args=[ ] )
2501 threads.append( t )
2502 t.start()
2503
2504 for t in threads:
2505 t.join()
2506 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002507 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002508 ips.sort()
Jon Hall5cf14d52015-07-16 12:15:19 -07002509 for i in nodesOutput:
2510 try:
2511 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002512 activeIps = []
2513 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002514 for node in current:
Jon Halle9b1fa32015-12-08 15:32:21 -08002515 if node['state'] == 'ACTIVE':
2516 activeIps.append( node['ip'] )
2517 activeIps.sort()
2518 if ips == activeIps:
2519 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002520 except ( ValueError, TypeError ):
2521 main.log.error( "Error parsing nodes output" )
2522 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002523 currentResult = main.FALSE
2524 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002525 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2526 onpass="Nodes check successful",
2527 onfail="Nodes check NOT successful" )
2528
2529 def CASE9( self, main ):
2530 """
2531 Link s3-s28 down
2532 """
2533 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002534 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002535 assert main, "main not defined"
2536 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002537 assert main.CLIs, "main.CLIs not defined"
2538 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002539 # NOTE: You should probably run a topology check after this
2540
2541 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2542
2543 description = "Turn off a link to ensure that Link Discovery " +\
2544 "is working properly"
2545 main.case( description )
2546
2547 main.step( "Kill Link between s3 and s28" )
2548 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2549 main.log.info( "Waiting " + str( linkSleep ) +
2550 " seconds for link down to be discovered" )
2551 time.sleep( linkSleep )
2552 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2553 onpass="Link down successful",
2554 onfail="Failed to bring link down" )
2555 # TODO do some sort of check here
2556
2557 def CASE10( self, main ):
2558 """
2559 Link s3-s28 up
2560 """
2561 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002562 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002563 assert main, "main not defined"
2564 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002565 assert main.CLIs, "main.CLIs not defined"
2566 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002567 # NOTE: You should probably run a topology check after this
2568
2569 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2570
2571 description = "Restore a link to ensure that Link Discovery is " + \
2572 "working properly"
2573 main.case( description )
2574
2575 main.step( "Bring link between s3 and s28 back up" )
2576 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2577 main.log.info( "Waiting " + str( linkSleep ) +
2578 " seconds for link up to be discovered" )
2579 time.sleep( linkSleep )
2580 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2581 onpass="Link up successful",
2582 onfail="Failed to bring link up" )
2583 # TODO do some sort of check here
2584
2585 def CASE11( self, main ):
2586 """
2587 Switch Down
2588 """
2589 # NOTE: You should probably run a topology check after this
2590 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002591 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002592 assert main, "main not defined"
2593 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002594 assert main.CLIs, "main.CLIs not defined"
2595 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002596
2597 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2598
2599 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002600 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002601 main.case( description )
2602 switch = main.params[ 'kill' ][ 'switch' ]
2603 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2604
2605 # TODO: Make this switch parameterizable
2606 main.step( "Kill " + switch )
2607 main.log.info( "Deleting " + switch )
2608 main.Mininet1.delSwitch( switch )
2609 main.log.info( "Waiting " + str( switchSleep ) +
2610 " seconds for switch down to be discovered" )
2611 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002612 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002613 # Peek at the deleted switch
2614 main.log.warn( str( device ) )
2615 result = main.FALSE
2616 if device and device[ 'available' ] is False:
2617 result = main.TRUE
2618 utilities.assert_equals( expect=main.TRUE, actual=result,
2619 onpass="Kill switch successful",
2620 onfail="Failed to kill switch?" )
2621
2622 def CASE12( self, main ):
2623 """
2624 Switch Up
2625 """
2626 # NOTE: You should probably run a topology check after this
2627 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002628 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002629 assert main, "main not defined"
2630 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002631 assert main.CLIs, "main.CLIs not defined"
2632 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002633 assert ONOS1Port, "ONOS1Port not defined"
2634 assert ONOS2Port, "ONOS2Port not defined"
2635 assert ONOS3Port, "ONOS3Port not defined"
2636 assert ONOS4Port, "ONOS4Port not defined"
2637 assert ONOS5Port, "ONOS5Port not defined"
2638 assert ONOS6Port, "ONOS6Port not defined"
2639 assert ONOS7Port, "ONOS7Port not defined"
2640
2641 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2642 switch = main.params[ 'kill' ][ 'switch' ]
2643 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2644 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002645 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002646 description = "Adding a switch to ensure it is discovered correctly"
2647 main.case( description )
2648
2649 main.step( "Add back " + switch )
2650 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2651 for peer in links:
2652 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002653 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002654 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2655 main.log.info( "Waiting " + str( switchSleep ) +
2656 " seconds for switch up to be discovered" )
2657 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002658 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002659 # Peek at the deleted switch
2660 main.log.warn( str( device ) )
2661 result = main.FALSE
2662 if device and device[ 'available' ]:
2663 result = main.TRUE
2664 utilities.assert_equals( expect=main.TRUE, actual=result,
2665 onpass="add switch successful",
2666 onfail="Failed to add switch?" )
2667
2668 def CASE13( self, main ):
2669 """
2670 Clean up
2671 """
2672 import os
2673 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002674 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002675 assert main, "main not defined"
2676 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002677 assert main.CLIs, "main.CLIs not defined"
2678 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002679
2680 # printing colors to terminal
2681 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2682 'blue': '\033[94m', 'green': '\033[92m',
2683 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2684 main.case( "Test Cleanup" )
2685 main.step( "Killing tcpdumps" )
2686 main.Mininet2.stopTcpdump()
2687
2688 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002689 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002690 main.step( "Copying MN pcap and ONOS log files to test station" )
2691 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2692 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002693 # NOTE: MN Pcap file is being saved to logdir.
2694 # We scp this file as MN and TestON aren't necessarily the same vm
2695
2696 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002697 # TODO: Load these from params
2698 # NOTE: must end in /
2699 logFolder = "/opt/onos/log/"
2700 logFiles = [ "karaf.log", "karaf.log.1" ]
2701 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002702 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002703 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002704 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002705 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2706 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002707 # std*.log's
2708 # NOTE: must end in /
2709 logFolder = "/opt/onos/var/"
2710 logFiles = [ "stderr.log", "stdout.log" ]
2711 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002712 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002713 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002714 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002715 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2716 logFolder + f, dstName )
2717 else:
2718 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002719
2720 main.step( "Stopping Mininet" )
2721 mnResult = main.Mininet1.stopNet()
2722 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2723 onpass="Mininet stopped",
2724 onfail="MN cleanup NOT successful" )
2725
2726 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002727 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002728 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2729 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002730
2731 try:
2732 timerLog = open( main.logdir + "/Timers.csv", 'w')
2733 # Overwrite with empty line and close
2734 labels = "Gossip Intents, Restart"
2735 data = str( gossipTime ) + ", " + str( main.restartTime )
2736 timerLog.write( labels + "\n" + data )
2737 timerLog.close()
2738 except NameError, e:
2739 main.log.exception(e)
2740
2741 def CASE14( self, main ):
2742 """
2743 start election app on all onos nodes
2744 """
Jon Halle1a3b752015-07-22 13:02:46 -07002745 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002746 assert main, "main not defined"
2747 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002748 assert main.CLIs, "main.CLIs not defined"
2749 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002750
2751 main.case("Start Leadership Election app")
2752 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002753 onosCli = main.CLIs[ main.activeNodes[0] ]
2754 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002755 utilities.assert_equals(
2756 expect=main.TRUE,
2757 actual=appResult,
2758 onpass="Election app installed",
2759 onfail="Something went wrong with installing Leadership election" )
2760
2761 main.step( "Run for election on each node" )
2762 leaderResult = main.TRUE
2763 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002764 for i in main.activeNodes:
2765 main.CLIs[i].electionTestRun()
2766 for i in main.activeNodes:
2767 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002768 leader = cli.electionTestLeader()
2769 if leader is None or leader == main.FALSE:
2770 main.log.error( cli.name + ": Leader for the election app " +
2771 "should be an ONOS node, instead got '" +
2772 str( leader ) + "'" )
2773 leaderResult = main.FALSE
2774 leaders.append( leader )
2775 utilities.assert_equals(
2776 expect=main.TRUE,
2777 actual=leaderResult,
2778 onpass="Successfully ran for leadership",
2779 onfail="Failed to run for leadership" )
2780
2781 main.step( "Check that each node shows the same leader" )
2782 sameLeader = main.TRUE
2783 if len( set( leaders ) ) != 1:
2784 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002785 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002786 str( leaders ) )
2787 utilities.assert_equals(
2788 expect=main.TRUE,
2789 actual=sameLeader,
2790 onpass="Leadership is consistent for the election topic",
2791 onfail="Nodes have different leaders" )
2792
2793 def CASE15( self, main ):
2794 """
2795 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002796 15.1 Run election on each node
2797 15.2 Check that each node has the same leaders and candidates
2798 15.3 Find current leader and withdraw
2799 15.4 Check that a new node was elected leader
2800 15.5 Check that that new leader was the candidate of old leader
2801 15.6 Run for election on old leader
2802 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2803 15.8 Make sure that the old leader was added to the candidate list
2804
2805 old and new variable prefixes refer to data from before vs after
2806 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002807 """
2808 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002809 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002810 assert main, "main not defined"
2811 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002812 assert main.CLIs, "main.CLIs not defined"
2813 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002814
Jon Hall5cf14d52015-07-16 12:15:19 -07002815 description = "Check that Leadership Election is still functional"
2816 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002817 # NOTE: Need to re-run since being a canidate is not persistant
2818 # TODO: add check for "Command not found:" in the driver, this
2819 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002820
acsmars71adceb2015-08-31 15:09:26 -07002821 oldLeaders = [] # leaders by node before withdrawl from candidates
2822 newLeaders = [] # leaders by node after withdrawl from candidates
2823 oldAllCandidates = [] # list of lists of each nodes' candidates before
2824 newAllCandidates = [] # list of lists of each nodes' candidates after
2825 oldCandidates = [] # list of candidates from node 0 before withdrawl
2826 newCandidates = [] # list of candidates from node 0 after withdrawl
2827 oldLeader = '' # the old leader from oldLeaders, None if not same
2828 newLeader = '' # the new leaders fron newLoeaders, None if not same
2829 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2830 expectNoLeader = False # True when there is only one leader
2831 if main.numCtrls == 1:
2832 expectNoLeader = True
2833
2834 main.step( "Run for election on each node" )
2835 electionResult = main.TRUE
2836
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002837 for i in main.activeNodes: # run test election on each node
2838 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002839 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002840 utilities.assert_equals(
2841 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002842 actual=electionResult,
2843 onpass="All nodes successfully ran for leadership",
2844 onfail="At least one node failed to run for leadership" )
2845
acsmars3a72bde2015-09-02 14:16:22 -07002846 if electionResult == main.FALSE:
2847 main.log.error(
2848 "Skipping Test Case because Election Test App isn't loaded" )
2849 main.skipCase()
2850
acsmars71adceb2015-08-31 15:09:26 -07002851 main.step( "Check that each node shows the same leader and candidates" )
2852 sameResult = main.TRUE
2853 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002854 for i in main.activeNodes:
2855 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002856 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2857 oldAllCandidates.append( node )
2858 oldLeaders.append( node[ 0 ] )
2859 oldCandidates = oldAllCandidates[ 0 ]
2860
2861 # Check that each node has the same leader. Defines oldLeader
2862 if len( set( oldLeaders ) ) != 1:
2863 sameResult = main.FALSE
2864 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2865 oldLeader = None
2866 else:
2867 oldLeader = oldLeaders[ 0 ]
2868
2869 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002870 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002871 for candidates in oldAllCandidates:
2872 if set( candidates ) != set( oldCandidates ):
2873 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002874 candidateDiscrepancy = True
2875
2876 if candidateDiscrepancy:
2877 failMessage += " and candidates"
2878
acsmars71adceb2015-08-31 15:09:26 -07002879 utilities.assert_equals(
2880 expect=main.TRUE,
2881 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002882 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002883 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002884
2885 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002886 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002887 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002888 if oldLeader is None:
2889 main.log.error( "Leadership isn't consistent." )
2890 withdrawResult = main.FALSE
2891 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002892 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002893 if oldLeader == main.nodes[ i ].ip_address:
2894 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002895 break
2896 else: # FOR/ELSE statement
2897 main.log.error( "Leader election, could not find current leader" )
2898 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002899 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002900 utilities.assert_equals(
2901 expect=main.TRUE,
2902 actual=withdrawResult,
2903 onpass="Node was withdrawn from election",
2904 onfail="Node was not withdrawn from election" )
2905
acsmars71adceb2015-08-31 15:09:26 -07002906 main.step( "Check that a new node was elected leader" )
2907
Jon Hall5cf14d52015-07-16 12:15:19 -07002908 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002909 newLeaderResult = main.TRUE
2910 failMessage = "Nodes have different leaders"
2911
2912 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002913 for i in main.activeNodes:
2914 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002915 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2916 # elections might no have finished yet
2917 if node[ 0 ] == 'none' and not expectNoLeader:
2918 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2919 "sure elections are complete." )
2920 time.sleep(5)
2921 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2922 # election still isn't done or there is a problem
2923 if node[ 0 ] == 'none':
2924 main.log.error( "No leader was elected on at least 1 node" )
2925 newLeaderResult = main.FALSE
2926 newAllCandidates.append( node )
2927 newLeaders.append( node[ 0 ] )
2928 newCandidates = newAllCandidates[ 0 ]
2929
2930 # Check that each node has the same leader. Defines newLeader
2931 if len( set( newLeaders ) ) != 1:
2932 newLeaderResult = main.FALSE
2933 main.log.error( "Nodes have different leaders: " +
2934 str( newLeaders ) )
2935 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002936 else:
acsmars71adceb2015-08-31 15:09:26 -07002937 newLeader = newLeaders[ 0 ]
2938
2939 # Check that each node's candidate list is the same
2940 for candidates in newAllCandidates:
2941 if set( candidates ) != set( newCandidates ):
2942 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002943 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002944
2945 # Check that the new leader is not the older leader, which was withdrawn
2946 if newLeader == oldLeader:
2947 newLeaderResult = main.FALSE
2948 main.log.error( "All nodes still see old leader: " + oldLeader +
2949 " as the current leader" )
2950
Jon Hall5cf14d52015-07-16 12:15:19 -07002951 utilities.assert_equals(
2952 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002953 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002954 onpass="Leadership election passed",
2955 onfail="Something went wrong with Leadership election" )
2956
acsmars71adceb2015-08-31 15:09:26 -07002957 main.step( "Check that that new leader was the candidate of old leader")
2958 # candidates[ 2 ] should be come the top candidate after withdrawl
2959 correctCandidateResult = main.TRUE
2960 if expectNoLeader:
2961 if newLeader == 'none':
2962 main.log.info( "No leader expected. None found. Pass" )
2963 correctCandidateResult = main.TRUE
2964 else:
2965 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2966 correctCandidateResult = main.FALSE
2967 elif newLeader != oldCandidates[ 2 ]:
2968 correctCandidateResult = main.FALSE
2969 main.log.error( "Candidate " + newLeader + " was elected. " +
2970 oldCandidates[ 2 ] + " should have had priority." )
2971
2972 utilities.assert_equals(
2973 expect=main.TRUE,
2974 actual=correctCandidateResult,
2975 onpass="Correct Candidate Elected",
2976 onfail="Incorrect Candidate Elected" )
2977
Jon Hall5cf14d52015-07-16 12:15:19 -07002978 main.step( "Run for election on old leader( just so everyone " +
2979 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002980 if oldLeaderCLI is not None:
2981 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002982 else:
acsmars71adceb2015-08-31 15:09:26 -07002983 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002984 runResult = main.FALSE
2985 utilities.assert_equals(
2986 expect=main.TRUE,
2987 actual=runResult,
2988 onpass="App re-ran for election",
2989 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07002990 main.step(
2991 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002992 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07002993 positionResult = main.TRUE
2994 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
2995
2996 # Reset and reuse the new candidate and leaders lists
2997 newAllCandidates = []
2998 newCandidates = []
2999 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003000 for i in main.activeNodes:
3001 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003002 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3003 if oldLeader not in node: # election might no have finished yet
3004 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3005 "be sure elections are complete" )
3006 time.sleep(5)
3007 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3008 if oldLeader not in node: # election still isn't done, errors
3009 main.log.error(
3010 "Old leader was not elected on at least one node" )
3011 positionResult = main.FALSE
3012 newAllCandidates.append( node )
3013 newLeaders.append( node[ 0 ] )
3014 newCandidates = newAllCandidates[ 0 ]
3015
3016 # Check that each node has the same leader. Defines newLeader
3017 if len( set( newLeaders ) ) != 1:
3018 positionResult = main.FALSE
3019 main.log.error( "Nodes have different leaders: " +
3020 str( newLeaders ) )
3021 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003022 else:
acsmars71adceb2015-08-31 15:09:26 -07003023 newLeader = newLeaders[ 0 ]
3024
3025 # Check that each node's candidate list is the same
3026 for candidates in newAllCandidates:
3027 if set( candidates ) != set( newCandidates ):
3028 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003029 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003030
3031 # Check that the re-elected node is last on the candidate List
3032 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003033 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003034 str( newCandidates ) )
3035 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003036
3037 utilities.assert_equals(
3038 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003039 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003040 onpass="Old leader successfully re-ran for election",
3041 onfail="Something went wrong with Leadership election after " +
3042 "the old leader re-ran for election" )
3043
3044 def CASE16( self, main ):
3045 """
3046 Install Distributed Primitives app
3047 """
3048 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003049 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003050 assert main, "main not defined"
3051 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003052 assert main.CLIs, "main.CLIs not defined"
3053 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003054
3055 # Variables for the distributed primitives tests
3056 global pCounterName
3057 global iCounterName
3058 global pCounterValue
3059 global iCounterValue
3060 global onosSet
3061 global onosSetName
3062 pCounterName = "TestON-Partitions"
3063 iCounterName = "TestON-inMemory"
3064 pCounterValue = 0
3065 iCounterValue = 0
3066 onosSet = set([])
3067 onosSetName = "TestON-set"
3068
3069 description = "Install Primitives app"
3070 main.case( description )
3071 main.step( "Install Primitives app" )
3072 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003073 node = main.activeNodes[0]
3074 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003075 utilities.assert_equals( expect=main.TRUE,
3076 actual=appResults,
3077 onpass="Primitives app activated",
3078 onfail="Primitives app not activated" )
3079 time.sleep( 5 ) # To allow all nodes to activate
3080
3081 def CASE17( self, main ):
3082 """
3083 Check for basic functionality with distributed primitives
3084 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003085 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003086 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003087 assert main, "main not defined"
3088 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003089 assert main.CLIs, "main.CLIs not defined"
3090 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003091 assert pCounterName, "pCounterName not defined"
3092 assert iCounterName, "iCounterName not defined"
3093 assert onosSetName, "onosSetName not defined"
3094 # NOTE: assert fails if value is 0/None/Empty/False
3095 try:
3096 pCounterValue
3097 except NameError:
3098 main.log.error( "pCounterValue not defined, setting to 0" )
3099 pCounterValue = 0
3100 try:
3101 iCounterValue
3102 except NameError:
3103 main.log.error( "iCounterValue not defined, setting to 0" )
3104 iCounterValue = 0
3105 try:
3106 onosSet
3107 except NameError:
3108 main.log.error( "onosSet not defined, setting to empty Set" )
3109 onosSet = set([])
3110 # Variables for the distributed primitives tests. These are local only
3111 addValue = "a"
3112 addAllValue = "a b c d e f"
3113 retainValue = "c d e f"
3114
3115 description = "Check for basic functionality with distributed " +\
3116 "primitives"
3117 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003118 main.caseExplanation = "Test the methods of the distributed " +\
3119 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003120 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003121 # Partitioned counters
3122 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003123 pCounters = []
3124 threads = []
3125 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003126 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003127 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3128 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003129 args=[ pCounterName ] )
3130 pCounterValue += 1
3131 addedPValues.append( pCounterValue )
3132 threads.append( t )
3133 t.start()
3134
3135 for t in threads:
3136 t.join()
3137 pCounters.append( t.result )
3138 # Check that counter incremented numController times
3139 pCounterResults = True
3140 for i in addedPValues:
3141 tmpResult = i in pCounters
3142 pCounterResults = pCounterResults and tmpResult
3143 if not tmpResult:
3144 main.log.error( str( i ) + " is not in partitioned "
3145 "counter incremented results" )
3146 utilities.assert_equals( expect=True,
3147 actual=pCounterResults,
3148 onpass="Default counter incremented",
3149 onfail="Error incrementing default" +
3150 " counter" )
3151
Jon Halle1a3b752015-07-22 13:02:46 -07003152 main.step( "Get then Increment a default counter on each node" )
3153 pCounters = []
3154 threads = []
3155 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003156 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003157 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3158 name="counterGetAndAdd-" + str( i ),
3159 args=[ pCounterName ] )
3160 addedPValues.append( pCounterValue )
3161 pCounterValue += 1
3162 threads.append( t )
3163 t.start()
3164
3165 for t in threads:
3166 t.join()
3167 pCounters.append( t.result )
3168 # Check that counter incremented numController times
3169 pCounterResults = True
3170 for i in addedPValues:
3171 tmpResult = i in pCounters
3172 pCounterResults = pCounterResults and tmpResult
3173 if not tmpResult:
3174 main.log.error( str( i ) + " is not in partitioned "
3175 "counter incremented results" )
3176 utilities.assert_equals( expect=True,
3177 actual=pCounterResults,
3178 onpass="Default counter incremented",
3179 onfail="Error incrementing default" +
3180 " counter" )
3181
3182 main.step( "Counters we added have the correct values" )
3183 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3184 utilities.assert_equals( expect=main.TRUE,
3185 actual=incrementCheck,
3186 onpass="Added counters are correct",
3187 onfail="Added counters are incorrect" )
3188
3189 main.step( "Add -8 to then get a default counter on each node" )
3190 pCounters = []
3191 threads = []
3192 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003193 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003194 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3195 name="counterIncrement-" + str( i ),
3196 args=[ pCounterName ],
3197 kwargs={ "delta": -8 } )
3198 pCounterValue += -8
3199 addedPValues.append( pCounterValue )
3200 threads.append( t )
3201 t.start()
3202
3203 for t in threads:
3204 t.join()
3205 pCounters.append( t.result )
3206 # Check that counter incremented numController times
3207 pCounterResults = True
3208 for i in addedPValues:
3209 tmpResult = i in pCounters
3210 pCounterResults = pCounterResults and tmpResult
3211 if not tmpResult:
3212 main.log.error( str( i ) + " is not in partitioned "
3213 "counter incremented results" )
3214 utilities.assert_equals( expect=True,
3215 actual=pCounterResults,
3216 onpass="Default counter incremented",
3217 onfail="Error incrementing default" +
3218 " counter" )
3219
3220 main.step( "Add 5 to then get a default counter on each node" )
3221 pCounters = []
3222 threads = []
3223 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003224 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003225 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3226 name="counterIncrement-" + str( i ),
3227 args=[ pCounterName ],
3228 kwargs={ "delta": 5 } )
3229 pCounterValue += 5
3230 addedPValues.append( pCounterValue )
3231 threads.append( t )
3232 t.start()
3233
3234 for t in threads:
3235 t.join()
3236 pCounters.append( t.result )
3237 # Check that counter incremented numController times
3238 pCounterResults = True
3239 for i in addedPValues:
3240 tmpResult = i in pCounters
3241 pCounterResults = pCounterResults and tmpResult
3242 if not tmpResult:
3243 main.log.error( str( i ) + " is not in partitioned "
3244 "counter incremented results" )
3245 utilities.assert_equals( expect=True,
3246 actual=pCounterResults,
3247 onpass="Default counter incremented",
3248 onfail="Error incrementing default" +
3249 " counter" )
3250
3251 main.step( "Get then add 5 to a default counter on each node" )
3252 pCounters = []
3253 threads = []
3254 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003255 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003256 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3257 name="counterIncrement-" + str( i ),
3258 args=[ pCounterName ],
3259 kwargs={ "delta": 5 } )
3260 addedPValues.append( pCounterValue )
3261 pCounterValue += 5
3262 threads.append( t )
3263 t.start()
3264
3265 for t in threads:
3266 t.join()
3267 pCounters.append( t.result )
3268 # Check that counter incremented numController times
3269 pCounterResults = True
3270 for i in addedPValues:
3271 tmpResult = i in pCounters
3272 pCounterResults = pCounterResults and tmpResult
3273 if not tmpResult:
3274 main.log.error( str( i ) + " is not in partitioned "
3275 "counter incremented results" )
3276 utilities.assert_equals( expect=True,
3277 actual=pCounterResults,
3278 onpass="Default counter incremented",
3279 onfail="Error incrementing default" +
3280 " counter" )
3281
3282 main.step( "Counters we added have the correct values" )
3283 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3284 utilities.assert_equals( expect=main.TRUE,
3285 actual=incrementCheck,
3286 onpass="Added counters are correct",
3287 onfail="Added counters are incorrect" )
3288
3289 # In-Memory counters
3290 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003291 iCounters = []
3292 addedIValues = []
3293 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003294 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003295 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003296 name="icounterIncrement-" + str( i ),
3297 args=[ iCounterName ],
3298 kwargs={ "inMemory": True } )
3299 iCounterValue += 1
3300 addedIValues.append( iCounterValue )
3301 threads.append( t )
3302 t.start()
3303
3304 for t in threads:
3305 t.join()
3306 iCounters.append( t.result )
3307 # Check that counter incremented numController times
3308 iCounterResults = True
3309 for i in addedIValues:
3310 tmpResult = i in iCounters
3311 iCounterResults = iCounterResults and tmpResult
3312 if not tmpResult:
3313 main.log.error( str( i ) + " is not in the in-memory "
3314 "counter incremented results" )
3315 utilities.assert_equals( expect=True,
3316 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003317 onpass="In-memory counter incremented",
3318 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003319 " counter" )
3320
Jon Halle1a3b752015-07-22 13:02:46 -07003321 main.step( "Get then Increment a in-memory counter on each node" )
3322 iCounters = []
3323 threads = []
3324 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003325 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003326 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3327 name="counterGetAndAdd-" + str( i ),
3328 args=[ iCounterName ],
3329 kwargs={ "inMemory": True } )
3330 addedIValues.append( iCounterValue )
3331 iCounterValue += 1
3332 threads.append( t )
3333 t.start()
3334
3335 for t in threads:
3336 t.join()
3337 iCounters.append( t.result )
3338 # Check that counter incremented numController times
3339 iCounterResults = True
3340 for i in addedIValues:
3341 tmpResult = i in iCounters
3342 iCounterResults = iCounterResults and tmpResult
3343 if not tmpResult:
3344 main.log.error( str( i ) + " is not in in-memory "
3345 "counter incremented results" )
3346 utilities.assert_equals( expect=True,
3347 actual=iCounterResults,
3348 onpass="In-memory counter incremented",
3349 onfail="Error incrementing in-memory" +
3350 " counter" )
3351
3352 main.step( "Counters we added have the correct values" )
3353 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3354 utilities.assert_equals( expect=main.TRUE,
3355 actual=incrementCheck,
3356 onpass="Added counters are correct",
3357 onfail="Added counters are incorrect" )
3358
3359 main.step( "Add -8 to then get a in-memory counter on each node" )
3360 iCounters = []
3361 threads = []
3362 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003363 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003364 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3365 name="counterIncrement-" + str( i ),
3366 args=[ iCounterName ],
3367 kwargs={ "delta": -8, "inMemory": True } )
3368 iCounterValue += -8
3369 addedIValues.append( iCounterValue )
3370 threads.append( t )
3371 t.start()
3372
3373 for t in threads:
3374 t.join()
3375 iCounters.append( t.result )
3376 # Check that counter incremented numController times
3377 iCounterResults = True
3378 for i in addedIValues:
3379 tmpResult = i in iCounters
3380 iCounterResults = iCounterResults and tmpResult
3381 if not tmpResult:
3382 main.log.error( str( i ) + " is not in in-memory "
3383 "counter incremented results" )
3384 utilities.assert_equals( expect=True,
3385 actual=pCounterResults,
3386 onpass="In-memory counter incremented",
3387 onfail="Error incrementing in-memory" +
3388 " counter" )
3389
3390 main.step( "Add 5 to then get a in-memory counter on each node" )
3391 iCounters = []
3392 threads = []
3393 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003394 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003395 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3396 name="counterIncrement-" + str( i ),
3397 args=[ iCounterName ],
3398 kwargs={ "delta": 5, "inMemory": True } )
3399 iCounterValue += 5
3400 addedIValues.append( iCounterValue )
3401 threads.append( t )
3402 t.start()
3403
3404 for t in threads:
3405 t.join()
3406 iCounters.append( t.result )
3407 # Check that counter incremented numController times
3408 iCounterResults = True
3409 for i in addedIValues:
3410 tmpResult = i in iCounters
3411 iCounterResults = iCounterResults and tmpResult
3412 if not tmpResult:
3413 main.log.error( str( i ) + " is not in in-memory "
3414 "counter incremented results" )
3415 utilities.assert_equals( expect=True,
3416 actual=pCounterResults,
3417 onpass="In-memory counter incremented",
3418 onfail="Error incrementing in-memory" +
3419 " counter" )
3420
3421 main.step( "Get then add 5 to a in-memory counter on each node" )
3422 iCounters = []
3423 threads = []
3424 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003425 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003426 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3427 name="counterIncrement-" + str( i ),
3428 args=[ iCounterName ],
3429 kwargs={ "delta": 5, "inMemory": True } )
3430 addedIValues.append( iCounterValue )
3431 iCounterValue += 5
3432 threads.append( t )
3433 t.start()
3434
3435 for t in threads:
3436 t.join()
3437 iCounters.append( t.result )
3438 # Check that counter incremented numController times
3439 iCounterResults = True
3440 for i in addedIValues:
3441 tmpResult = i in iCounters
3442 iCounterResults = iCounterResults and tmpResult
3443 if not tmpResult:
3444 main.log.error( str( i ) + " is not in in-memory "
3445 "counter incremented results" )
3446 utilities.assert_equals( expect=True,
3447 actual=iCounterResults,
3448 onpass="In-memory counter incremented",
3449 onfail="Error incrementing in-memory" +
3450 " counter" )
3451
3452 main.step( "Counters we added have the correct values" )
3453 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3454 utilities.assert_equals( expect=main.TRUE,
3455 actual=incrementCheck,
3456 onpass="Added counters are correct",
3457 onfail="Added counters are incorrect" )
3458
Jon Hall5cf14d52015-07-16 12:15:19 -07003459 main.step( "Check counters are consistant across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003460 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003461 utilities.assert_equals( expect=main.TRUE,
3462 actual=consistentCounterResults,
3463 onpass="ONOS counters are consistent " +
3464 "across nodes",
3465 onfail="ONOS Counters are inconsistent " +
3466 "across nodes" )
3467
3468 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003469 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3470 incrementCheck = incrementCheck and \
3471 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003472 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003473 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003474 onpass="Added counters are correct",
3475 onfail="Added counters are incorrect" )
3476 # DISTRIBUTED SETS
3477 main.step( "Distributed Set get" )
3478 size = len( onosSet )
3479 getResponses = []
3480 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003481 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003482 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003483 name="setTestGet-" + str( i ),
3484 args=[ onosSetName ] )
3485 threads.append( t )
3486 t.start()
3487 for t in threads:
3488 t.join()
3489 getResponses.append( t.result )
3490
3491 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003492 for i in range( len( main.activeNodes ) ):
3493 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003494 if isinstance( getResponses[ i ], list):
3495 current = set( getResponses[ i ] )
3496 if len( current ) == len( getResponses[ i ] ):
3497 # no repeats
3498 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003499 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003500 " has incorrect view" +
3501 " of set " + onosSetName + ":\n" +
3502 str( getResponses[ i ] ) )
3503 main.log.debug( "Expected: " + str( onosSet ) )
3504 main.log.debug( "Actual: " + str( current ) )
3505 getResults = main.FALSE
3506 else:
3507 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003508 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003509 " has repeat elements in" +
3510 " set " + onosSetName + ":\n" +
3511 str( getResponses[ i ] ) )
3512 getResults = main.FALSE
3513 elif getResponses[ i ] == main.ERROR:
3514 getResults = main.FALSE
3515 utilities.assert_equals( expect=main.TRUE,
3516 actual=getResults,
3517 onpass="Set elements are correct",
3518 onfail="Set elements are incorrect" )
3519
3520 main.step( "Distributed Set size" )
3521 sizeResponses = []
3522 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003523 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003524 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003525 name="setTestSize-" + str( i ),
3526 args=[ onosSetName ] )
3527 threads.append( t )
3528 t.start()
3529 for t in threads:
3530 t.join()
3531 sizeResponses.append( t.result )
3532
3533 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003534 for i in range( len( main.activeNodes ) ):
3535 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003536 if size != sizeResponses[ i ]:
3537 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003538 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003539 " expected a size of " + str( size ) +
3540 " for set " + onosSetName +
3541 " but got " + str( sizeResponses[ i ] ) )
3542 utilities.assert_equals( expect=main.TRUE,
3543 actual=sizeResults,
3544 onpass="Set sizes are correct",
3545 onfail="Set sizes are incorrect" )
3546
3547 main.step( "Distributed Set add()" )
3548 onosSet.add( addValue )
3549 addResponses = []
3550 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003551 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003552 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003553 name="setTestAdd-" + str( i ),
3554 args=[ onosSetName, addValue ] )
3555 threads.append( t )
3556 t.start()
3557 for t in threads:
3558 t.join()
3559 addResponses.append( t.result )
3560
3561 # main.TRUE = successfully changed the set
3562 # main.FALSE = action resulted in no change in set
3563 # main.ERROR - Some error in executing the function
3564 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003565 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003566 if addResponses[ i ] == main.TRUE:
3567 # All is well
3568 pass
3569 elif addResponses[ i ] == main.FALSE:
3570 # Already in set, probably fine
3571 pass
3572 elif addResponses[ i ] == main.ERROR:
3573 # Error in execution
3574 addResults = main.FALSE
3575 else:
3576 # unexpected result
3577 addResults = main.FALSE
3578 if addResults != main.TRUE:
3579 main.log.error( "Error executing set add" )
3580
3581 # Check if set is still correct
3582 size = len( onosSet )
3583 getResponses = []
3584 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003585 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003586 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003587 name="setTestGet-" + str( i ),
3588 args=[ onosSetName ] )
3589 threads.append( t )
3590 t.start()
3591 for t in threads:
3592 t.join()
3593 getResponses.append( t.result )
3594 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003595 for i in range( len( main.activeNodes ) ):
3596 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003597 if isinstance( getResponses[ i ], list):
3598 current = set( getResponses[ i ] )
3599 if len( current ) == len( getResponses[ i ] ):
3600 # no repeats
3601 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003602 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003603 " of set " + onosSetName + ":\n" +
3604 str( getResponses[ i ] ) )
3605 main.log.debug( "Expected: " + str( onosSet ) )
3606 main.log.debug( "Actual: " + str( current ) )
3607 getResults = main.FALSE
3608 else:
3609 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003610 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003611 " set " + onosSetName + ":\n" +
3612 str( getResponses[ i ] ) )
3613 getResults = main.FALSE
3614 elif getResponses[ i ] == main.ERROR:
3615 getResults = main.FALSE
3616 sizeResponses = []
3617 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003618 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003619 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003620 name="setTestSize-" + str( i ),
3621 args=[ onosSetName ] )
3622 threads.append( t )
3623 t.start()
3624 for t in threads:
3625 t.join()
3626 sizeResponses.append( t.result )
3627 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003628 for i in range( len( main.activeNodes ) ):
3629 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003630 if size != sizeResponses[ i ]:
3631 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003632 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003633 " expected a size of " + str( size ) +
3634 " for set " + onosSetName +
3635 " but got " + str( sizeResponses[ i ] ) )
3636 addResults = addResults and getResults and sizeResults
3637 utilities.assert_equals( expect=main.TRUE,
3638 actual=addResults,
3639 onpass="Set add correct",
3640 onfail="Set add was incorrect" )
3641
3642 main.step( "Distributed Set addAll()" )
3643 onosSet.update( addAllValue.split() )
3644 addResponses = []
3645 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003646 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003647 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003648 name="setTestAddAll-" + str( i ),
3649 args=[ onosSetName, addAllValue ] )
3650 threads.append( t )
3651 t.start()
3652 for t in threads:
3653 t.join()
3654 addResponses.append( t.result )
3655
3656 # main.TRUE = successfully changed the set
3657 # main.FALSE = action resulted in no change in set
3658 # main.ERROR - Some error in executing the function
3659 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003660 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003661 if addResponses[ i ] == main.TRUE:
3662 # All is well
3663 pass
3664 elif addResponses[ i ] == main.FALSE:
3665 # Already in set, probably fine
3666 pass
3667 elif addResponses[ i ] == main.ERROR:
3668 # Error in execution
3669 addAllResults = main.FALSE
3670 else:
3671 # unexpected result
3672 addAllResults = main.FALSE
3673 if addAllResults != main.TRUE:
3674 main.log.error( "Error executing set addAll" )
3675
3676 # Check if set is still correct
3677 size = len( onosSet )
3678 getResponses = []
3679 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003680 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003681 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003682 name="setTestGet-" + str( i ),
3683 args=[ onosSetName ] )
3684 threads.append( t )
3685 t.start()
3686 for t in threads:
3687 t.join()
3688 getResponses.append( t.result )
3689 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003690 for i in range( len( main.activeNodes ) ):
3691 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003692 if isinstance( getResponses[ i ], list):
3693 current = set( getResponses[ i ] )
3694 if len( current ) == len( getResponses[ i ] ):
3695 # no repeats
3696 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003697 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003698 " has incorrect view" +
3699 " of set " + onosSetName + ":\n" +
3700 str( getResponses[ i ] ) )
3701 main.log.debug( "Expected: " + str( onosSet ) )
3702 main.log.debug( "Actual: " + str( current ) )
3703 getResults = main.FALSE
3704 else:
3705 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003706 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003707 " has repeat elements in" +
3708 " set " + onosSetName + ":\n" +
3709 str( getResponses[ i ] ) )
3710 getResults = main.FALSE
3711 elif getResponses[ i ] == main.ERROR:
3712 getResults = main.FALSE
3713 sizeResponses = []
3714 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003715 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003716 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003717 name="setTestSize-" + str( i ),
3718 args=[ onosSetName ] )
3719 threads.append( t )
3720 t.start()
3721 for t in threads:
3722 t.join()
3723 sizeResponses.append( t.result )
3724 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003725 for i in range( len( main.activeNodes ) ):
3726 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003727 if size != sizeResponses[ i ]:
3728 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003729 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003730 " expected a size of " + str( size ) +
3731 " for set " + onosSetName +
3732 " but got " + str( sizeResponses[ i ] ) )
3733 addAllResults = addAllResults and getResults and sizeResults
3734 utilities.assert_equals( expect=main.TRUE,
3735 actual=addAllResults,
3736 onpass="Set addAll correct",
3737 onfail="Set addAll was incorrect" )
3738
3739 main.step( "Distributed Set contains()" )
3740 containsResponses = []
3741 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003742 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003743 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003744 name="setContains-" + str( i ),
3745 args=[ onosSetName ],
3746 kwargs={ "values": addValue } )
3747 threads.append( t )
3748 t.start()
3749 for t in threads:
3750 t.join()
3751 # NOTE: This is the tuple
3752 containsResponses.append( t.result )
3753
3754 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003755 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003756 if containsResponses[ i ] == main.ERROR:
3757 containsResults = main.FALSE
3758 else:
3759 containsResults = containsResults and\
3760 containsResponses[ i ][ 1 ]
3761 utilities.assert_equals( expect=main.TRUE,
3762 actual=containsResults,
3763 onpass="Set contains is functional",
3764 onfail="Set contains failed" )
3765
3766 main.step( "Distributed Set containsAll()" )
3767 containsAllResponses = []
3768 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003769 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003770 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003771 name="setContainsAll-" + str( i ),
3772 args=[ onosSetName ],
3773 kwargs={ "values": addAllValue } )
3774 threads.append( t )
3775 t.start()
3776 for t in threads:
3777 t.join()
3778 # NOTE: This is the tuple
3779 containsAllResponses.append( t.result )
3780
3781 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003782 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003783 if containsResponses[ i ] == main.ERROR:
3784 containsResults = main.FALSE
3785 else:
3786 containsResults = containsResults and\
3787 containsResponses[ i ][ 1 ]
3788 utilities.assert_equals( expect=main.TRUE,
3789 actual=containsAllResults,
3790 onpass="Set containsAll is functional",
3791 onfail="Set containsAll failed" )
3792
3793 main.step( "Distributed Set remove()" )
3794 onosSet.remove( addValue )
3795 removeResponses = []
3796 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003797 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003798 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003799 name="setTestRemove-" + str( i ),
3800 args=[ onosSetName, addValue ] )
3801 threads.append( t )
3802 t.start()
3803 for t in threads:
3804 t.join()
3805 removeResponses.append( t.result )
3806
3807 # main.TRUE = successfully changed the set
3808 # main.FALSE = action resulted in no change in set
3809 # main.ERROR - Some error in executing the function
3810 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003811 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003812 if removeResponses[ i ] == main.TRUE:
3813 # All is well
3814 pass
3815 elif removeResponses[ i ] == main.FALSE:
3816 # not in set, probably fine
3817 pass
3818 elif removeResponses[ i ] == main.ERROR:
3819 # Error in execution
3820 removeResults = main.FALSE
3821 else:
3822 # unexpected result
3823 removeResults = main.FALSE
3824 if removeResults != main.TRUE:
3825 main.log.error( "Error executing set remove" )
3826
3827 # Check if set is still correct
3828 size = len( onosSet )
3829 getResponses = []
3830 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003831 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003832 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003833 name="setTestGet-" + str( i ),
3834 args=[ onosSetName ] )
3835 threads.append( t )
3836 t.start()
3837 for t in threads:
3838 t.join()
3839 getResponses.append( t.result )
3840 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003841 for i in range( len( main.activeNodes ) ):
3842 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003843 if isinstance( getResponses[ i ], list):
3844 current = set( getResponses[ i ] )
3845 if len( current ) == len( getResponses[ i ] ):
3846 # no repeats
3847 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003848 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003849 " has incorrect view" +
3850 " of set " + onosSetName + ":\n" +
3851 str( getResponses[ i ] ) )
3852 main.log.debug( "Expected: " + str( onosSet ) )
3853 main.log.debug( "Actual: " + str( current ) )
3854 getResults = main.FALSE
3855 else:
3856 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003857 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003858 " has repeat elements in" +
3859 " set " + onosSetName + ":\n" +
3860 str( getResponses[ i ] ) )
3861 getResults = main.FALSE
3862 elif getResponses[ i ] == main.ERROR:
3863 getResults = main.FALSE
3864 sizeResponses = []
3865 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003866 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003867 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003868 name="setTestSize-" + str( i ),
3869 args=[ onosSetName ] )
3870 threads.append( t )
3871 t.start()
3872 for t in threads:
3873 t.join()
3874 sizeResponses.append( t.result )
3875 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003876 for i in range( len( main.activeNodes ) ):
3877 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003878 if size != sizeResponses[ i ]:
3879 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003880 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003881 " expected a size of " + str( size ) +
3882 " for set " + onosSetName +
3883 " but got " + str( sizeResponses[ i ] ) )
3884 removeResults = removeResults and getResults and sizeResults
3885 utilities.assert_equals( expect=main.TRUE,
3886 actual=removeResults,
3887 onpass="Set remove correct",
3888 onfail="Set remove was incorrect" )
3889
3890 main.step( "Distributed Set removeAll()" )
3891 onosSet.difference_update( addAllValue.split() )
3892 removeAllResponses = []
3893 threads = []
3894 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003895 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003896 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003897 name="setTestRemoveAll-" + str( i ),
3898 args=[ onosSetName, addAllValue ] )
3899 threads.append( t )
3900 t.start()
3901 for t in threads:
3902 t.join()
3903 removeAllResponses.append( t.result )
3904 except Exception, e:
3905 main.log.exception(e)
3906
3907 # main.TRUE = successfully changed the set
3908 # main.FALSE = action resulted in no change in set
3909 # main.ERROR - Some error in executing the function
3910 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003911 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003912 if removeAllResponses[ i ] == main.TRUE:
3913 # All is well
3914 pass
3915 elif removeAllResponses[ i ] == main.FALSE:
3916 # not in set, probably fine
3917 pass
3918 elif removeAllResponses[ i ] == main.ERROR:
3919 # Error in execution
3920 removeAllResults = main.FALSE
3921 else:
3922 # unexpected result
3923 removeAllResults = main.FALSE
3924 if removeAllResults != main.TRUE:
3925 main.log.error( "Error executing set removeAll" )
3926
3927 # Check if set is still correct
3928 size = len( onosSet )
3929 getResponses = []
3930 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003931 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003932 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003933 name="setTestGet-" + str( i ),
3934 args=[ onosSetName ] )
3935 threads.append( t )
3936 t.start()
3937 for t in threads:
3938 t.join()
3939 getResponses.append( t.result )
3940 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003941 for i in range( len( main.activeNodes ) ):
3942 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003943 if isinstance( getResponses[ i ], list):
3944 current = set( getResponses[ i ] )
3945 if len( current ) == len( getResponses[ i ] ):
3946 # no repeats
3947 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003948 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003949 " has incorrect view" +
3950 " of set " + onosSetName + ":\n" +
3951 str( getResponses[ i ] ) )
3952 main.log.debug( "Expected: " + str( onosSet ) )
3953 main.log.debug( "Actual: " + str( current ) )
3954 getResults = main.FALSE
3955 else:
3956 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003957 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003958 " has repeat elements in" +
3959 " set " + onosSetName + ":\n" +
3960 str( getResponses[ i ] ) )
3961 getResults = main.FALSE
3962 elif getResponses[ i ] == main.ERROR:
3963 getResults = main.FALSE
3964 sizeResponses = []
3965 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003966 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003967 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003968 name="setTestSize-" + str( i ),
3969 args=[ onosSetName ] )
3970 threads.append( t )
3971 t.start()
3972 for t in threads:
3973 t.join()
3974 sizeResponses.append( t.result )
3975 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003976 for i in range( len( main.activeNodes ) ):
3977 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003978 if size != sizeResponses[ i ]:
3979 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003980 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003981 " expected a size of " + str( size ) +
3982 " for set " + onosSetName +
3983 " but got " + str( sizeResponses[ i ] ) )
3984 removeAllResults = removeAllResults and getResults and sizeResults
3985 utilities.assert_equals( expect=main.TRUE,
3986 actual=removeAllResults,
3987 onpass="Set removeAll correct",
3988 onfail="Set removeAll was incorrect" )
3989
3990 main.step( "Distributed Set addAll()" )
3991 onosSet.update( addAllValue.split() )
3992 addResponses = []
3993 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003994 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003995 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003996 name="setTestAddAll-" + str( i ),
3997 args=[ onosSetName, addAllValue ] )
3998 threads.append( t )
3999 t.start()
4000 for t in threads:
4001 t.join()
4002 addResponses.append( t.result )
4003
4004 # main.TRUE = successfully changed the set
4005 # main.FALSE = action resulted in no change in set
4006 # main.ERROR - Some error in executing the function
4007 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004008 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004009 if addResponses[ i ] == main.TRUE:
4010 # All is well
4011 pass
4012 elif addResponses[ i ] == main.FALSE:
4013 # Already in set, probably fine
4014 pass
4015 elif addResponses[ i ] == main.ERROR:
4016 # Error in execution
4017 addAllResults = main.FALSE
4018 else:
4019 # unexpected result
4020 addAllResults = main.FALSE
4021 if addAllResults != main.TRUE:
4022 main.log.error( "Error executing set addAll" )
4023
4024 # Check if set is still correct
4025 size = len( onosSet )
4026 getResponses = []
4027 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004028 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004029 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004030 name="setTestGet-" + str( i ),
4031 args=[ onosSetName ] )
4032 threads.append( t )
4033 t.start()
4034 for t in threads:
4035 t.join()
4036 getResponses.append( t.result )
4037 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004038 for i in range( len( main.activeNodes ) ):
4039 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004040 if isinstance( getResponses[ i ], list):
4041 current = set( getResponses[ i ] )
4042 if len( current ) == len( getResponses[ i ] ):
4043 # no repeats
4044 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004045 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004046 " has incorrect view" +
4047 " of set " + onosSetName + ":\n" +
4048 str( getResponses[ i ] ) )
4049 main.log.debug( "Expected: " + str( onosSet ) )
4050 main.log.debug( "Actual: " + str( current ) )
4051 getResults = main.FALSE
4052 else:
4053 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004054 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004055 " has repeat elements in" +
4056 " set " + onosSetName + ":\n" +
4057 str( getResponses[ i ] ) )
4058 getResults = main.FALSE
4059 elif getResponses[ i ] == main.ERROR:
4060 getResults = main.FALSE
4061 sizeResponses = []
4062 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004063 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004064 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004065 name="setTestSize-" + str( i ),
4066 args=[ onosSetName ] )
4067 threads.append( t )
4068 t.start()
4069 for t in threads:
4070 t.join()
4071 sizeResponses.append( t.result )
4072 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004073 for i in range( len( main.activeNodes ) ):
4074 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004075 if size != sizeResponses[ i ]:
4076 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004077 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004078 " expected a size of " + str( size ) +
4079 " for set " + onosSetName +
4080 " but got " + str( sizeResponses[ i ] ) )
4081 addAllResults = addAllResults and getResults and sizeResults
4082 utilities.assert_equals( expect=main.TRUE,
4083 actual=addAllResults,
4084 onpass="Set addAll correct",
4085 onfail="Set addAll was incorrect" )
4086
4087 main.step( "Distributed Set clear()" )
4088 onosSet.clear()
4089 clearResponses = []
4090 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004091 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004092 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004093 name="setTestClear-" + str( i ),
4094 args=[ onosSetName, " "], # Values doesn't matter
4095 kwargs={ "clear": True } )
4096 threads.append( t )
4097 t.start()
4098 for t in threads:
4099 t.join()
4100 clearResponses.append( t.result )
4101
4102 # main.TRUE = successfully changed the set
4103 # main.FALSE = action resulted in no change in set
4104 # main.ERROR - Some error in executing the function
4105 clearResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004106 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004107 if clearResponses[ i ] == main.TRUE:
4108 # All is well
4109 pass
4110 elif clearResponses[ i ] == main.FALSE:
4111 # Nothing set, probably fine
4112 pass
4113 elif clearResponses[ i ] == main.ERROR:
4114 # Error in execution
4115 clearResults = main.FALSE
4116 else:
4117 # unexpected result
4118 clearResults = main.FALSE
4119 if clearResults != main.TRUE:
4120 main.log.error( "Error executing set clear" )
4121
4122 # Check if set is still correct
4123 size = len( onosSet )
4124 getResponses = []
4125 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004126 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004127 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004128 name="setTestGet-" + str( i ),
4129 args=[ onosSetName ] )
4130 threads.append( t )
4131 t.start()
4132 for t in threads:
4133 t.join()
4134 getResponses.append( t.result )
4135 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004136 for i in range( len( main.activeNodes ) ):
4137 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004138 if isinstance( getResponses[ i ], list):
4139 current = set( getResponses[ i ] )
4140 if len( current ) == len( getResponses[ i ] ):
4141 # no repeats
4142 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004143 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004144 " has incorrect view" +
4145 " of set " + onosSetName + ":\n" +
4146 str( getResponses[ i ] ) )
4147 main.log.debug( "Expected: " + str( onosSet ) )
4148 main.log.debug( "Actual: " + str( current ) )
4149 getResults = main.FALSE
4150 else:
4151 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004152 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004153 " has repeat elements in" +
4154 " set " + onosSetName + ":\n" +
4155 str( getResponses[ i ] ) )
4156 getResults = main.FALSE
4157 elif getResponses[ i ] == main.ERROR:
4158 getResults = main.FALSE
4159 sizeResponses = []
4160 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004161 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004162 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004163 name="setTestSize-" + str( i ),
4164 args=[ onosSetName ] )
4165 threads.append( t )
4166 t.start()
4167 for t in threads:
4168 t.join()
4169 sizeResponses.append( t.result )
4170 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004171 for i in range( len( main.activeNodes ) ):
4172 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004173 if size != sizeResponses[ i ]:
4174 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004175 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004176 " expected a size of " + str( size ) +
4177 " for set " + onosSetName +
4178 " but got " + str( sizeResponses[ i ] ) )
4179 clearResults = clearResults and getResults and sizeResults
4180 utilities.assert_equals( expect=main.TRUE,
4181 actual=clearResults,
4182 onpass="Set clear correct",
4183 onfail="Set clear was incorrect" )
4184
4185 main.step( "Distributed Set addAll()" )
4186 onosSet.update( addAllValue.split() )
4187 addResponses = []
4188 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004189 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004190 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004191 name="setTestAddAll-" + str( i ),
4192 args=[ onosSetName, addAllValue ] )
4193 threads.append( t )
4194 t.start()
4195 for t in threads:
4196 t.join()
4197 addResponses.append( t.result )
4198
4199 # main.TRUE = successfully changed the set
4200 # main.FALSE = action resulted in no change in set
4201 # main.ERROR - Some error in executing the function
4202 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004203 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004204 if addResponses[ i ] == main.TRUE:
4205 # All is well
4206 pass
4207 elif addResponses[ i ] == main.FALSE:
4208 # Already in set, probably fine
4209 pass
4210 elif addResponses[ i ] == main.ERROR:
4211 # Error in execution
4212 addAllResults = main.FALSE
4213 else:
4214 # unexpected result
4215 addAllResults = main.FALSE
4216 if addAllResults != main.TRUE:
4217 main.log.error( "Error executing set addAll" )
4218
4219 # Check if set is still correct
4220 size = len( onosSet )
4221 getResponses = []
4222 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004223 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004224 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004225 name="setTestGet-" + str( i ),
4226 args=[ onosSetName ] )
4227 threads.append( t )
4228 t.start()
4229 for t in threads:
4230 t.join()
4231 getResponses.append( t.result )
4232 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004233 for i in range( len( main.activeNodes ) ):
4234 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004235 if isinstance( getResponses[ i ], list):
4236 current = set( getResponses[ i ] )
4237 if len( current ) == len( getResponses[ i ] ):
4238 # no repeats
4239 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004240 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004241 " has incorrect view" +
4242 " of set " + onosSetName + ":\n" +
4243 str( getResponses[ i ] ) )
4244 main.log.debug( "Expected: " + str( onosSet ) )
4245 main.log.debug( "Actual: " + str( current ) )
4246 getResults = main.FALSE
4247 else:
4248 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004249 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004250 " has repeat elements in" +
4251 " set " + onosSetName + ":\n" +
4252 str( getResponses[ i ] ) )
4253 getResults = main.FALSE
4254 elif getResponses[ i ] == main.ERROR:
4255 getResults = main.FALSE
4256 sizeResponses = []
4257 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004258 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004259 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004260 name="setTestSize-" + str( i ),
4261 args=[ onosSetName ] )
4262 threads.append( t )
4263 t.start()
4264 for t in threads:
4265 t.join()
4266 sizeResponses.append( t.result )
4267 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004268 for i in range( len( main.activeNodes ) ):
4269 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004270 if size != sizeResponses[ i ]:
4271 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004272 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004273 " expected a size of " + str( size ) +
4274 " for set " + onosSetName +
4275 " but got " + str( sizeResponses[ i ] ) )
4276 addAllResults = addAllResults and getResults and sizeResults
4277 utilities.assert_equals( expect=main.TRUE,
4278 actual=addAllResults,
4279 onpass="Set addAll correct",
4280 onfail="Set addAll was incorrect" )
4281
4282 main.step( "Distributed Set retain()" )
4283 onosSet.intersection_update( retainValue.split() )
4284 retainResponses = []
4285 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004286 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004287 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004288 name="setTestRetain-" + str( i ),
4289 args=[ onosSetName, retainValue ],
4290 kwargs={ "retain": True } )
4291 threads.append( t )
4292 t.start()
4293 for t in threads:
4294 t.join()
4295 retainResponses.append( t.result )
4296
4297 # main.TRUE = successfully changed the set
4298 # main.FALSE = action resulted in no change in set
4299 # main.ERROR - Some error in executing the function
4300 retainResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004301 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004302 if retainResponses[ i ] == main.TRUE:
4303 # All is well
4304 pass
4305 elif retainResponses[ i ] == main.FALSE:
4306 # Already in set, probably fine
4307 pass
4308 elif retainResponses[ i ] == main.ERROR:
4309 # Error in execution
4310 retainResults = main.FALSE
4311 else:
4312 # unexpected result
4313 retainResults = main.FALSE
4314 if retainResults != main.TRUE:
4315 main.log.error( "Error executing set retain" )
4316
4317 # Check if set is still correct
4318 size = len( onosSet )
4319 getResponses = []
4320 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004321 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004322 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004323 name="setTestGet-" + str( i ),
4324 args=[ onosSetName ] )
4325 threads.append( t )
4326 t.start()
4327 for t in threads:
4328 t.join()
4329 getResponses.append( t.result )
4330 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004331 for i in range( len( main.activeNodes ) ):
4332 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004333 if isinstance( getResponses[ i ], list):
4334 current = set( getResponses[ i ] )
4335 if len( current ) == len( getResponses[ i ] ):
4336 # no repeats
4337 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004338 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004339 " has incorrect view" +
4340 " of set " + onosSetName + ":\n" +
4341 str( getResponses[ i ] ) )
4342 main.log.debug( "Expected: " + str( onosSet ) )
4343 main.log.debug( "Actual: " + str( current ) )
4344 getResults = main.FALSE
4345 else:
4346 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004347 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004348 " has repeat elements in" +
4349 " set " + onosSetName + ":\n" +
4350 str( getResponses[ i ] ) )
4351 getResults = main.FALSE
4352 elif getResponses[ i ] == main.ERROR:
4353 getResults = main.FALSE
4354 sizeResponses = []
4355 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004356 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004357 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004358 name="setTestSize-" + str( i ),
4359 args=[ onosSetName ] )
4360 threads.append( t )
4361 t.start()
4362 for t in threads:
4363 t.join()
4364 sizeResponses.append( t.result )
4365 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004366 for i in range( len( main.activeNodes ) ):
4367 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004368 if size != sizeResponses[ i ]:
4369 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004370 main.log.error( "ONOS" + node + " expected a size of " +
Jon Hall5cf14d52015-07-16 12:15:19 -07004371 str( size ) + " for set " + onosSetName +
4372 " but got " + str( sizeResponses[ i ] ) )
4373 retainResults = retainResults and getResults and sizeResults
4374 utilities.assert_equals( expect=main.TRUE,
4375 actual=retainResults,
4376 onpass="Set retain correct",
4377 onfail="Set retain was incorrect" )
4378
Jon Hall2a5002c2015-08-21 16:49:11 -07004379 # Transactional maps
4380 main.step( "Partitioned Transactional maps put" )
4381 tMapValue = "Testing"
4382 numKeys = 100
4383 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004384 node = main.activeNodes[0]
4385 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall2a5002c2015-08-21 16:49:11 -07004386 if len( putResponses ) == 100:
4387 for i in putResponses:
4388 if putResponses[ i ][ 'value' ] != tMapValue:
4389 putResult = False
4390 else:
4391 putResult = False
4392 if not putResult:
4393 main.log.debug( "Put response values: " + str( putResponses ) )
4394 utilities.assert_equals( expect=True,
4395 actual=putResult,
4396 onpass="Partitioned Transactional Map put successful",
4397 onfail="Partitioned Transactional Map put values are incorrect" )
4398
4399 main.step( "Partitioned Transactional maps get" )
4400 getCheck = True
4401 for n in range( 1, numKeys + 1 ):
4402 getResponses = []
4403 threads = []
4404 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004405 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004406 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4407 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004408 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004409 threads.append( t )
4410 t.start()
4411 for t in threads:
4412 t.join()
4413 getResponses.append( t.result )
4414 for node in getResponses:
4415 if node != tMapValue:
4416 valueCheck = False
4417 if not valueCheck:
4418 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4419 main.log.warn( getResponses )
4420 getCheck = getCheck and valueCheck
4421 utilities.assert_equals( expect=True,
4422 actual=getCheck,
4423 onpass="Partitioned Transactional Map get values were correct",
4424 onfail="Partitioned Transactional Map values incorrect" )
4425
4426 main.step( "In-memory Transactional maps put" )
4427 tMapValue = "Testing"
4428 numKeys = 100
4429 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004430 node = main.activeNodes[0]
4431 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
Jon Hall2a5002c2015-08-21 16:49:11 -07004432 if len( putResponses ) == 100:
4433 for i in putResponses:
4434 if putResponses[ i ][ 'value' ] != tMapValue:
4435 putResult = False
4436 else:
4437 putResult = False
4438 if not putResult:
4439 main.log.debug( "Put response values: " + str( putResponses ) )
4440 utilities.assert_equals( expect=True,
4441 actual=putResult,
4442 onpass="In-Memory Transactional Map put successful",
4443 onfail="In-Memory Transactional Map put values are incorrect" )
4444
4445 main.step( "In-Memory Transactional maps get" )
4446 getCheck = True
4447 for n in range( 1, numKeys + 1 ):
4448 getResponses = []
4449 threads = []
4450 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004451 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004452 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4453 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004454 args=[ "Key" + str( n ) ],
Jon Hall2a5002c2015-08-21 16:49:11 -07004455 kwargs={ "inMemory": True } )
4456 threads.append( t )
4457 t.start()
4458 for t in threads:
4459 t.join()
4460 getResponses.append( t.result )
4461 for node in getResponses:
4462 if node != tMapValue:
4463 valueCheck = False
4464 if not valueCheck:
4465 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4466 main.log.warn( getResponses )
4467 getCheck = getCheck and valueCheck
4468 utilities.assert_equals( expect=True,
4469 actual=getCheck,
4470 onpass="In-Memory Transactional Map get values were correct",
4471 onfail="In-Memory Transactional Map values incorrect" )