blob: b87af443c3e4dd832f0a7c4526ca5956f396ab19 [file] [log] [blame]
Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
Description: This test is to determine if ONOS can handle
             a minority of its nodes restarting
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
Jon Hallb3ed8ed2015-10-28 16:43:55 -070012CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
Jon Hall5cf14d52015-07-16 12:15:19 -070014CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
Jon Hallb3ed8ed2015-10-28 16:43:55 -070028class HAstopNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -070029
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hall3b489db2015-10-05 14:38:37 -070052 import pexpect
Jon Hallf3d16e72015-12-16 17:45:08 -080053 import time
Jon Hallb3ed8ed2015-10-28 16:43:55 -070054 main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
Jon Hall5cf14d52015-07-16 12:15:19 -070055 "initialization" )
56 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070057 main.caseExplanation = "Setup the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070058 "installing ONOS, starting Mininet and ONOS" +\
59 "cli sessions."
60 # TODO: save all the timers and output them for plotting
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
Jon Halle1a3b752015-07-22 13:02:46 -070069 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070070 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070071 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070074 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
81
82 # FIXME: just get controller port from params?
83 # TODO: do we really need all these?
84 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
85 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
86 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
87 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
88 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
89 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
90 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
91
Jon Halle1a3b752015-07-22 13:02:46 -070092 try:
93 fileName = "Counters"
94 # TODO: Maybe make a library folder somewhere?
95 path = main.params[ 'imports' ][ 'path' ]
96 main.Counters = imp.load_source( fileName,
97 path + fileName + ".py" )
98 except Exception as e:
99 main.log.exception( e )
100 main.cleanup()
101 main.exit()
102
103 main.CLIs = []
104 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700105 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700106 for i in range( 1, main.numCtrls + 1 ):
107 try:
108 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
109 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
110 ipList.append( main.nodes[ -1 ].ip_address )
111 except AttributeError:
112 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700113
114 main.step( "Create cell file" )
115 cellAppString = main.params[ 'ENV' ][ 'appString' ]
116 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
117 main.Mininet1.ip_address,
118 cellAppString, ipList )
119 main.step( "Applying cell variable to environment" )
120 cellResult = main.ONOSbench.setCell( cellName )
121 verifyResult = main.ONOSbench.verifyCell()
122
123 # FIXME:this is short term fix
124 main.log.info( "Removing raft logs" )
125 main.ONOSbench.onosRemoveRaftLogs()
126
127 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700128 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700129 main.ONOSbench.onosUninstall( node.ip_address )
130
131 # Make sure ONOS is DEAD
132 main.log.info( "Killing any ONOS processes" )
133 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700134 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700135 killed = main.ONOSbench.onosKill( node.ip_address )
136 killResults = killResults and killed
137
138 cleanInstallResult = main.TRUE
139 gitPullResult = main.TRUE
140
141 main.step( "Starting Mininet" )
142 # scp topo file to mininet
143 # TODO: move to params?
144 topoName = "obelisk.py"
145 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700146 main.ONOSbench.scp( main.Mininet1,
147 filePath + topoName,
148 main.Mininet1.home,
149 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700150 mnResult = main.Mininet1.startNet( )
151 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
152 onpass="Mininet Started",
153 onfail="Error starting Mininet" )
154
155 main.step( "Git checkout and pull " + gitBranch )
156 if PULLCODE:
157 main.ONOSbench.gitCheckout( gitBranch )
158 gitPullResult = main.ONOSbench.gitPull()
159 # values of 1 or 3 are good
160 utilities.assert_lesser( expect=0, actual=gitPullResult,
161 onpass="Git pull successful",
162 onfail="Git pull failed" )
163 main.ONOSbench.getVersion( report=True )
164
165 main.step( "Using mvn clean install" )
166 cleanInstallResult = main.TRUE
167 if PULLCODE and gitPullResult == main.TRUE:
168 cleanInstallResult = main.ONOSbench.cleanInstall()
169 else:
170 main.log.warn( "Did not pull new code so skipping mvn " +
171 "clean install" )
172 utilities.assert_equals( expect=main.TRUE,
173 actual=cleanInstallResult,
174 onpass="MCI successful",
175 onfail="MCI failed" )
176 # GRAPHS
177 # NOTE: important params here:
178 # job = name of Jenkins job
179 # Plot Name = Plot-HA, only can be used if multiple plots
180 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700181 job = "HAstopNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700182 plotName = "Plot-HA"
Jon Halla9845df2016-01-15 14:55:58 -0800183 index = "0"
Jon Hall5cf14d52015-07-16 12:15:19 -0700184 graphs = '<ac:structured-macro ac:name="html">\n'
185 graphs += '<ac:plain-text-body><![CDATA[\n'
186 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
Jon Halla9845df2016-01-15 14:55:58 -0800187 '/plot/' + plotName + '/getPlot?index=' + index +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700188 '&width=500&height=300"' +\
189 'noborder="0" width="500" height="300" scrolling="yes" ' +\
190 'seamless="seamless"></iframe>\n'
191 graphs += ']]></ac:plain-text-body>\n'
192 graphs += '</ac:structured-macro>\n'
193 main.log.wiki(graphs)
194
195 main.step( "Creating ONOS package" )
Jon Hall3b489db2015-10-05 14:38:37 -0700196 # copy gen-partions file to ONOS
197 # NOTE: this assumes TestON and ONOS are on the same machine
198 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
199 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
200 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
201 main.ONOSbench.ip_address,
202 srcFile,
203 dstDir,
204 pwd=main.ONOSbench.pwd,
205 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700206 packageResult = main.ONOSbench.onosPackage()
207 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
208 onpass="ONOS package successful",
209 onfail="ONOS package failed" )
210
211 main.step( "Installing ONOS package" )
212 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700213 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700214 tmpResult = main.ONOSbench.onosInstall( options="-f",
215 node=node.ip_address )
216 onosInstallResult = onosInstallResult and tmpResult
217 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
218 onpass="ONOS install successful",
219 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700220 # clean up gen-partitions file
221 try:
222 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
223 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
224 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
225 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
226 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
227 str( main.ONOSbench.handle.before ) )
228 except ( pexpect.TIMEOUT, pexpect.EOF ):
229 main.log.exception( "ONOSbench: pexpect exception found:" +
230 main.ONOSbench.handle.before )
231 main.cleanup()
232 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700233
234 main.step( "Checking if ONOS is up yet" )
235 for i in range( 2 ):
236 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700237 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700238 started = main.ONOSbench.isup( node.ip_address )
239 if not started:
240 main.log.error( node.name + " didn't start!" )
241 main.ONOSbench.onosStop( node.ip_address )
242 main.ONOSbench.onosStart( node.ip_address )
243 onosIsupResult = onosIsupResult and started
244 if onosIsupResult == main.TRUE:
245 break
246 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
247 onpass="ONOS startup successful",
248 onfail="ONOS startup failed" )
249
250 main.log.step( "Starting ONOS CLI sessions" )
251 cliResults = main.TRUE
252 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700253 for i in range( main.numCtrls ):
254 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700255 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700256 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700257 threads.append( t )
258 t.start()
259
260 for t in threads:
261 t.join()
262 cliResults = cliResults and t.result
263 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
264 onpass="ONOS cli startup successful",
265 onfail="ONOS cli startup failed" )
266
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700267 # Create a list of active nodes for use when some nodes are stopped
268 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
269
Jon Hall5cf14d52015-07-16 12:15:19 -0700270 if main.params[ 'tcpdump' ].lower() == "true":
271 main.step( "Start Packet Capture MN" )
272 main.Mininet2.startTcpdump(
273 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
274 + "-MN.pcap",
275 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
276 port=main.params[ 'MNtcpdump' ][ 'port' ] )
277
278 main.step( "App Ids check" )
Jon Hallf3d16e72015-12-16 17:45:08 -0800279 time.sleep(60)
Jon Hall5cf14d52015-07-16 12:15:19 -0700280 appCheck = main.TRUE
281 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700282 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700283 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700284 name="appToIDCheck-" + str( i ),
285 args=[] )
286 threads.append( t )
287 t.start()
288
289 for t in threads:
290 t.join()
291 appCheck = appCheck and t.result
292 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700293 node = main.activeNodes[0]
294 main.log.warn( main.CLIs[node].apps() )
295 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700296 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
297 onpass="App Ids seem to be correct",
298 onfail="Something is wrong with app Ids" )
299
300 if cliResults == main.FALSE:
301 main.log.error( "Failed to start ONOS, stopping test" )
302 main.cleanup()
303 main.exit()
304
    def CASE2( self, main ):
        """
        Assign devices to controllers

        Points every Mininet switch (s1-s28) at the full set of ONOS
        controllers via the Mininet driver, then reads each switch's
        controller list back and verifies every node's IP appears in it.
        """
        import re
        # Fail fast if CASE1 did not run and populate these globals/attributes
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                                "and check that an ONOS node becomes the " +\
                                "master of the device."
        main.step( "Assign switches to controllers" )

        # All controller IPs and all 28 switch names for the bulk assignment
        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        # Verify from the switch side: each switch must list every controller
        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                # NOTE(review): assumes getSwController output contains
                # "tcp:<ip>" entries — verify against the Mininet driver
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )
357
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually maps each switch (s1-s28) to a designated ONOS node with
        'device-role', then reads the master back for every device and
        checks it matches the requested controller.
        """
        import time
        # Fail fast if CASE1 did not run and populate these globals/attributes
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                                "device. Then manually assign" +\
                                " mastership to specific ONOS nodes using" +\
                                " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI calls go through the first node still marked active
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster (the "% main.numCtrls" wraps indices into range)
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    # dpid suffix like "3008".."3017" for this switch range
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice may return None (AttributeError on .get) or the
            # device may be missing entirely (AssertionError above)
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
479
480 def CASE3( self, main ):
481 """
482 Assign intents
483 """
484 import time
485 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700486 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700487 assert main, "main not defined"
488 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700489 assert main.CLIs, "main.CLIs not defined"
490 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700491 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700492 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700493 "assign predetermined host-to-host intents." +\
494 " After installation, check that the intent" +\
495 " is distributed to all nodes and the state" +\
496 " is INSTALLED"
497
498 # install onos-app-fwd
499 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700500 onosCli = main.CLIs[ main.activeNodes[0] ]
501 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700502 utilities.assert_equals( expect=main.TRUE, actual=installResults,
503 onpass="Install fwd successful",
504 onfail="Install fwd failed" )
505
506 main.step( "Check app ids" )
507 appCheck = main.TRUE
508 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700509 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700510 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700511 name="appToIDCheck-" + str( i ),
512 args=[] )
513 threads.append( t )
514 t.start()
515
516 for t in threads:
517 t.join()
518 appCheck = appCheck and t.result
519 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700520 main.log.warn( onosCli.apps() )
521 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700522 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
523 onpass="App Ids seem to be correct",
524 onfail="Something is wrong with app Ids" )
525
526 main.step( "Discovering Hosts( Via pingall for now )" )
527 # FIXME: Once we have a host discovery mechanism, use that instead
528 # REACTIVE FWD test
529 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700530 passMsg = "Reactive Pingall test passed"
531 time1 = time.time()
532 pingResult = main.Mininet1.pingall()
533 time2 = time.time()
534 if not pingResult:
535 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700536 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700537 passMsg += " on the second try"
538 utilities.assert_equals(
539 expect=main.TRUE,
540 actual=pingResult,
541 onpass= passMsg,
542 onfail="Reactive Pingall failed, " +
543 "one or more ping pairs failed" )
544 main.log.info( "Time for pingall: %2f seconds" %
545 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700546 # timeout for fwd flows
547 time.sleep( 11 )
548 # uninstall onos-app-fwd
549 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700550 node = main.activeNodes[0]
551 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700552 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
553 onpass="Uninstall fwd successful",
554 onfail="Uninstall fwd failed" )
555
556 main.step( "Check app ids" )
557 threads = []
558 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700559 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700560 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700561 name="appToIDCheck-" + str( i ),
562 args=[] )
563 threads.append( t )
564 t.start()
565
566 for t in threads:
567 t.join()
568 appCheck2 = appCheck2 and t.result
569 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700570 node = main.activeNodes[0]
571 main.log.warn( main.CLIs[node].apps() )
572 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700573 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
574 onpass="App Ids seem to be correct",
575 onfail="Something is wrong with app Ids" )
576
577 main.step( "Add host intents via cli" )
578 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700579 # TODO: move the host numbers to params
580 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700581 intentAddResult = True
582 hostResult = main.TRUE
583 for i in range( 8, 18 ):
584 main.log.info( "Adding host intent between h" + str( i ) +
585 " and h" + str( i + 10 ) )
586 host1 = "00:00:00:00:00:" + \
587 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
588 host2 = "00:00:00:00:00:" + \
589 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
590 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700591 host1Dict = onosCli.getHost( host1 )
592 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700593 host1Id = None
594 host2Id = None
595 if host1Dict and host2Dict:
596 host1Id = host1Dict.get( 'id', None )
597 host2Id = host2Dict.get( 'id', None )
598 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700599 nodeNum = ( i % len( main.activeNodes ) )
600 node = main.activeNodes[nodeNum]
601 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700602 if tmpId:
603 main.log.info( "Added intent with id: " + tmpId )
604 intentIds.append( tmpId )
605 else:
606 main.log.error( "addHostIntent returned: " +
607 repr( tmpId ) )
608 else:
609 main.log.error( "Error, getHost() failed for h" + str( i ) +
610 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700611 node = main.activeNodes[0]
612 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700613 main.log.warn( "Hosts output: " )
614 try:
615 main.log.warn( json.dumps( json.loads( hosts ),
616 sort_keys=True,
617 indent=4,
618 separators=( ',', ': ' ) ) )
619 except ( ValueError, TypeError ):
620 main.log.warn( repr( hosts ) )
621 hostResult = main.FALSE
622 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
623 onpass="Found a host id for each host",
624 onfail="Error looking up host ids" )
625
626 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700627 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700628 main.log.info( "Submitted intents: " + str( intentIds ) )
629 main.log.info( "Intents in ONOS: " + str( onosIds ) )
630 for intent in intentIds:
631 if intent in onosIds:
632 pass # intent submitted is in onos
633 else:
634 intentAddResult = False
635 if intentAddResult:
636 intentStop = time.time()
637 else:
638 intentStop = None
639 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700640 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700641 intentStates = []
642 installedCheck = True
643 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
644 count = 0
645 try:
646 for intent in json.loads( intents ):
647 state = intent.get( 'state', None )
648 if "INSTALLED" not in state:
649 installedCheck = False
650 intentId = intent.get( 'id', None )
651 intentStates.append( ( intentId, state ) )
652 except ( ValueError, TypeError ):
653 main.log.exception( "Error parsing intents" )
654 # add submitted intents not in the store
655 tmplist = [ i for i, s in intentStates ]
656 missingIntents = False
657 for i in intentIds:
658 if i not in tmplist:
659 intentStates.append( ( i, " - " ) )
660 missingIntents = True
661 intentStates.sort()
662 for i, s in intentStates:
663 count += 1
664 main.log.info( "%-6s%-15s%-15s" %
665 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700666 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700667 try:
668 missing = False
669 if leaders:
670 parsedLeaders = json.loads( leaders )
671 main.log.warn( json.dumps( parsedLeaders,
672 sort_keys=True,
673 indent=4,
674 separators=( ',', ': ' ) ) )
675 # check for all intent partitions
676 topics = []
677 for i in range( 14 ):
678 topics.append( "intent-partition-" + str( i ) )
679 main.log.debug( topics )
680 ONOStopics = [ j['topic'] for j in parsedLeaders ]
681 for topic in topics:
682 if topic not in ONOStopics:
683 main.log.error( "Error: " + topic +
684 " not in leaders" )
685 missing = True
686 else:
687 main.log.error( "leaders() returned None" )
688 except ( ValueError, TypeError ):
689 main.log.exception( "Error parsing leaders" )
690 main.log.error( repr( leaders ) )
691 # Check all nodes
692 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700693 for i in main.activeNodes:
694 response = main.CLIs[i].leaders( jsonFormat=False)
695 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700696 str( response ) )
697
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700698 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700699 try:
700 if partitions :
701 parsedPartitions = json.loads( partitions )
702 main.log.warn( json.dumps( parsedPartitions,
703 sort_keys=True,
704 indent=4,
705 separators=( ',', ': ' ) ) )
706 # TODO check for a leader in all paritions
707 # TODO check for consistency among nodes
708 else:
709 main.log.error( "partitions() returned None" )
710 except ( ValueError, TypeError ):
711 main.log.exception( "Error parsing partitions" )
712 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700713 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700714 try:
715 if pendingMap :
716 parsedPending = json.loads( pendingMap )
717 main.log.warn( json.dumps( parsedPending,
718 sort_keys=True,
719 indent=4,
720 separators=( ',', ': ' ) ) )
721 # TODO check something here?
722 else:
723 main.log.error( "pendingMap() returned None" )
724 except ( ValueError, TypeError ):
725 main.log.exception( "Error parsing pending map" )
726 main.log.error( repr( pendingMap ) )
727
728 intentAddResult = bool( intentAddResult and not missingIntents and
729 installedCheck )
730 if not intentAddResult:
731 main.log.error( "Error in pushing host intents to ONOS" )
732
733 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700734 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700735 correct = True
736 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700737 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700738 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700739 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700740 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700741 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700742 str( sorted( onosIds ) ) )
743 if sorted( ids ) != sorted( intentIds ):
744 main.log.warn( "Set of intent IDs doesn't match" )
745 correct = False
746 break
747 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700748 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700749 for intent in intents:
750 if intent[ 'state' ] != "INSTALLED":
751 main.log.warn( "Intent " + intent[ 'id' ] +
752 " is " + intent[ 'state' ] )
753 correct = False
754 break
755 if correct:
756 break
757 else:
758 time.sleep(1)
759 if not intentStop:
760 intentStop = time.time()
761 global gossipTime
762 gossipTime = intentStop - intentStart
763 main.log.info( "It took about " + str( gossipTime ) +
764 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700765 gossipPeriod = int( main.params['timers']['gossip'] )
766 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700767 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700768 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700769 onpass="ECM anti-entropy for intents worked within " +
770 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700771 onfail="Intent ECM anti-entropy took too long. " +
772 "Expected time:{}, Actual time:{}".format( maxGossipTime,
773 gossipTime ) )
774 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700775 intentAddResult = True
776
777 if not intentAddResult or "key" in pendingMap:
778 import time
779 installedCheck = True
780 main.log.info( "Sleeping 60 seconds to see if intents are found" )
781 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700782 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700783 main.log.info( "Submitted intents: " + str( intentIds ) )
784 main.log.info( "Intents in ONOS: " + str( onosIds ) )
785 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700786 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700787 intentStates = []
788 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
789 count = 0
790 try:
791 for intent in json.loads( intents ):
792 # Iter through intents of a node
793 state = intent.get( 'state', None )
794 if "INSTALLED" not in state:
795 installedCheck = False
796 intentId = intent.get( 'id', None )
797 intentStates.append( ( intentId, state ) )
798 except ( ValueError, TypeError ):
799 main.log.exception( "Error parsing intents" )
800 # add submitted intents not in the store
801 tmplist = [ i for i, s in intentStates ]
802 for i in intentIds:
803 if i not in tmplist:
804 intentStates.append( ( i, " - " ) )
805 intentStates.sort()
806 for i, s in intentStates:
807 count += 1
808 main.log.info( "%-6s%-15s%-15s" %
809 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700810 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700811 try:
812 missing = False
813 if leaders:
814 parsedLeaders = json.loads( leaders )
815 main.log.warn( json.dumps( parsedLeaders,
816 sort_keys=True,
817 indent=4,
818 separators=( ',', ': ' ) ) )
819 # check for all intent partitions
820 # check for election
821 topics = []
822 for i in range( 14 ):
823 topics.append( "intent-partition-" + str( i ) )
824 # FIXME: this should only be after we start the app
825 topics.append( "org.onosproject.election" )
826 main.log.debug( topics )
827 ONOStopics = [ j['topic'] for j in parsedLeaders ]
828 for topic in topics:
829 if topic not in ONOStopics:
830 main.log.error( "Error: " + topic +
831 " not in leaders" )
832 missing = True
833 else:
834 main.log.error( "leaders() returned None" )
835 except ( ValueError, TypeError ):
836 main.log.exception( "Error parsing leaders" )
837 main.log.error( repr( leaders ) )
838 # Check all nodes
839 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700840 for i in main.activeNodes:
841 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700842 response = node.leaders( jsonFormat=False)
843 main.log.warn( str( node.name ) + " leaders output: \n" +
844 str( response ) )
845
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700846 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700847 try:
848 if partitions :
849 parsedPartitions = json.loads( partitions )
850 main.log.warn( json.dumps( parsedPartitions,
851 sort_keys=True,
852 indent=4,
853 separators=( ',', ': ' ) ) )
854 # TODO check for a leader in all paritions
855 # TODO check for consistency among nodes
856 else:
857 main.log.error( "partitions() returned None" )
858 except ( ValueError, TypeError ):
859 main.log.exception( "Error parsing partitions" )
860 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700861 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700862 try:
863 if pendingMap :
864 parsedPending = json.loads( pendingMap )
865 main.log.warn( json.dumps( parsedPending,
866 sort_keys=True,
867 indent=4,
868 separators=( ',', ': ' ) ) )
869 # TODO check something here?
870 else:
871 main.log.error( "pendingMap() returned None" )
872 except ( ValueError, TypeError ):
873 main.log.exception( "Error parsing pending map" )
874 main.log.error( repr( pendingMap ) )
875
876 def CASE4( self, main ):
877 """
878 Ping across added host intents
879 """
880 import json
881 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700882 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700883 assert main, "main not defined"
884 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700885 assert main.CLIs, "main.CLIs not defined"
886 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700887 main.case( "Verify connectivity by sending traffic across Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700888 main.caseExplanation = "Ping across added host intents to check " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700889 "functionality and check the state of " +\
890 "the intent"
891 main.step( "Ping across added host intents" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700892 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700893 PingResult = main.TRUE
894 for i in range( 8, 18 ):
895 ping = main.Mininet1.pingHost( src="h" + str( i ),
896 target="h" + str( i + 10 ) )
897 PingResult = PingResult and ping
898 if ping == main.FALSE:
899 main.log.warn( "Ping failed between h" + str( i ) +
900 " and h" + str( i + 10 ) )
901 elif ping == main.TRUE:
902 main.log.info( "Ping test passed!" )
903 # Don't set PingResult or you'd override failures
904 if PingResult == main.FALSE:
905 main.log.error(
906 "Intents have not been installed correctly, pings failed." )
907 # TODO: pretty print
908 main.log.warn( "ONOS1 intents: " )
909 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700910 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700911 main.log.warn( json.dumps( json.loads( tmpIntents ),
912 sort_keys=True,
913 indent=4,
914 separators=( ',', ': ' ) ) )
915 except ( ValueError, TypeError ):
916 main.log.warn( repr( tmpIntents ) )
917 utilities.assert_equals(
918 expect=main.TRUE,
919 actual=PingResult,
920 onpass="Intents have been installed correctly and pings work",
921 onfail="Intents have not been installed correctly, pings failed." )
922
923 main.step( "Check Intent state" )
924 installedCheck = False
925 loopCount = 0
926 while not installedCheck and loopCount < 40:
927 installedCheck = True
928 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700929 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700930 intentStates = []
931 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
932 count = 0
933 # Iter through intents of a node
934 try:
935 for intent in json.loads( intents ):
936 state = intent.get( 'state', None )
937 if "INSTALLED" not in state:
938 installedCheck = False
939 intentId = intent.get( 'id', None )
940 intentStates.append( ( intentId, state ) )
941 except ( ValueError, TypeError ):
942 main.log.exception( "Error parsing intents." )
943 # Print states
944 intentStates.sort()
945 for i, s in intentStates:
946 count += 1
947 main.log.info( "%-6s%-15s%-15s" %
948 ( str( count ), str( i ), str( s ) ) )
949 if not installedCheck:
950 time.sleep( 1 )
951 loopCount += 1
952 utilities.assert_equals( expect=True, actual=installedCheck,
953 onpass="Intents are all INSTALLED",
954 onfail="Intents are not all in " +
955 "INSTALLED state" )
956
957 main.step( "Check leadership of topics" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700958 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700959 topicCheck = main.TRUE
960 try:
961 if leaders:
962 parsedLeaders = json.loads( leaders )
963 main.log.warn( json.dumps( parsedLeaders,
964 sort_keys=True,
965 indent=4,
966 separators=( ',', ': ' ) ) )
967 # check for all intent partitions
968 # check for election
969 # TODO: Look at Devices as topics now that it uses this system
970 topics = []
971 for i in range( 14 ):
972 topics.append( "intent-partition-" + str( i ) )
973 # FIXME: this should only be after we start the app
974 # FIXME: topics.append( "org.onosproject.election" )
975 # Print leaders output
976 main.log.debug( topics )
977 ONOStopics = [ j['topic'] for j in parsedLeaders ]
978 for topic in topics:
979 if topic not in ONOStopics:
980 main.log.error( "Error: " + topic +
981 " not in leaders" )
982 topicCheck = main.FALSE
983 else:
984 main.log.error( "leaders() returned None" )
985 topicCheck = main.FALSE
986 except ( ValueError, TypeError ):
987 topicCheck = main.FALSE
988 main.log.exception( "Error parsing leaders" )
989 main.log.error( repr( leaders ) )
990 # TODO: Check for a leader of these topics
991 # Check all nodes
992 if topicCheck:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700993 for i in main.activeNodes:
994 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700995 response = node.leaders( jsonFormat=False)
996 main.log.warn( str( node.name ) + " leaders output: \n" +
997 str( response ) )
998
999 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1000 onpass="intent Partitions is in leaders",
1001 onfail="Some topics were lost " )
1002 # Print partitions
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001003 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001004 try:
1005 if partitions :
1006 parsedPartitions = json.loads( partitions )
1007 main.log.warn( json.dumps( parsedPartitions,
1008 sort_keys=True,
1009 indent=4,
1010 separators=( ',', ': ' ) ) )
1011 # TODO check for a leader in all paritions
1012 # TODO check for consistency among nodes
1013 else:
1014 main.log.error( "partitions() returned None" )
1015 except ( ValueError, TypeError ):
1016 main.log.exception( "Error parsing partitions" )
1017 main.log.error( repr( partitions ) )
1018 # Print Pending Map
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001019 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001020 try:
1021 if pendingMap :
1022 parsedPending = json.loads( pendingMap )
1023 main.log.warn( json.dumps( parsedPending,
1024 sort_keys=True,
1025 indent=4,
1026 separators=( ',', ': ' ) ) )
1027 # TODO check something here?
1028 else:
1029 main.log.error( "pendingMap() returned None" )
1030 except ( ValueError, TypeError ):
1031 main.log.exception( "Error parsing pending map" )
1032 main.log.error( repr( pendingMap ) )
1033
1034 if not installedCheck:
1035 main.log.info( "Waiting 60 seconds to see if the state of " +
1036 "intents change" )
1037 time.sleep( 60 )
1038 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001039 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001040 intentStates = []
1041 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1042 count = 0
1043 # Iter through intents of a node
1044 try:
1045 for intent in json.loads( intents ):
1046 state = intent.get( 'state', None )
1047 if "INSTALLED" not in state:
1048 installedCheck = False
1049 intentId = intent.get( 'id', None )
1050 intentStates.append( ( intentId, state ) )
1051 except ( ValueError, TypeError ):
1052 main.log.exception( "Error parsing intents." )
1053 intentStates.sort()
1054 for i, s in intentStates:
1055 count += 1
1056 main.log.info( "%-6s%-15s%-15s" %
1057 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001058 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001059 try:
1060 missing = False
1061 if leaders:
1062 parsedLeaders = json.loads( leaders )
1063 main.log.warn( json.dumps( parsedLeaders,
1064 sort_keys=True,
1065 indent=4,
1066 separators=( ',', ': ' ) ) )
1067 # check for all intent partitions
1068 # check for election
1069 topics = []
1070 for i in range( 14 ):
1071 topics.append( "intent-partition-" + str( i ) )
1072 # FIXME: this should only be after we start the app
1073 topics.append( "org.onosproject.election" )
1074 main.log.debug( topics )
1075 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1076 for topic in topics:
1077 if topic not in ONOStopics:
1078 main.log.error( "Error: " + topic +
1079 " not in leaders" )
1080 missing = True
1081 else:
1082 main.log.error( "leaders() returned None" )
1083 except ( ValueError, TypeError ):
1084 main.log.exception( "Error parsing leaders" )
1085 main.log.error( repr( leaders ) )
1086 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001087 for i in main.activeNodes:
1088 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001089 response = node.leaders( jsonFormat=False)
1090 main.log.warn( str( node.name ) + " leaders output: \n" +
1091 str( response ) )
1092
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001093 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001094 try:
1095 if partitions :
1096 parsedPartitions = json.loads( partitions )
1097 main.log.warn( json.dumps( parsedPartitions,
1098 sort_keys=True,
1099 indent=4,
1100 separators=( ',', ': ' ) ) )
1101 # TODO check for a leader in all paritions
1102 # TODO check for consistency among nodes
1103 else:
1104 main.log.error( "partitions() returned None" )
1105 except ( ValueError, TypeError ):
1106 main.log.exception( "Error parsing partitions" )
1107 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001108 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001109 try:
1110 if pendingMap :
1111 parsedPending = json.loads( pendingMap )
1112 main.log.warn( json.dumps( parsedPending,
1113 sort_keys=True,
1114 indent=4,
1115 separators=( ',', ': ' ) ) )
1116 # TODO check something here?
1117 else:
1118 main.log.error( "pendingMap() returned None" )
1119 except ( ValueError, TypeError ):
1120 main.log.exception( "Error parsing pending map" )
1121 main.log.error( repr( pendingMap ) )
1122 # Print flowrules
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001123 node = main.activeNodes[0]
1124 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001125 main.step( "Wait a minute then ping again" )
1126 # the wait is above
1127 PingResult = main.TRUE
1128 for i in range( 8, 18 ):
1129 ping = main.Mininet1.pingHost( src="h" + str( i ),
1130 target="h" + str( i + 10 ) )
1131 PingResult = PingResult and ping
1132 if ping == main.FALSE:
1133 main.log.warn( "Ping failed between h" + str( i ) +
1134 " and h" + str( i + 10 ) )
1135 elif ping == main.TRUE:
1136 main.log.info( "Ping test passed!" )
1137 # Don't set PingResult or you'd override failures
1138 if PingResult == main.FALSE:
1139 main.log.error(
1140 "Intents have not been installed correctly, pings failed." )
1141 # TODO: pretty print
1142 main.log.warn( "ONOS1 intents: " )
1143 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001144 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001145 main.log.warn( json.dumps( json.loads( tmpIntents ),
1146 sort_keys=True,
1147 indent=4,
1148 separators=( ',', ': ' ) ) )
1149 except ( ValueError, TypeError ):
1150 main.log.warn( repr( tmpIntents ) )
1151 utilities.assert_equals(
1152 expect=main.TRUE,
1153 actual=PingResult,
1154 onpass="Intents have been installed correctly and pings work",
1155 onfail="Intents have not been installed correctly, pings failed." )
1156
1157 def CASE5( self, main ):
1158 """
1159 Reading state of ONOS
1160 """
1161 import json
1162 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001163 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001164 assert main, "main not defined"
1165 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001166 assert main.CLIs, "main.CLIs not defined"
1167 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001168
1169 main.case( "Setting up and gathering data for current state" )
1170 # The general idea for this test case is to pull the state of
1171 # ( intents,flows, topology,... ) from each ONOS node
1172 # We can then compare them with each other and also with past states
1173
1174 main.step( "Check that each switch has a master" )
1175 global mastershipState
1176 mastershipState = '[]'
1177
1178 # Assert that each device has a master
1179 rolesNotNull = main.TRUE
1180 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001181 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001182 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001183 name="rolesNotNull-" + str( i ),
1184 args=[] )
1185 threads.append( t )
1186 t.start()
1187
1188 for t in threads:
1189 t.join()
1190 rolesNotNull = rolesNotNull and t.result
1191 utilities.assert_equals(
1192 expect=main.TRUE,
1193 actual=rolesNotNull,
1194 onpass="Each device has a master",
1195 onfail="Some devices don't have a master assigned" )
1196
1197 main.step( "Get the Mastership of each switch from each controller" )
1198 ONOSMastership = []
1199 mastershipCheck = main.FALSE
1200 consistentMastership = True
1201 rolesResults = True
1202 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001203 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001204 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001205 name="roles-" + str( i ),
1206 args=[] )
1207 threads.append( t )
1208 t.start()
1209
1210 for t in threads:
1211 t.join()
1212 ONOSMastership.append( t.result )
1213
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001214 for i in range( len( ONOSMastership ) ):
1215 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001216 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001217 main.log.error( "Error in getting ONOS" + node + " roles" )
1218 main.log.warn( "ONOS" + node + " mastership response: " +
1219 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001220 rolesResults = False
1221 utilities.assert_equals(
1222 expect=True,
1223 actual=rolesResults,
1224 onpass="No error in reading roles output",
1225 onfail="Error in reading roles from ONOS" )
1226
1227 main.step( "Check for consistency in roles from each controller" )
1228 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1229 main.log.info(
1230 "Switch roles are consistent across all ONOS nodes" )
1231 else:
1232 consistentMastership = False
1233 utilities.assert_equals(
1234 expect=True,
1235 actual=consistentMastership,
1236 onpass="Switch roles are consistent across all ONOS nodes",
1237 onfail="ONOS nodes have different views of switch roles" )
1238
1239 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001240 for i in range( len( main.activeNodes ) ):
1241 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001242 try:
1243 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001244 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001245 json.dumps(
1246 json.loads( ONOSMastership[ i ] ),
1247 sort_keys=True,
1248 indent=4,
1249 separators=( ',', ': ' ) ) )
1250 except ( ValueError, TypeError ):
1251 main.log.warn( repr( ONOSMastership[ i ] ) )
1252 elif rolesResults and consistentMastership:
1253 mastershipCheck = main.TRUE
1254 mastershipState = ONOSMastership[ 0 ]
1255
1256 main.step( "Get the intents from each controller" )
1257 global intentState
1258 intentState = []
1259 ONOSIntents = []
1260 intentCheck = main.FALSE
1261 consistentIntents = True
1262 intentsResults = True
1263 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001264 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001265 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001266 name="intents-" + str( i ),
1267 args=[],
1268 kwargs={ 'jsonFormat': True } )
1269 threads.append( t )
1270 t.start()
1271
1272 for t in threads:
1273 t.join()
1274 ONOSIntents.append( t.result )
1275
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001276 for i in range( len( ONOSIntents ) ):
1277 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001278 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001279 main.log.error( "Error in getting ONOS" + node + " intents" )
1280 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001281 repr( ONOSIntents[ i ] ) )
1282 intentsResults = False
1283 utilities.assert_equals(
1284 expect=True,
1285 actual=intentsResults,
1286 onpass="No error in reading intents output",
1287 onfail="Error in reading intents from ONOS" )
1288
1289 main.step( "Check for consistency in Intents from each controller" )
1290 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1291 main.log.info( "Intents are consistent across all ONOS " +
1292 "nodes" )
1293 else:
1294 consistentIntents = False
1295 main.log.error( "Intents not consistent" )
1296 utilities.assert_equals(
1297 expect=True,
1298 actual=consistentIntents,
1299 onpass="Intents are consistent across all ONOS nodes",
1300 onfail="ONOS nodes have different views of intents" )
1301
1302 if intentsResults:
1303 # Try to make it easy to figure out what is happening
1304 #
1305 # Intent ONOS1 ONOS2 ...
1306 # 0x01 INSTALLED INSTALLING
1307 # ... ... ...
1308 # ... ... ...
1309 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001310 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001311 title += " " * 10 + "ONOS" + str( n + 1 )
1312 main.log.warn( title )
1313 # get all intent keys in the cluster
1314 keys = []
1315 for nodeStr in ONOSIntents:
1316 node = json.loads( nodeStr )
1317 for intent in node:
1318 keys.append( intent.get( 'id' ) )
1319 keys = set( keys )
1320 for key in keys:
1321 row = "%-13s" % key
1322 for nodeStr in ONOSIntents:
1323 node = json.loads( nodeStr )
1324 for intent in node:
1325 if intent.get( 'id', "Error" ) == key:
1326 row += "%-15s" % intent.get( 'state' )
1327 main.log.warn( row )
1328 # End table view
1329
1330 if intentsResults and not consistentIntents:
1331 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001332 n = str( main.activeNodes[-1] + 1 )
1333 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001334 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1335 sort_keys=True,
1336 indent=4,
1337 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001338 for i in range( len( ONOSIntents ) ):
1339 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001340 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001341 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001342 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1343 sort_keys=True,
1344 indent=4,
1345 separators=( ',', ': ' ) ) )
1346 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001347 main.log.debug( "ONOS" + node + " intents match ONOS" +
1348 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001349 elif intentsResults and consistentIntents:
1350 intentCheck = main.TRUE
1351 intentState = ONOSIntents[ 0 ]
1352
1353 main.step( "Get the flows from each controller" )
1354 global flowState
1355 flowState = []
1356 ONOSFlows = []
1357 ONOSFlowsJson = []
1358 flowCheck = main.FALSE
1359 consistentFlows = True
1360 flowsResults = True
1361 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001362 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001363 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001364 name="flows-" + str( i ),
1365 args=[],
1366 kwargs={ 'jsonFormat': True } )
1367 threads.append( t )
1368 t.start()
1369
1370 # NOTE: Flows command can take some time to run
1371 time.sleep(30)
1372 for t in threads:
1373 t.join()
1374 result = t.result
1375 ONOSFlows.append( result )
1376
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001377 for i in range( len( ONOSFlows ) ):
1378 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001379 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1380 main.log.error( "Error in getting ONOS" + num + " flows" )
1381 main.log.warn( "ONOS" + num + " flows response: " +
1382 repr( ONOSFlows[ i ] ) )
1383 flowsResults = False
1384 ONOSFlowsJson.append( None )
1385 else:
1386 try:
1387 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1388 except ( ValueError, TypeError ):
1389 # FIXME: change this to log.error?
1390 main.log.exception( "Error in parsing ONOS" + num +
1391 " response as json." )
1392 main.log.error( repr( ONOSFlows[ i ] ) )
1393 ONOSFlowsJson.append( None )
1394 flowsResults = False
1395 utilities.assert_equals(
1396 expect=True,
1397 actual=flowsResults,
1398 onpass="No error in reading flows output",
1399 onfail="Error in reading flows from ONOS" )
1400
1401 main.step( "Check for consistency in Flows from each controller" )
1402 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1403 if all( tmp ):
1404 main.log.info( "Flow count is consistent across all ONOS nodes" )
1405 else:
1406 consistentFlows = False
1407 utilities.assert_equals(
1408 expect=True,
1409 actual=consistentFlows,
1410 onpass="The flow count is consistent across all ONOS nodes",
1411 onfail="ONOS nodes have different flow counts" )
1412
1413 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001414 for i in range( len( ONOSFlows ) ):
1415 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001416 try:
1417 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001418 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001419 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1420 indent=4, separators=( ',', ': ' ) ) )
1421 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001422 main.log.warn( "ONOS" + node + " flows: " +
1423 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001424 elif flowsResults and consistentFlows:
1425 flowCheck = main.TRUE
1426 flowState = ONOSFlows[ 0 ]
1427
1428 main.step( "Get the OF Table entries" )
1429 global flows
1430 flows = []
1431 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001432 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001433 if flowCheck == main.FALSE:
1434 for table in flows:
1435 main.log.warn( table )
1436 # TODO: Compare switch flow tables with ONOS flow tables
1437
1438 main.step( "Start continuous pings" )
1439 main.Mininet2.pingLong(
1440 src=main.params[ 'PING' ][ 'source1' ],
1441 target=main.params[ 'PING' ][ 'target1' ],
1442 pingTime=500 )
1443 main.Mininet2.pingLong(
1444 src=main.params[ 'PING' ][ 'source2' ],
1445 target=main.params[ 'PING' ][ 'target2' ],
1446 pingTime=500 )
1447 main.Mininet2.pingLong(
1448 src=main.params[ 'PING' ][ 'source3' ],
1449 target=main.params[ 'PING' ][ 'target3' ],
1450 pingTime=500 )
1451 main.Mininet2.pingLong(
1452 src=main.params[ 'PING' ][ 'source4' ],
1453 target=main.params[ 'PING' ][ 'target4' ],
1454 pingTime=500 )
1455 main.Mininet2.pingLong(
1456 src=main.params[ 'PING' ][ 'source5' ],
1457 target=main.params[ 'PING' ][ 'target5' ],
1458 pingTime=500 )
1459 main.Mininet2.pingLong(
1460 src=main.params[ 'PING' ][ 'source6' ],
1461 target=main.params[ 'PING' ][ 'target6' ],
1462 pingTime=500 )
1463 main.Mininet2.pingLong(
1464 src=main.params[ 'PING' ][ 'source7' ],
1465 target=main.params[ 'PING' ][ 'target7' ],
1466 pingTime=500 )
1467 main.Mininet2.pingLong(
1468 src=main.params[ 'PING' ][ 'source8' ],
1469 target=main.params[ 'PING' ][ 'target8' ],
1470 pingTime=500 )
1471 main.Mininet2.pingLong(
1472 src=main.params[ 'PING' ][ 'source9' ],
1473 target=main.params[ 'PING' ][ 'target9' ],
1474 pingTime=500 )
1475 main.Mininet2.pingLong(
1476 src=main.params[ 'PING' ][ 'source10' ],
1477 target=main.params[ 'PING' ][ 'target10' ],
1478 pingTime=500 )
1479
1480 main.step( "Collecting topology information from ONOS" )
1481 devices = []
1482 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001483 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001484 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001485 name="devices-" + str( i ),
1486 args=[ ] )
1487 threads.append( t )
1488 t.start()
1489
1490 for t in threads:
1491 t.join()
1492 devices.append( t.result )
1493 hosts = []
1494 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001495 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001496 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001497 name="hosts-" + str( i ),
1498 args=[ ] )
1499 threads.append( t )
1500 t.start()
1501
1502 for t in threads:
1503 t.join()
1504 try:
1505 hosts.append( json.loads( t.result ) )
1506 except ( ValueError, TypeError ):
1507 # FIXME: better handling of this, print which node
1508 # Maybe use thread name?
1509 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001510 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001511 hosts.append( None )
1512
1513 ports = []
1514 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001515 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001516 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001517 name="ports-" + str( i ),
1518 args=[ ] )
1519 threads.append( t )
1520 t.start()
1521
1522 for t in threads:
1523 t.join()
1524 ports.append( t.result )
1525 links = []
1526 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001527 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001528 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001529 name="links-" + str( i ),
1530 args=[ ] )
1531 threads.append( t )
1532 t.start()
1533
1534 for t in threads:
1535 t.join()
1536 links.append( t.result )
1537 clusters = []
1538 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001539 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001540 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001541 name="clusters-" + str( i ),
1542 args=[ ] )
1543 threads.append( t )
1544 t.start()
1545
1546 for t in threads:
1547 t.join()
1548 clusters.append( t.result )
1549 # Compare json objects for hosts and dataplane clusters
1550
1551 # hosts
1552 main.step( "Host view is consistent across ONOS nodes" )
1553 consistentHostsResult = main.TRUE
1554 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001555 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001556 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001557 if hosts[ controller ] == hosts[ 0 ]:
1558 continue
1559 else: # hosts not consistent
1560 main.log.error( "hosts from ONOS" +
1561 controllerStr +
1562 " is inconsistent with ONOS1" )
1563 main.log.warn( repr( hosts[ controller ] ) )
1564 consistentHostsResult = main.FALSE
1565
1566 else:
1567 main.log.error( "Error in getting ONOS hosts from ONOS" +
1568 controllerStr )
1569 consistentHostsResult = main.FALSE
1570 main.log.warn( "ONOS" + controllerStr +
1571 " hosts response: " +
1572 repr( hosts[ controller ] ) )
1573 utilities.assert_equals(
1574 expect=main.TRUE,
1575 actual=consistentHostsResult,
1576 onpass="Hosts view is consistent across all ONOS nodes",
1577 onfail="ONOS nodes have different views of hosts" )
1578
1579 main.step( "Each host has an IP address" )
1580 ipResult = main.TRUE
1581 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001582 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001583 if hosts[ controller ]:
1584 for host in hosts[ controller ]:
1585 if not host.get( 'ipAddresses', [ ] ):
1586 main.log.error( "Error with host ips on controller" +
1587 controllerStr + ": " + str( host ) )
1588 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001589 utilities.assert_equals(
1590 expect=main.TRUE,
1591 actual=ipResult,
1592 onpass="The ips of the hosts aren't empty",
1593 onfail="The ip of at least one host is missing" )
1594
1595 # Strongly connected clusters of devices
1596 main.step( "Cluster view is consistent across ONOS nodes" )
1597 consistentClustersResult = main.TRUE
1598 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001599 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001600 if "Error" not in clusters[ controller ]:
1601 if clusters[ controller ] == clusters[ 0 ]:
1602 continue
1603 else: # clusters not consistent
1604 main.log.error( "clusters from ONOS" + controllerStr +
1605 " is inconsistent with ONOS1" )
1606 consistentClustersResult = main.FALSE
1607
1608 else:
1609 main.log.error( "Error in getting dataplane clusters " +
1610 "from ONOS" + controllerStr )
1611 consistentClustersResult = main.FALSE
1612 main.log.warn( "ONOS" + controllerStr +
1613 " clusters response: " +
1614 repr( clusters[ controller ] ) )
1615 utilities.assert_equals(
1616 expect=main.TRUE,
1617 actual=consistentClustersResult,
1618 onpass="Clusters view is consistent across all ONOS nodes",
1619 onfail="ONOS nodes have different views of clusters" )
1620 # there should always only be one cluster
1621 main.step( "Cluster view correct across ONOS nodes" )
1622 try:
1623 numClusters = len( json.loads( clusters[ 0 ] ) )
1624 except ( ValueError, TypeError ):
1625 main.log.exception( "Error parsing clusters[0]: " +
1626 repr( clusters[ 0 ] ) )
1627 clusterResults = main.FALSE
1628 if numClusters == 1:
1629 clusterResults = main.TRUE
1630 utilities.assert_equals(
1631 expect=1,
1632 actual=numClusters,
1633 onpass="ONOS shows 1 SCC",
1634 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1635
1636 main.step( "Comparing ONOS topology to MN" )
1637 devicesResults = main.TRUE
1638 linksResults = main.TRUE
1639 hostsResults = main.TRUE
1640 mnSwitches = main.Mininet1.getSwitches()
1641 mnLinks = main.Mininet1.getLinks()
1642 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001643 for controller in main.activeNodes:
1644 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001645 if devices[ controller ] and ports[ controller ] and\
1646 "Error" not in devices[ controller ] and\
1647 "Error" not in ports[ controller ]:
1648
1649 currentDevicesResult = main.Mininet1.compareSwitches(
1650 mnSwitches,
1651 json.loads( devices[ controller ] ),
1652 json.loads( ports[ controller ] ) )
1653 else:
1654 currentDevicesResult = main.FALSE
1655 utilities.assert_equals( expect=main.TRUE,
1656 actual=currentDevicesResult,
1657 onpass="ONOS" + controllerStr +
1658 " Switches view is correct",
1659 onfail="ONOS" + controllerStr +
1660 " Switches view is incorrect" )
1661 if links[ controller ] and "Error" not in links[ controller ]:
1662 currentLinksResult = main.Mininet1.compareLinks(
1663 mnSwitches, mnLinks,
1664 json.loads( links[ controller ] ) )
1665 else:
1666 currentLinksResult = main.FALSE
1667 utilities.assert_equals( expect=main.TRUE,
1668 actual=currentLinksResult,
1669 onpass="ONOS" + controllerStr +
1670 " links view is correct",
1671 onfail="ONOS" + controllerStr +
1672 " links view is incorrect" )
1673
Jon Hall657cdf62015-12-17 14:40:51 -08001674 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001675 currentHostsResult = main.Mininet1.compareHosts(
1676 mnHosts,
1677 hosts[ controller ] )
1678 else:
1679 currentHostsResult = main.FALSE
1680 utilities.assert_equals( expect=main.TRUE,
1681 actual=currentHostsResult,
1682 onpass="ONOS" + controllerStr +
1683 " hosts exist in Mininet",
1684 onfail="ONOS" + controllerStr +
1685 " hosts don't match Mininet" )
1686
1687 devicesResults = devicesResults and currentDevicesResult
1688 linksResults = linksResults and currentLinksResult
1689 hostsResults = hostsResults and currentHostsResult
1690
1691 main.step( "Device information is correct" )
1692 utilities.assert_equals(
1693 expect=main.TRUE,
1694 actual=devicesResults,
1695 onpass="Device information is correct",
1696 onfail="Device information is incorrect" )
1697
1698 main.step( "Links are correct" )
1699 utilities.assert_equals(
1700 expect=main.TRUE,
1701 actual=linksResults,
1702 onpass="Link are correct",
1703 onfail="Links are incorrect" )
1704
1705 main.step( "Hosts are correct" )
1706 utilities.assert_equals(
1707 expect=main.TRUE,
1708 actual=hostsResults,
1709 onpass="Hosts are correct",
1710 onfail="Hosts are incorrect" )
1711
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001712 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001713 """
1714 The Failure case.
1715 """
Jon Halle1a3b752015-07-22 13:02:46 -07001716 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001717 assert main, "main not defined"
1718 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001719 assert main.CLIs, "main.CLIs not defined"
1720 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001721 main.case( "Stop minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001722
1723 main.step( "Checking ONOS Logs for errors" )
1724 for node in main.nodes:
1725 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1726 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1727
Jon Hall3b489db2015-10-05 14:38:37 -07001728 n = len( main.nodes ) # Number of nodes
1729 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1730 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1731 if n > 3:
1732 main.kill.append( p - 1 )
1733 # NOTE: This only works for cluster sizes of 3,5, or 7.
1734
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001735 main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001736 killResults = main.TRUE
1737 for i in main.kill:
1738 killResults = killResults and\
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001739 main.ONOSbench.onosStop( main.nodes[i].ip_address )
1740 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001741 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001742 onpass="ONOS nodes stopped successfully",
1743 onfail="ONOS nodes NOT successfully stopped" )
1744
1745 def CASE62( self, main ):
1746 """
1747 The bring up stopped nodes
1748 """
1749 import time
1750 assert main.numCtrls, "main.numCtrls not defined"
1751 assert main, "main not defined"
1752 assert utilities.assert_equals, "utilities.assert_equals not defined"
1753 assert main.CLIs, "main.CLIs not defined"
1754 assert main.nodes, "main.nodes not defined"
1755 assert main.kill, "main.kill not defined"
1756 main.case( "Restart minority of ONOS nodes" )
1757
1758 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1759 startResults = main.TRUE
1760 restartTime = time.time()
1761 for i in main.kill:
1762 startResults = startResults and\
1763 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1764 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1765 onpass="ONOS nodes started successfully",
1766 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001767
1768 main.step( "Checking if ONOS is up yet" )
1769 count = 0
1770 onosIsupResult = main.FALSE
1771 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001772 onosIsupResult = main.TRUE
1773 for i in main.kill:
1774 onosIsupResult = onosIsupResult and\
1775 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001776 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001777 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1778 onpass="ONOS restarted successfully",
1779 onfail="ONOS restart NOT successful" )
1780
Jon Halle1a3b752015-07-22 13:02:46 -07001781 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001782 cliResults = main.TRUE
1783 for i in main.kill:
1784 cliResults = cliResults and\
1785 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001786 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001787 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1788 onpass="ONOS cli restarted",
1789 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001790 main.activeNodes.sort()
1791 try:
1792 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1793 "List of active nodes has duplicates, this likely indicates something was run out of order"
1794 except AssertionError:
1795 main.log.exception( "" )
1796 main.cleanup()
1797 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001798
1799 # Grab the time of restart so we chan check how long the gossip
1800 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001801 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001802 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001803 # TODO: MAke this configurable. Also, we are breaking the above timer
1804 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001805 node = main.activeNodes[0]
1806 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1807 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1808 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001809
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        Re-reads mastership, intents, flow tables and the election leader
        from the remaining active nodes and compares them against the state
        recorded before the failure (module-level intentState / flows are
        expected to have been set by CASE5).
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.kill is normally set by CASE61; default to an empty list so
        # the leadership check below still works if that case was skipped.
        try:
            main.kill
        except AttributeError:
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel, one thread per node.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # ONOSMastership is indexed by position in main.activeNodes, so map
        # back to the 1-based node number for log messages.
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes must report the same roles output as the first node.
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's full roles view to help debug the mismatch.
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE: sorted() on the raw JSON strings sorts their characters; it
        #       is used here only as an order-insensitive equality check.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent ONOS1 ONOS2 ...
            # 0x01 INSTALLED INSTALLING
            # ... ... ...
            # ... ... ...
            title = " ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # Print one row per intent id with each node's state for it.
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Summarize per-node intent state counts, e.g. {'INSTALLED': 25}.
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        if intentState and intentState == ONOSIntents[ 0 ]:
            sameIntents = main.TRUE
            main.log.info( "Intents are consistent with before failure" )
        # TODO: possibly the states have changed? we may need to figure out
        #       what the acceptable states are
        elif len( intentState ) == len( ONOSIntents[ 0 ] ):
            # Same length but not byte-identical: fall back to a per-intent
            # membership comparison of the parsed JSON.
            sameIntents = main.TRUE
            try:
                before = json.loads( intentState )
                after = json.loads( ONOSIntents[ 0 ] )
                for intent in before:
                    if intent not in after:
                        sameIntents = main.FALSE
                        main.log.debug( "Intent is not currently in ONOS " +
                                        "(at least in the same form):" )
                        main.log.debug( json.dumps( intent ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        if sameIntents == main.FALSE:
            try:
                main.log.debug( "ONOS intents before: " )
                main.log.debug( json.dumps( json.loads( intentState ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
                main.log.debug( "Current ONOS intents: " )
                main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        # NOTE(review): 28 switches matches the obelisk topology used by
        #               this suite; 'flows' is assumed to be set by CASE5.
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
            if FlowTables == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )

        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes that were stopped/restarted; none of them may
        # still be reported as the election leader.
        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        # Every active node must agree on a single leader.
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2126
2127 def CASE8( self, main ):
2128 """
2129 Compare topo
2130 """
2131 import json
2132 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002133 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002134 assert main, "main not defined"
2135 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002136 assert main.CLIs, "main.CLIs not defined"
2137 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002138
2139 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002140 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002141 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002142 topoResult = main.FALSE
2143 elapsed = 0
2144 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002145 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002146 startTime = time.time()
2147 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002148 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002149 devicesResults = main.TRUE
2150 linksResults = main.TRUE
2151 hostsResults = main.TRUE
2152 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002153 count += 1
2154 cliStart = time.time()
2155 devices = []
2156 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002157 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002158 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002159 name="devices-" + str( i ),
2160 args=[ ] )
2161 threads.append( t )
2162 t.start()
2163
2164 for t in threads:
2165 t.join()
2166 devices.append( t.result )
2167 hosts = []
2168 ipResult = main.TRUE
2169 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002170 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002171 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002172 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002173 args=[ main.CLIs[i].hosts, [ None ] ],
2174 kwargs= { 'sleep': 5, 'attempts': 5,
2175 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002176 threads.append( t )
2177 t.start()
2178
2179 for t in threads:
2180 t.join()
2181 try:
2182 hosts.append( json.loads( t.result ) )
2183 except ( ValueError, TypeError ):
2184 main.log.exception( "Error parsing hosts results" )
2185 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002186 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002187 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002188 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002189 if hosts[ controller ]:
2190 for host in hosts[ controller ]:
2191 if host is None or host.get( 'ipAddresses', [] ) == []:
2192 main.log.error(
2193 "Error with host ipAddresses on controller" +
2194 controllerStr + ": " + str( host ) )
2195 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002196 ports = []
2197 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002198 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002199 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002200 name="ports-" + str( i ),
2201 args=[ ] )
2202 threads.append( t )
2203 t.start()
2204
2205 for t in threads:
2206 t.join()
2207 ports.append( t.result )
2208 links = []
2209 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002210 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002211 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002212 name="links-" + str( i ),
2213 args=[ ] )
2214 threads.append( t )
2215 t.start()
2216
2217 for t in threads:
2218 t.join()
2219 links.append( t.result )
2220 clusters = []
2221 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002222 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002223 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002224 name="clusters-" + str( i ),
2225 args=[ ] )
2226 threads.append( t )
2227 t.start()
2228
2229 for t in threads:
2230 t.join()
2231 clusters.append( t.result )
2232
2233 elapsed = time.time() - startTime
2234 cliTime = time.time() - cliStart
2235 print "Elapsed time: " + str( elapsed )
2236 print "CLI time: " + str( cliTime )
2237
2238 mnSwitches = main.Mininet1.getSwitches()
2239 mnLinks = main.Mininet1.getLinks()
2240 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002241 for controller in range( len( main.activeNodes ) ):
2242 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002243 if devices[ controller ] and ports[ controller ] and\
2244 "Error" not in devices[ controller ] and\
2245 "Error" not in ports[ controller ]:
2246
2247 currentDevicesResult = main.Mininet1.compareSwitches(
2248 mnSwitches,
2249 json.loads( devices[ controller ] ),
2250 json.loads( ports[ controller ] ) )
2251 else:
2252 currentDevicesResult = main.FALSE
2253 utilities.assert_equals( expect=main.TRUE,
2254 actual=currentDevicesResult,
2255 onpass="ONOS" + controllerStr +
2256 " Switches view is correct",
2257 onfail="ONOS" + controllerStr +
2258 " Switches view is incorrect" )
2259
2260 if links[ controller ] and "Error" not in links[ controller ]:
2261 currentLinksResult = main.Mininet1.compareLinks(
2262 mnSwitches, mnLinks,
2263 json.loads( links[ controller ] ) )
2264 else:
2265 currentLinksResult = main.FALSE
2266 utilities.assert_equals( expect=main.TRUE,
2267 actual=currentLinksResult,
2268 onpass="ONOS" + controllerStr +
2269 " links view is correct",
2270 onfail="ONOS" + controllerStr +
2271 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002272 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002273 currentHostsResult = main.Mininet1.compareHosts(
2274 mnHosts,
2275 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002276 elif hosts[ controller ] == []:
2277 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002278 else:
2279 currentHostsResult = main.FALSE
2280 utilities.assert_equals( expect=main.TRUE,
2281 actual=currentHostsResult,
2282 onpass="ONOS" + controllerStr +
2283 " hosts exist in Mininet",
2284 onfail="ONOS" + controllerStr +
2285 " hosts don't match Mininet" )
2286 # CHECKING HOST ATTACHMENT POINTS
2287 hostAttachment = True
2288 zeroHosts = False
2289 # FIXME: topo-HA/obelisk specific mappings:
2290 # key is mac and value is dpid
2291 mappings = {}
2292 for i in range( 1, 29 ): # hosts 1 through 28
2293 # set up correct variables:
2294 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2295 if i == 1:
2296 deviceId = "1000".zfill(16)
2297 elif i == 2:
2298 deviceId = "2000".zfill(16)
2299 elif i == 3:
2300 deviceId = "3000".zfill(16)
2301 elif i == 4:
2302 deviceId = "3004".zfill(16)
2303 elif i == 5:
2304 deviceId = "5000".zfill(16)
2305 elif i == 6:
2306 deviceId = "6000".zfill(16)
2307 elif i == 7:
2308 deviceId = "6007".zfill(16)
2309 elif i >= 8 and i <= 17:
2310 dpid = '3' + str( i ).zfill( 3 )
2311 deviceId = dpid.zfill(16)
2312 elif i >= 18 and i <= 27:
2313 dpid = '6' + str( i ).zfill( 3 )
2314 deviceId = dpid.zfill(16)
2315 elif i == 28:
2316 deviceId = "2800".zfill(16)
2317 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002318 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002319 if hosts[ controller ] == []:
2320 main.log.warn( "There are no hosts discovered" )
2321 zeroHosts = True
2322 else:
2323 for host in hosts[ controller ]:
2324 mac = None
2325 location = None
2326 device = None
2327 port = None
2328 try:
2329 mac = host.get( 'mac' )
2330 assert mac, "mac field could not be found for this host object"
2331
2332 location = host.get( 'location' )
2333 assert location, "location field could not be found for this host object"
2334
2335 # Trim the protocol identifier off deviceId
2336 device = str( location.get( 'elementId' ) ).split(':')[1]
2337 assert device, "elementId field could not be found for this host location object"
2338
2339 port = location.get( 'port' )
2340 assert port, "port field could not be found for this host location object"
2341
2342 # Now check if this matches where they should be
2343 if mac and device and port:
2344 if str( port ) != "1":
2345 main.log.error( "The attachment port is incorrect for " +
2346 "host " + str( mac ) +
2347 ". Expected: 1 Actual: " + str( port) )
2348 hostAttachment = False
2349 if device != mappings[ str( mac ) ]:
2350 main.log.error( "The attachment device is incorrect for " +
2351 "host " + str( mac ) +
2352 ". Expected: " + mappings[ str( mac ) ] +
2353 " Actual: " + device )
2354 hostAttachment = False
2355 else:
2356 hostAttachment = False
2357 except AssertionError:
2358 main.log.exception( "Json object not as expected" )
2359 main.log.error( repr( host ) )
2360 hostAttachment = False
2361 else:
2362 main.log.error( "No hosts json output or \"Error\"" +
2363 " in output. hosts = " +
2364 repr( hosts[ controller ] ) )
2365 if zeroHosts is False:
2366 hostAttachment = True
2367
2368 # END CHECKING HOST ATTACHMENT POINTS
2369 devicesResults = devicesResults and currentDevicesResult
2370 linksResults = linksResults and currentLinksResult
2371 hostsResults = hostsResults and currentHostsResult
2372 hostAttachmentResults = hostAttachmentResults and\
2373 hostAttachment
Jon Halle9b1fa32015-12-08 15:32:21 -08002374 topoResult = devicesResults and linksResults and\
2375 hostsResults and hostAttachmentResults
2376 utilities.assert_equals( expect=True,
2377 actual=topoResult,
2378 onpass="ONOS topology matches Mininet",
2379 onfail="ONOS topology don't match Mininet" )
2380 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002381
2382 # Compare json objects for hosts and dataplane clusters
2383
2384 # hosts
2385 main.step( "Hosts view is consistent across all ONOS nodes" )
2386 consistentHostsResult = main.TRUE
2387 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002388 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002389 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002390 if hosts[ controller ] == hosts[ 0 ]:
2391 continue
2392 else: # hosts not consistent
2393 main.log.error( "hosts from ONOS" + controllerStr +
2394 " is inconsistent with ONOS1" )
2395 main.log.warn( repr( hosts[ controller ] ) )
2396 consistentHostsResult = main.FALSE
2397
2398 else:
2399 main.log.error( "Error in getting ONOS hosts from ONOS" +
2400 controllerStr )
2401 consistentHostsResult = main.FALSE
2402 main.log.warn( "ONOS" + controllerStr +
2403 " hosts response: " +
2404 repr( hosts[ controller ] ) )
2405 utilities.assert_equals(
2406 expect=main.TRUE,
2407 actual=consistentHostsResult,
2408 onpass="Hosts view is consistent across all ONOS nodes",
2409 onfail="ONOS nodes have different views of hosts" )
2410
2411 main.step( "Hosts information is correct" )
2412 hostsResults = hostsResults and ipResult
2413 utilities.assert_equals(
2414 expect=main.TRUE,
2415 actual=hostsResults,
2416 onpass="Host information is correct",
2417 onfail="Host information is incorrect" )
2418
2419 main.step( "Host attachment points to the network" )
2420 utilities.assert_equals(
2421 expect=True,
2422 actual=hostAttachmentResults,
2423 onpass="Hosts are correctly attached to the network",
2424 onfail="ONOS did not correctly attach hosts to the network" )
2425
2426 # Strongly connected clusters of devices
2427 main.step( "Clusters view is consistent across all ONOS nodes" )
2428 consistentClustersResult = main.TRUE
2429 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002430 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002431 if "Error" not in clusters[ controller ]:
2432 if clusters[ controller ] == clusters[ 0 ]:
2433 continue
2434 else: # clusters not consistent
2435 main.log.error( "clusters from ONOS" +
2436 controllerStr +
2437 " is inconsistent with ONOS1" )
2438 consistentClustersResult = main.FALSE
2439
2440 else:
2441 main.log.error( "Error in getting dataplane clusters " +
2442 "from ONOS" + controllerStr )
2443 consistentClustersResult = main.FALSE
2444 main.log.warn( "ONOS" + controllerStr +
2445 " clusters response: " +
2446 repr( clusters[ controller ] ) )
2447 utilities.assert_equals(
2448 expect=main.TRUE,
2449 actual=consistentClustersResult,
2450 onpass="Clusters view is consistent across all ONOS nodes",
2451 onfail="ONOS nodes have different views of clusters" )
2452
2453 main.step( "There is only one SCC" )
2454 # there should always only be one cluster
2455 try:
2456 numClusters = len( json.loads( clusters[ 0 ] ) )
2457 except ( ValueError, TypeError ):
2458 main.log.exception( "Error parsing clusters[0]: " +
2459 repr( clusters[0] ) )
2460 clusterResults = main.FALSE
2461 if numClusters == 1:
2462 clusterResults = main.TRUE
2463 utilities.assert_equals(
2464 expect=1,
2465 actual=numClusters,
2466 onpass="ONOS shows 1 SCC",
2467 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2468
2469 topoResult = ( devicesResults and linksResults
2470 and hostsResults and consistentHostsResult
2471 and consistentClustersResult and clusterResults
2472 and ipResult and hostAttachmentResults )
2473
2474 topoResult = topoResult and int( count <= 2 )
2475 note = "note it takes about " + str( int( cliTime ) ) + \
2476 " seconds for the test to make all the cli calls to fetch " +\
2477 "the topology from each ONOS instance"
2478 main.log.info(
2479 "Very crass estimate for topology discovery/convergence( " +
2480 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2481 str( count ) + " tries" )
2482
2483 main.step( "Device information is correct" )
2484 utilities.assert_equals(
2485 expect=main.TRUE,
2486 actual=devicesResults,
2487 onpass="Device information is correct",
2488 onfail="Device information is incorrect" )
2489
2490 main.step( "Links are correct" )
2491 utilities.assert_equals(
2492 expect=main.TRUE,
2493 actual=linksResults,
2494 onpass="Link are correct",
2495 onfail="Links are incorrect" )
2496
2497 # FIXME: move this to an ONOS state case
2498 main.step( "Checking ONOS nodes" )
2499 nodesOutput = []
2500 nodeResults = main.TRUE
2501 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002502 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002503 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002504 name="nodes-" + str( i ),
2505 args=[ ] )
2506 threads.append( t )
2507 t.start()
2508
2509 for t in threads:
2510 t.join()
2511 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002512 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002513 ips.sort()
Jon Hall5cf14d52015-07-16 12:15:19 -07002514 for i in nodesOutput:
2515 try:
2516 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002517 activeIps = []
2518 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002519 for node in current:
Jon Halle9b1fa32015-12-08 15:32:21 -08002520 if node['state'] == 'ACTIVE':
2521 activeIps.append( node['ip'] )
2522 activeIps.sort()
2523 if ips == activeIps:
2524 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002525 except ( ValueError, TypeError ):
2526 main.log.error( "Error parsing nodes output" )
2527 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002528 currentResult = main.FALSE
2529 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002530 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2531 onpass="Nodes check successful",
2532 onfail="Nodes check NOT successful" )
2533
2534 def CASE9( self, main ):
2535 """
2536 Link s3-s28 down
2537 """
2538 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002539 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002540 assert main, "main not defined"
2541 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002542 assert main.CLIs, "main.CLIs not defined"
2543 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002544 # NOTE: You should probably run a topology check after this
2545
2546 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2547
2548 description = "Turn off a link to ensure that Link Discovery " +\
2549 "is working properly"
2550 main.case( description )
2551
2552 main.step( "Kill Link between s3 and s28" )
2553 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2554 main.log.info( "Waiting " + str( linkSleep ) +
2555 " seconds for link down to be discovered" )
2556 time.sleep( linkSleep )
2557 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2558 onpass="Link down successful",
2559 onfail="Failed to bring link down" )
2560 # TODO do some sort of check here
2561
2562 def CASE10( self, main ):
2563 """
2564 Link s3-s28 up
2565 """
2566 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002567 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002568 assert main, "main not defined"
2569 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002570 assert main.CLIs, "main.CLIs not defined"
2571 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002572 # NOTE: You should probably run a topology check after this
2573
2574 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2575
2576 description = "Restore a link to ensure that Link Discovery is " + \
2577 "working properly"
2578 main.case( description )
2579
2580 main.step( "Bring link between s3 and s28 back up" )
2581 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2582 main.log.info( "Waiting " + str( linkSleep ) +
2583 " seconds for link up to be discovered" )
2584 time.sleep( linkSleep )
2585 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2586 onpass="Link up successful",
2587 onfail="Failed to bring link up" )
2588 # TODO do some sort of check here
2589
2590 def CASE11( self, main ):
2591 """
2592 Switch Down
2593 """
2594 # NOTE: You should probably run a topology check after this
2595 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002596 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002597 assert main, "main not defined"
2598 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002599 assert main.CLIs, "main.CLIs not defined"
2600 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002601
2602 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2603
2604 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002605 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002606 main.case( description )
2607 switch = main.params[ 'kill' ][ 'switch' ]
2608 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2609
2610 # TODO: Make this switch parameterizable
2611 main.step( "Kill " + switch )
2612 main.log.info( "Deleting " + switch )
2613 main.Mininet1.delSwitch( switch )
2614 main.log.info( "Waiting " + str( switchSleep ) +
2615 " seconds for switch down to be discovered" )
2616 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002617 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002618 # Peek at the deleted switch
2619 main.log.warn( str( device ) )
2620 result = main.FALSE
2621 if device and device[ 'available' ] is False:
2622 result = main.TRUE
2623 utilities.assert_equals( expect=main.TRUE, actual=result,
2624 onpass="Kill switch successful",
2625 onfail="Failed to kill switch?" )
2626
2627 def CASE12( self, main ):
2628 """
2629 Switch Up
2630 """
2631 # NOTE: You should probably run a topology check after this
2632 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002633 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002634 assert main, "main not defined"
2635 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002636 assert main.CLIs, "main.CLIs not defined"
2637 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002638 assert ONOS1Port, "ONOS1Port not defined"
2639 assert ONOS2Port, "ONOS2Port not defined"
2640 assert ONOS3Port, "ONOS3Port not defined"
2641 assert ONOS4Port, "ONOS4Port not defined"
2642 assert ONOS5Port, "ONOS5Port not defined"
2643 assert ONOS6Port, "ONOS6Port not defined"
2644 assert ONOS7Port, "ONOS7Port not defined"
2645
2646 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2647 switch = main.params[ 'kill' ][ 'switch' ]
2648 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2649 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002650 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002651 description = "Adding a switch to ensure it is discovered correctly"
2652 main.case( description )
2653
2654 main.step( "Add back " + switch )
2655 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2656 for peer in links:
2657 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002658 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002659 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2660 main.log.info( "Waiting " + str( switchSleep ) +
2661 " seconds for switch up to be discovered" )
2662 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002663 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002664 # Peek at the deleted switch
2665 main.log.warn( str( device ) )
2666 result = main.FALSE
2667 if device and device[ 'available' ]:
2668 result = main.TRUE
2669 utilities.assert_equals( expect=main.TRUE, actual=result,
2670 onpass="add switch successful",
2671 onfail="Failed to add switch?" )
2672
2673 def CASE13( self, main ):
2674 """
2675 Clean up
2676 """
2677 import os
2678 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002679 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002680 assert main, "main not defined"
2681 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002682 assert main.CLIs, "main.CLIs not defined"
2683 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002684
2685 # printing colors to terminal
2686 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2687 'blue': '\033[94m', 'green': '\033[92m',
2688 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2689 main.case( "Test Cleanup" )
2690 main.step( "Killing tcpdumps" )
2691 main.Mininet2.stopTcpdump()
2692
2693 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002694 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002695 main.step( "Copying MN pcap and ONOS log files to test station" )
2696 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2697 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002698 # NOTE: MN Pcap file is being saved to logdir.
2699 # We scp this file as MN and TestON aren't necessarily the same vm
2700
2701 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002702 # TODO: Load these from params
2703 # NOTE: must end in /
2704 logFolder = "/opt/onos/log/"
2705 logFiles = [ "karaf.log", "karaf.log.1" ]
2706 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002707 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002708 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002709 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002710 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2711 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002712 # std*.log's
2713 # NOTE: must end in /
2714 logFolder = "/opt/onos/var/"
2715 logFiles = [ "stderr.log", "stdout.log" ]
2716 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002717 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002718 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002719 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002720 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2721 logFolder + f, dstName )
2722 else:
2723 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002724
2725 main.step( "Stopping Mininet" )
2726 mnResult = main.Mininet1.stopNet()
2727 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2728 onpass="Mininet stopped",
2729 onfail="MN cleanup NOT successful" )
2730
2731 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002732 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002733 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2734 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002735
2736 try:
2737 timerLog = open( main.logdir + "/Timers.csv", 'w')
2738 # Overwrite with empty line and close
2739 labels = "Gossip Intents, Restart"
2740 data = str( gossipTime ) + ", " + str( main.restartTime )
2741 timerLog.write( labels + "\n" + data )
2742 timerLog.close()
2743 except NameError, e:
2744 main.log.exception(e)
2745
2746 def CASE14( self, main ):
2747 """
2748 start election app on all onos nodes
2749 """
Jon Halle1a3b752015-07-22 13:02:46 -07002750 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002751 assert main, "main not defined"
2752 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002753 assert main.CLIs, "main.CLIs not defined"
2754 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002755
2756 main.case("Start Leadership Election app")
2757 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002758 onosCli = main.CLIs[ main.activeNodes[0] ]
2759 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002760 utilities.assert_equals(
2761 expect=main.TRUE,
2762 actual=appResult,
2763 onpass="Election app installed",
2764 onfail="Something went wrong with installing Leadership election" )
2765
2766 main.step( "Run for election on each node" )
2767 leaderResult = main.TRUE
2768 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002769 for i in main.activeNodes:
2770 main.CLIs[i].electionTestRun()
2771 for i in main.activeNodes:
2772 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002773 leader = cli.electionTestLeader()
2774 if leader is None or leader == main.FALSE:
2775 main.log.error( cli.name + ": Leader for the election app " +
2776 "should be an ONOS node, instead got '" +
2777 str( leader ) + "'" )
2778 leaderResult = main.FALSE
2779 leaders.append( leader )
2780 utilities.assert_equals(
2781 expect=main.TRUE,
2782 actual=leaderResult,
2783 onpass="Successfully ran for leadership",
2784 onfail="Failed to run for leadership" )
2785
2786 main.step( "Check that each node shows the same leader" )
2787 sameLeader = main.TRUE
2788 if len( set( leaders ) ) != 1:
2789 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002790 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002791 str( leaders ) )
2792 utilities.assert_equals(
2793 expect=main.TRUE,
2794 actual=sameLeader,
2795 onpass="Leadership is consistent for the election topic",
2796 onfail="Nodes have different leaders" )
2797
2798 def CASE15( self, main ):
2799 """
2800 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002801 15.1 Run election on each node
2802 15.2 Check that each node has the same leaders and candidates
2803 15.3 Find current leader and withdraw
2804 15.4 Check that a new node was elected leader
2805 15.5 Check that that new leader was the candidate of old leader
2806 15.6 Run for election on old leader
2807 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2808 15.8 Make sure that the old leader was added to the candidate list
2809
2810 old and new variable prefixes refer to data from before vs after
2811 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002812 """
2813 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002814 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002815 assert main, "main not defined"
2816 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002817 assert main.CLIs, "main.CLIs not defined"
2818 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002819
Jon Hall5cf14d52015-07-16 12:15:19 -07002820 description = "Check that Leadership Election is still functional"
2821 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002822 # NOTE: Need to re-run since being a canidate is not persistant
2823 # TODO: add check for "Command not found:" in the driver, this
2824 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002825
acsmars71adceb2015-08-31 15:09:26 -07002826 oldLeaders = [] # leaders by node before withdrawl from candidates
2827 newLeaders = [] # leaders by node after withdrawl from candidates
2828 oldAllCandidates = [] # list of lists of each nodes' candidates before
2829 newAllCandidates = [] # list of lists of each nodes' candidates after
2830 oldCandidates = [] # list of candidates from node 0 before withdrawl
2831 newCandidates = [] # list of candidates from node 0 after withdrawl
2832 oldLeader = '' # the old leader from oldLeaders, None if not same
2833 newLeader = '' # the new leaders fron newLoeaders, None if not same
2834 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2835 expectNoLeader = False # True when there is only one leader
2836 if main.numCtrls == 1:
2837 expectNoLeader = True
2838
2839 main.step( "Run for election on each node" )
2840 electionResult = main.TRUE
2841
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002842 for i in main.activeNodes: # run test election on each node
2843 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002844 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002845 utilities.assert_equals(
2846 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002847 actual=electionResult,
2848 onpass="All nodes successfully ran for leadership",
2849 onfail="At least one node failed to run for leadership" )
2850
acsmars3a72bde2015-09-02 14:16:22 -07002851 if electionResult == main.FALSE:
2852 main.log.error(
2853 "Skipping Test Case because Election Test App isn't loaded" )
2854 main.skipCase()
2855
acsmars71adceb2015-08-31 15:09:26 -07002856 main.step( "Check that each node shows the same leader and candidates" )
2857 sameResult = main.TRUE
2858 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002859 for i in main.activeNodes:
2860 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002861 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2862 oldAllCandidates.append( node )
2863 oldLeaders.append( node[ 0 ] )
2864 oldCandidates = oldAllCandidates[ 0 ]
2865
2866 # Check that each node has the same leader. Defines oldLeader
2867 if len( set( oldLeaders ) ) != 1:
2868 sameResult = main.FALSE
2869 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2870 oldLeader = None
2871 else:
2872 oldLeader = oldLeaders[ 0 ]
2873
2874 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002875 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002876 for candidates in oldAllCandidates:
2877 if set( candidates ) != set( oldCandidates ):
2878 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002879 candidateDiscrepancy = True
2880
2881 if candidateDiscrepancy:
2882 failMessage += " and candidates"
2883
acsmars71adceb2015-08-31 15:09:26 -07002884 utilities.assert_equals(
2885 expect=main.TRUE,
2886 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002887 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002888 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002889
2890 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002891 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002892 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002893 if oldLeader is None:
2894 main.log.error( "Leadership isn't consistent." )
2895 withdrawResult = main.FALSE
2896 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002897 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002898 if oldLeader == main.nodes[ i ].ip_address:
2899 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002900 break
2901 else: # FOR/ELSE statement
2902 main.log.error( "Leader election, could not find current leader" )
2903 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002904 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002905 utilities.assert_equals(
2906 expect=main.TRUE,
2907 actual=withdrawResult,
2908 onpass="Node was withdrawn from election",
2909 onfail="Node was not withdrawn from election" )
2910
acsmars71adceb2015-08-31 15:09:26 -07002911 main.step( "Check that a new node was elected leader" )
2912
Jon Hall5cf14d52015-07-16 12:15:19 -07002913 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002914 newLeaderResult = main.TRUE
2915 failMessage = "Nodes have different leaders"
2916
2917 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002918 for i in main.activeNodes:
2919 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002920 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2921 # elections might no have finished yet
2922 if node[ 0 ] == 'none' and not expectNoLeader:
2923 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2924 "sure elections are complete." )
2925 time.sleep(5)
2926 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2927 # election still isn't done or there is a problem
2928 if node[ 0 ] == 'none':
2929 main.log.error( "No leader was elected on at least 1 node" )
2930 newLeaderResult = main.FALSE
2931 newAllCandidates.append( node )
2932 newLeaders.append( node[ 0 ] )
2933 newCandidates = newAllCandidates[ 0 ]
2934
2935 # Check that each node has the same leader. Defines newLeader
2936 if len( set( newLeaders ) ) != 1:
2937 newLeaderResult = main.FALSE
2938 main.log.error( "Nodes have different leaders: " +
2939 str( newLeaders ) )
2940 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002941 else:
acsmars71adceb2015-08-31 15:09:26 -07002942 newLeader = newLeaders[ 0 ]
2943
2944 # Check that each node's candidate list is the same
2945 for candidates in newAllCandidates:
2946 if set( candidates ) != set( newCandidates ):
2947 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002948 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002949
2950 # Check that the new leader is not the older leader, which was withdrawn
2951 if newLeader == oldLeader:
2952 newLeaderResult = main.FALSE
2953 main.log.error( "All nodes still see old leader: " + oldLeader +
2954 " as the current leader" )
2955
Jon Hall5cf14d52015-07-16 12:15:19 -07002956 utilities.assert_equals(
2957 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002958 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002959 onpass="Leadership election passed",
2960 onfail="Something went wrong with Leadership election" )
2961
acsmars71adceb2015-08-31 15:09:26 -07002962 main.step( "Check that that new leader was the candidate of old leader")
2963 # candidates[ 2 ] should be come the top candidate after withdrawl
2964 correctCandidateResult = main.TRUE
2965 if expectNoLeader:
2966 if newLeader == 'none':
2967 main.log.info( "No leader expected. None found. Pass" )
2968 correctCandidateResult = main.TRUE
2969 else:
2970 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2971 correctCandidateResult = main.FALSE
2972 elif newLeader != oldCandidates[ 2 ]:
2973 correctCandidateResult = main.FALSE
2974 main.log.error( "Candidate " + newLeader + " was elected. " +
2975 oldCandidates[ 2 ] + " should have had priority." )
2976
2977 utilities.assert_equals(
2978 expect=main.TRUE,
2979 actual=correctCandidateResult,
2980 onpass="Correct Candidate Elected",
2981 onfail="Incorrect Candidate Elected" )
2982
Jon Hall5cf14d52015-07-16 12:15:19 -07002983 main.step( "Run for election on old leader( just so everyone " +
2984 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002985 if oldLeaderCLI is not None:
2986 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002987 else:
acsmars71adceb2015-08-31 15:09:26 -07002988 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002989 runResult = main.FALSE
2990 utilities.assert_equals(
2991 expect=main.TRUE,
2992 actual=runResult,
2993 onpass="App re-ran for election",
2994 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07002995 main.step(
2996 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002997 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07002998 positionResult = main.TRUE
2999 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
3000
3001 # Reset and reuse the new candidate and leaders lists
3002 newAllCandidates = []
3003 newCandidates = []
3004 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003005 for i in main.activeNodes:
3006 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003007 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3008 if oldLeader not in node: # election might no have finished yet
3009 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3010 "be sure elections are complete" )
3011 time.sleep(5)
3012 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3013 if oldLeader not in node: # election still isn't done, errors
3014 main.log.error(
3015 "Old leader was not elected on at least one node" )
3016 positionResult = main.FALSE
3017 newAllCandidates.append( node )
3018 newLeaders.append( node[ 0 ] )
3019 newCandidates = newAllCandidates[ 0 ]
3020
3021 # Check that each node has the same leader. Defines newLeader
3022 if len( set( newLeaders ) ) != 1:
3023 positionResult = main.FALSE
3024 main.log.error( "Nodes have different leaders: " +
3025 str( newLeaders ) )
3026 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003027 else:
acsmars71adceb2015-08-31 15:09:26 -07003028 newLeader = newLeaders[ 0 ]
3029
3030 # Check that each node's candidate list is the same
3031 for candidates in newAllCandidates:
3032 if set( candidates ) != set( newCandidates ):
3033 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003034 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003035
3036 # Check that the re-elected node is last on the candidate List
3037 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003038 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003039 str( newCandidates ) )
3040 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003041
3042 utilities.assert_equals(
3043 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003044 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003045 onpass="Old leader successfully re-ran for election",
3046 onfail="Something went wrong with Leadership election after " +
3047 "the old leader re-ran for election" )
3048
3049 def CASE16( self, main ):
3050 """
3051 Install Distributed Primitives app
3052 """
3053 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003054 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003055 assert main, "main not defined"
3056 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003057 assert main.CLIs, "main.CLIs not defined"
3058 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003059
3060 # Variables for the distributed primitives tests
3061 global pCounterName
3062 global iCounterName
3063 global pCounterValue
3064 global iCounterValue
3065 global onosSet
3066 global onosSetName
3067 pCounterName = "TestON-Partitions"
3068 iCounterName = "TestON-inMemory"
3069 pCounterValue = 0
3070 iCounterValue = 0
3071 onosSet = set([])
3072 onosSetName = "TestON-set"
3073
3074 description = "Install Primitives app"
3075 main.case( description )
3076 main.step( "Install Primitives app" )
3077 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003078 node = main.activeNodes[0]
3079 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003080 utilities.assert_equals( expect=main.TRUE,
3081 actual=appResults,
3082 onpass="Primitives app activated",
3083 onfail="Primitives app not activated" )
3084 time.sleep( 5 ) # To allow all nodes to activate
3085
3086 def CASE17( self, main ):
3087 """
3088 Check for basic functionality with distributed primitives
3089 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003090 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003091 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003092 assert main, "main not defined"
3093 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003094 assert main.CLIs, "main.CLIs not defined"
3095 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003096 assert pCounterName, "pCounterName not defined"
3097 assert iCounterName, "iCounterName not defined"
3098 assert onosSetName, "onosSetName not defined"
3099 # NOTE: assert fails if value is 0/None/Empty/False
3100 try:
3101 pCounterValue
3102 except NameError:
3103 main.log.error( "pCounterValue not defined, setting to 0" )
3104 pCounterValue = 0
3105 try:
3106 iCounterValue
3107 except NameError:
3108 main.log.error( "iCounterValue not defined, setting to 0" )
3109 iCounterValue = 0
3110 try:
3111 onosSet
3112 except NameError:
3113 main.log.error( "onosSet not defined, setting to empty Set" )
3114 onosSet = set([])
3115 # Variables for the distributed primitives tests. These are local only
3116 addValue = "a"
3117 addAllValue = "a b c d e f"
3118 retainValue = "c d e f"
3119
3120 description = "Check for basic functionality with distributed " +\
3121 "primitives"
3122 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003123 main.caseExplanation = "Test the methods of the distributed " +\
3124 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003125 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003126 # Partitioned counters
3127 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003128 pCounters = []
3129 threads = []
3130 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003131 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003132 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3133 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003134 args=[ pCounterName ] )
3135 pCounterValue += 1
3136 addedPValues.append( pCounterValue )
3137 threads.append( t )
3138 t.start()
3139
3140 for t in threads:
3141 t.join()
3142 pCounters.append( t.result )
3143 # Check that counter incremented numController times
3144 pCounterResults = True
3145 for i in addedPValues:
3146 tmpResult = i in pCounters
3147 pCounterResults = pCounterResults and tmpResult
3148 if not tmpResult:
3149 main.log.error( str( i ) + " is not in partitioned "
3150 "counter incremented results" )
3151 utilities.assert_equals( expect=True,
3152 actual=pCounterResults,
3153 onpass="Default counter incremented",
3154 onfail="Error incrementing default" +
3155 " counter" )
3156
Jon Halle1a3b752015-07-22 13:02:46 -07003157 main.step( "Get then Increment a default counter on each node" )
3158 pCounters = []
3159 threads = []
3160 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003161 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003162 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3163 name="counterGetAndAdd-" + str( i ),
3164 args=[ pCounterName ] )
3165 addedPValues.append( pCounterValue )
3166 pCounterValue += 1
3167 threads.append( t )
3168 t.start()
3169
3170 for t in threads:
3171 t.join()
3172 pCounters.append( t.result )
3173 # Check that counter incremented numController times
3174 pCounterResults = True
3175 for i in addedPValues:
3176 tmpResult = i in pCounters
3177 pCounterResults = pCounterResults and tmpResult
3178 if not tmpResult:
3179 main.log.error( str( i ) + " is not in partitioned "
3180 "counter incremented results" )
3181 utilities.assert_equals( expect=True,
3182 actual=pCounterResults,
3183 onpass="Default counter incremented",
3184 onfail="Error incrementing default" +
3185 " counter" )
3186
3187 main.step( "Counters we added have the correct values" )
3188 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3189 utilities.assert_equals( expect=main.TRUE,
3190 actual=incrementCheck,
3191 onpass="Added counters are correct",
3192 onfail="Added counters are incorrect" )
3193
3194 main.step( "Add -8 to then get a default counter on each node" )
3195 pCounters = []
3196 threads = []
3197 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003198 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003199 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3200 name="counterIncrement-" + str( i ),
3201 args=[ pCounterName ],
3202 kwargs={ "delta": -8 } )
3203 pCounterValue += -8
3204 addedPValues.append( pCounterValue )
3205 threads.append( t )
3206 t.start()
3207
3208 for t in threads:
3209 t.join()
3210 pCounters.append( t.result )
3211 # Check that counter incremented numController times
3212 pCounterResults = True
3213 for i in addedPValues:
3214 tmpResult = i in pCounters
3215 pCounterResults = pCounterResults and tmpResult
3216 if not tmpResult:
3217 main.log.error( str( i ) + " is not in partitioned "
3218 "counter incremented results" )
3219 utilities.assert_equals( expect=True,
3220 actual=pCounterResults,
3221 onpass="Default counter incremented",
3222 onfail="Error incrementing default" +
3223 " counter" )
3224
3225 main.step( "Add 5 to then get a default counter on each node" )
3226 pCounters = []
3227 threads = []
3228 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003229 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003230 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3231 name="counterIncrement-" + str( i ),
3232 args=[ pCounterName ],
3233 kwargs={ "delta": 5 } )
3234 pCounterValue += 5
3235 addedPValues.append( pCounterValue )
3236 threads.append( t )
3237 t.start()
3238
3239 for t in threads:
3240 t.join()
3241 pCounters.append( t.result )
3242 # Check that counter incremented numController times
3243 pCounterResults = True
3244 for i in addedPValues:
3245 tmpResult = i in pCounters
3246 pCounterResults = pCounterResults and tmpResult
3247 if not tmpResult:
3248 main.log.error( str( i ) + " is not in partitioned "
3249 "counter incremented results" )
3250 utilities.assert_equals( expect=True,
3251 actual=pCounterResults,
3252 onpass="Default counter incremented",
3253 onfail="Error incrementing default" +
3254 " counter" )
3255
3256 main.step( "Get then add 5 to a default counter on each node" )
3257 pCounters = []
3258 threads = []
3259 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003260 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003261 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3262 name="counterIncrement-" + str( i ),
3263 args=[ pCounterName ],
3264 kwargs={ "delta": 5 } )
3265 addedPValues.append( pCounterValue )
3266 pCounterValue += 5
3267 threads.append( t )
3268 t.start()
3269
3270 for t in threads:
3271 t.join()
3272 pCounters.append( t.result )
3273 # Check that counter incremented numController times
3274 pCounterResults = True
3275 for i in addedPValues:
3276 tmpResult = i in pCounters
3277 pCounterResults = pCounterResults and tmpResult
3278 if not tmpResult:
3279 main.log.error( str( i ) + " is not in partitioned "
3280 "counter incremented results" )
3281 utilities.assert_equals( expect=True,
3282 actual=pCounterResults,
3283 onpass="Default counter incremented",
3284 onfail="Error incrementing default" +
3285 " counter" )
3286
3287 main.step( "Counters we added have the correct values" )
3288 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3289 utilities.assert_equals( expect=main.TRUE,
3290 actual=incrementCheck,
3291 onpass="Added counters are correct",
3292 onfail="Added counters are incorrect" )
3293
3294 # In-Memory counters
3295 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003296 iCounters = []
3297 addedIValues = []
3298 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003299 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003300 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003301 name="icounterIncrement-" + str( i ),
3302 args=[ iCounterName ],
3303 kwargs={ "inMemory": True } )
3304 iCounterValue += 1
3305 addedIValues.append( iCounterValue )
3306 threads.append( t )
3307 t.start()
3308
3309 for t in threads:
3310 t.join()
3311 iCounters.append( t.result )
3312 # Check that counter incremented numController times
3313 iCounterResults = True
3314 for i in addedIValues:
3315 tmpResult = i in iCounters
3316 iCounterResults = iCounterResults and tmpResult
3317 if not tmpResult:
3318 main.log.error( str( i ) + " is not in the in-memory "
3319 "counter incremented results" )
3320 utilities.assert_equals( expect=True,
3321 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003322 onpass="In-memory counter incremented",
3323 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003324 " counter" )
3325
Jon Halle1a3b752015-07-22 13:02:46 -07003326 main.step( "Get then Increment a in-memory counter on each node" )
3327 iCounters = []
3328 threads = []
3329 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003330 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003331 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3332 name="counterGetAndAdd-" + str( i ),
3333 args=[ iCounterName ],
3334 kwargs={ "inMemory": True } )
3335 addedIValues.append( iCounterValue )
3336 iCounterValue += 1
3337 threads.append( t )
3338 t.start()
3339
3340 for t in threads:
3341 t.join()
3342 iCounters.append( t.result )
3343 # Check that counter incremented numController times
3344 iCounterResults = True
3345 for i in addedIValues:
3346 tmpResult = i in iCounters
3347 iCounterResults = iCounterResults and tmpResult
3348 if not tmpResult:
3349 main.log.error( str( i ) + " is not in in-memory "
3350 "counter incremented results" )
3351 utilities.assert_equals( expect=True,
3352 actual=iCounterResults,
3353 onpass="In-memory counter incremented",
3354 onfail="Error incrementing in-memory" +
3355 " counter" )
3356
3357 main.step( "Counters we added have the correct values" )
3358 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3359 utilities.assert_equals( expect=main.TRUE,
3360 actual=incrementCheck,
3361 onpass="Added counters are correct",
3362 onfail="Added counters are incorrect" )
3363
3364 main.step( "Add -8 to then get a in-memory counter on each node" )
3365 iCounters = []
3366 threads = []
3367 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003368 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003369 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3370 name="counterIncrement-" + str( i ),
3371 args=[ iCounterName ],
3372 kwargs={ "delta": -8, "inMemory": True } )
3373 iCounterValue += -8
3374 addedIValues.append( iCounterValue )
3375 threads.append( t )
3376 t.start()
3377
3378 for t in threads:
3379 t.join()
3380 iCounters.append( t.result )
3381 # Check that counter incremented numController times
3382 iCounterResults = True
3383 for i in addedIValues:
3384 tmpResult = i in iCounters
3385 iCounterResults = iCounterResults and tmpResult
3386 if not tmpResult:
3387 main.log.error( str( i ) + " is not in in-memory "
3388 "counter incremented results" )
3389 utilities.assert_equals( expect=True,
3390 actual=pCounterResults,
3391 onpass="In-memory counter incremented",
3392 onfail="Error incrementing in-memory" +
3393 " counter" )
3394
3395 main.step( "Add 5 to then get a in-memory counter on each node" )
3396 iCounters = []
3397 threads = []
3398 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003399 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003400 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3401 name="counterIncrement-" + str( i ),
3402 args=[ iCounterName ],
3403 kwargs={ "delta": 5, "inMemory": True } )
3404 iCounterValue += 5
3405 addedIValues.append( iCounterValue )
3406 threads.append( t )
3407 t.start()
3408
3409 for t in threads:
3410 t.join()
3411 iCounters.append( t.result )
3412 # Check that counter incremented numController times
3413 iCounterResults = True
3414 for i in addedIValues:
3415 tmpResult = i in iCounters
3416 iCounterResults = iCounterResults and tmpResult
3417 if not tmpResult:
3418 main.log.error( str( i ) + " is not in in-memory "
3419 "counter incremented results" )
3420 utilities.assert_equals( expect=True,
3421 actual=pCounterResults,
3422 onpass="In-memory counter incremented",
3423 onfail="Error incrementing in-memory" +
3424 " counter" )
3425
3426 main.step( "Get then add 5 to a in-memory counter on each node" )
3427 iCounters = []
3428 threads = []
3429 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003430 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003431 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3432 name="counterIncrement-" + str( i ),
3433 args=[ iCounterName ],
3434 kwargs={ "delta": 5, "inMemory": True } )
3435 addedIValues.append( iCounterValue )
3436 iCounterValue += 5
3437 threads.append( t )
3438 t.start()
3439
3440 for t in threads:
3441 t.join()
3442 iCounters.append( t.result )
3443 # Check that counter incremented numController times
3444 iCounterResults = True
3445 for i in addedIValues:
3446 tmpResult = i in iCounters
3447 iCounterResults = iCounterResults and tmpResult
3448 if not tmpResult:
3449 main.log.error( str( i ) + " is not in in-memory "
3450 "counter incremented results" )
3451 utilities.assert_equals( expect=True,
3452 actual=iCounterResults,
3453 onpass="In-memory counter incremented",
3454 onfail="Error incrementing in-memory" +
3455 " counter" )
3456
3457 main.step( "Counters we added have the correct values" )
3458 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3459 utilities.assert_equals( expect=main.TRUE,
3460 actual=incrementCheck,
3461 onpass="Added counters are correct",
3462 onfail="Added counters are incorrect" )
3463
Jon Hall5cf14d52015-07-16 12:15:19 -07003464 main.step( "Check counters are consistant across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003465 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003466 utilities.assert_equals( expect=main.TRUE,
3467 actual=consistentCounterResults,
3468 onpass="ONOS counters are consistent " +
3469 "across nodes",
3470 onfail="ONOS Counters are inconsistent " +
3471 "across nodes" )
3472
3473 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003474 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3475 incrementCheck = incrementCheck and \
3476 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003477 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003478 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003479 onpass="Added counters are correct",
3480 onfail="Added counters are incorrect" )
3481 # DISTRIBUTED SETS
3482 main.step( "Distributed Set get" )
3483 size = len( onosSet )
3484 getResponses = []
3485 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003486 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003487 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003488 name="setTestGet-" + str( i ),
3489 args=[ onosSetName ] )
3490 threads.append( t )
3491 t.start()
3492 for t in threads:
3493 t.join()
3494 getResponses.append( t.result )
3495
3496 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003497 for i in range( len( main.activeNodes ) ):
3498 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003499 if isinstance( getResponses[ i ], list):
3500 current = set( getResponses[ i ] )
3501 if len( current ) == len( getResponses[ i ] ):
3502 # no repeats
3503 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003504 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003505 " has incorrect view" +
3506 " of set " + onosSetName + ":\n" +
3507 str( getResponses[ i ] ) )
3508 main.log.debug( "Expected: " + str( onosSet ) )
3509 main.log.debug( "Actual: " + str( current ) )
3510 getResults = main.FALSE
3511 else:
3512 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003513 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003514 " has repeat elements in" +
3515 " set " + onosSetName + ":\n" +
3516 str( getResponses[ i ] ) )
3517 getResults = main.FALSE
3518 elif getResponses[ i ] == main.ERROR:
3519 getResults = main.FALSE
3520 utilities.assert_equals( expect=main.TRUE,
3521 actual=getResults,
3522 onpass="Set elements are correct",
3523 onfail="Set elements are incorrect" )
3524
3525 main.step( "Distributed Set size" )
3526 sizeResponses = []
3527 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003528 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003529 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003530 name="setTestSize-" + str( i ),
3531 args=[ onosSetName ] )
3532 threads.append( t )
3533 t.start()
3534 for t in threads:
3535 t.join()
3536 sizeResponses.append( t.result )
3537
3538 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003539 for i in range( len( main.activeNodes ) ):
3540 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003541 if size != sizeResponses[ i ]:
3542 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003543 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003544 " expected a size of " + str( size ) +
3545 " for set " + onosSetName +
3546 " but got " + str( sizeResponses[ i ] ) )
3547 utilities.assert_equals( expect=main.TRUE,
3548 actual=sizeResults,
3549 onpass="Set sizes are correct",
3550 onfail="Set sizes are incorrect" )
3551
3552 main.step( "Distributed Set add()" )
3553 onosSet.add( addValue )
3554 addResponses = []
3555 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003556 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003557 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003558 name="setTestAdd-" + str( i ),
3559 args=[ onosSetName, addValue ] )
3560 threads.append( t )
3561 t.start()
3562 for t in threads:
3563 t.join()
3564 addResponses.append( t.result )
3565
3566 # main.TRUE = successfully changed the set
3567 # main.FALSE = action resulted in no change in set
3568 # main.ERROR - Some error in executing the function
3569 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003570 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003571 if addResponses[ i ] == main.TRUE:
3572 # All is well
3573 pass
3574 elif addResponses[ i ] == main.FALSE:
3575 # Already in set, probably fine
3576 pass
3577 elif addResponses[ i ] == main.ERROR:
3578 # Error in execution
3579 addResults = main.FALSE
3580 else:
3581 # unexpected result
3582 addResults = main.FALSE
3583 if addResults != main.TRUE:
3584 main.log.error( "Error executing set add" )
3585
3586 # Check if set is still correct
3587 size = len( onosSet )
3588 getResponses = []
3589 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003590 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003591 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003592 name="setTestGet-" + str( i ),
3593 args=[ onosSetName ] )
3594 threads.append( t )
3595 t.start()
3596 for t in threads:
3597 t.join()
3598 getResponses.append( t.result )
3599 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003600 for i in range( len( main.activeNodes ) ):
3601 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003602 if isinstance( getResponses[ i ], list):
3603 current = set( getResponses[ i ] )
3604 if len( current ) == len( getResponses[ i ] ):
3605 # no repeats
3606 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003607 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003608 " of set " + onosSetName + ":\n" +
3609 str( getResponses[ i ] ) )
3610 main.log.debug( "Expected: " + str( onosSet ) )
3611 main.log.debug( "Actual: " + str( current ) )
3612 getResults = main.FALSE
3613 else:
3614 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003615 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003616 " set " + onosSetName + ":\n" +
3617 str( getResponses[ i ] ) )
3618 getResults = main.FALSE
3619 elif getResponses[ i ] == main.ERROR:
3620 getResults = main.FALSE
3621 sizeResponses = []
3622 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003623 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003624 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003625 name="setTestSize-" + str( i ),
3626 args=[ onosSetName ] )
3627 threads.append( t )
3628 t.start()
3629 for t in threads:
3630 t.join()
3631 sizeResponses.append( t.result )
3632 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003633 for i in range( len( main.activeNodes ) ):
3634 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003635 if size != sizeResponses[ i ]:
3636 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003637 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003638 " expected a size of " + str( size ) +
3639 " for set " + onosSetName +
3640 " but got " + str( sizeResponses[ i ] ) )
3641 addResults = addResults and getResults and sizeResults
3642 utilities.assert_equals( expect=main.TRUE,
3643 actual=addResults,
3644 onpass="Set add correct",
3645 onfail="Set add was incorrect" )
3646
3647 main.step( "Distributed Set addAll()" )
3648 onosSet.update( addAllValue.split() )
3649 addResponses = []
3650 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003651 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003652 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003653 name="setTestAddAll-" + str( i ),
3654 args=[ onosSetName, addAllValue ] )
3655 threads.append( t )
3656 t.start()
3657 for t in threads:
3658 t.join()
3659 addResponses.append( t.result )
3660
3661 # main.TRUE = successfully changed the set
3662 # main.FALSE = action resulted in no change in set
3663 # main.ERROR - Some error in executing the function
3664 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003665 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003666 if addResponses[ i ] == main.TRUE:
3667 # All is well
3668 pass
3669 elif addResponses[ i ] == main.FALSE:
3670 # Already in set, probably fine
3671 pass
3672 elif addResponses[ i ] == main.ERROR:
3673 # Error in execution
3674 addAllResults = main.FALSE
3675 else:
3676 # unexpected result
3677 addAllResults = main.FALSE
3678 if addAllResults != main.TRUE:
3679 main.log.error( "Error executing set addAll" )
3680
3681 # Check if set is still correct
3682 size = len( onosSet )
3683 getResponses = []
3684 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003685 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003686 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003687 name="setTestGet-" + str( i ),
3688 args=[ onosSetName ] )
3689 threads.append( t )
3690 t.start()
3691 for t in threads:
3692 t.join()
3693 getResponses.append( t.result )
3694 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003695 for i in range( len( main.activeNodes ) ):
3696 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003697 if isinstance( getResponses[ i ], list):
3698 current = set( getResponses[ i ] )
3699 if len( current ) == len( getResponses[ i ] ):
3700 # no repeats
3701 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003702 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003703 " has incorrect view" +
3704 " of set " + onosSetName + ":\n" +
3705 str( getResponses[ i ] ) )
3706 main.log.debug( "Expected: " + str( onosSet ) )
3707 main.log.debug( "Actual: " + str( current ) )
3708 getResults = main.FALSE
3709 else:
3710 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003711 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003712 " has repeat elements in" +
3713 " set " + onosSetName + ":\n" +
3714 str( getResponses[ i ] ) )
3715 getResults = main.FALSE
3716 elif getResponses[ i ] == main.ERROR:
3717 getResults = main.FALSE
3718 sizeResponses = []
3719 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003720 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003721 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003722 name="setTestSize-" + str( i ),
3723 args=[ onosSetName ] )
3724 threads.append( t )
3725 t.start()
3726 for t in threads:
3727 t.join()
3728 sizeResponses.append( t.result )
3729 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003730 for i in range( len( main.activeNodes ) ):
3731 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003732 if size != sizeResponses[ i ]:
3733 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003734 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003735 " expected a size of " + str( size ) +
3736 " for set " + onosSetName +
3737 " but got " + str( sizeResponses[ i ] ) )
3738 addAllResults = addAllResults and getResults and sizeResults
3739 utilities.assert_equals( expect=main.TRUE,
3740 actual=addAllResults,
3741 onpass="Set addAll correct",
3742 onfail="Set addAll was incorrect" )
3743
3744 main.step( "Distributed Set contains()" )
3745 containsResponses = []
3746 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003747 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003748 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003749 name="setContains-" + str( i ),
3750 args=[ onosSetName ],
3751 kwargs={ "values": addValue } )
3752 threads.append( t )
3753 t.start()
3754 for t in threads:
3755 t.join()
3756 # NOTE: This is the tuple
3757 containsResponses.append( t.result )
3758
3759 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003760 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003761 if containsResponses[ i ] == main.ERROR:
3762 containsResults = main.FALSE
3763 else:
3764 containsResults = containsResults and\
3765 containsResponses[ i ][ 1 ]
3766 utilities.assert_equals( expect=main.TRUE,
3767 actual=containsResults,
3768 onpass="Set contains is functional",
3769 onfail="Set contains failed" )
3770
3771 main.step( "Distributed Set containsAll()" )
3772 containsAllResponses = []
3773 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003774 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003775 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003776 name="setContainsAll-" + str( i ),
3777 args=[ onosSetName ],
3778 kwargs={ "values": addAllValue } )
3779 threads.append( t )
3780 t.start()
3781 for t in threads:
3782 t.join()
3783 # NOTE: This is the tuple
3784 containsAllResponses.append( t.result )
3785
3786 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003787 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003788 if containsResponses[ i ] == main.ERROR:
3789 containsResults = main.FALSE
3790 else:
3791 containsResults = containsResults and\
3792 containsResponses[ i ][ 1 ]
3793 utilities.assert_equals( expect=main.TRUE,
3794 actual=containsAllResults,
3795 onpass="Set containsAll is functional",
3796 onfail="Set containsAll failed" )
3797
3798 main.step( "Distributed Set remove()" )
3799 onosSet.remove( addValue )
3800 removeResponses = []
3801 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003802 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003803 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003804 name="setTestRemove-" + str( i ),
3805 args=[ onosSetName, addValue ] )
3806 threads.append( t )
3807 t.start()
3808 for t in threads:
3809 t.join()
3810 removeResponses.append( t.result )
3811
3812 # main.TRUE = successfully changed the set
3813 # main.FALSE = action resulted in no change in set
3814 # main.ERROR - Some error in executing the function
3815 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003816 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003817 if removeResponses[ i ] == main.TRUE:
3818 # All is well
3819 pass
3820 elif removeResponses[ i ] == main.FALSE:
3821 # not in set, probably fine
3822 pass
3823 elif removeResponses[ i ] == main.ERROR:
3824 # Error in execution
3825 removeResults = main.FALSE
3826 else:
3827 # unexpected result
3828 removeResults = main.FALSE
3829 if removeResults != main.TRUE:
3830 main.log.error( "Error executing set remove" )
3831
3832 # Check if set is still correct
3833 size = len( onosSet )
3834 getResponses = []
3835 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003836 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003837 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003838 name="setTestGet-" + str( i ),
3839 args=[ onosSetName ] )
3840 threads.append( t )
3841 t.start()
3842 for t in threads:
3843 t.join()
3844 getResponses.append( t.result )
3845 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003846 for i in range( len( main.activeNodes ) ):
3847 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003848 if isinstance( getResponses[ i ], list):
3849 current = set( getResponses[ i ] )
3850 if len( current ) == len( getResponses[ i ] ):
3851 # no repeats
3852 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003853 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003854 " has incorrect view" +
3855 " of set " + onosSetName + ":\n" +
3856 str( getResponses[ i ] ) )
3857 main.log.debug( "Expected: " + str( onosSet ) )
3858 main.log.debug( "Actual: " + str( current ) )
3859 getResults = main.FALSE
3860 else:
3861 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003862 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003863 " has repeat elements in" +
3864 " set " + onosSetName + ":\n" +
3865 str( getResponses[ i ] ) )
3866 getResults = main.FALSE
3867 elif getResponses[ i ] == main.ERROR:
3868 getResults = main.FALSE
3869 sizeResponses = []
3870 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003871 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003872 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003873 name="setTestSize-" + str( i ),
3874 args=[ onosSetName ] )
3875 threads.append( t )
3876 t.start()
3877 for t in threads:
3878 t.join()
3879 sizeResponses.append( t.result )
3880 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003881 for i in range( len( main.activeNodes ) ):
3882 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003883 if size != sizeResponses[ i ]:
3884 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003885 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003886 " expected a size of " + str( size ) +
3887 " for set " + onosSetName +
3888 " but got " + str( sizeResponses[ i ] ) )
3889 removeResults = removeResults and getResults and sizeResults
3890 utilities.assert_equals( expect=main.TRUE,
3891 actual=removeResults,
3892 onpass="Set remove correct",
3893 onfail="Set remove was incorrect" )
3894
3895 main.step( "Distributed Set removeAll()" )
3896 onosSet.difference_update( addAllValue.split() )
3897 removeAllResponses = []
3898 threads = []
3899 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003900 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003901 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003902 name="setTestRemoveAll-" + str( i ),
3903 args=[ onosSetName, addAllValue ] )
3904 threads.append( t )
3905 t.start()
3906 for t in threads:
3907 t.join()
3908 removeAllResponses.append( t.result )
3909 except Exception, e:
3910 main.log.exception(e)
3911
3912 # main.TRUE = successfully changed the set
3913 # main.FALSE = action resulted in no change in set
3914 # main.ERROR - Some error in executing the function
3915 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003916 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003917 if removeAllResponses[ i ] == main.TRUE:
3918 # All is well
3919 pass
3920 elif removeAllResponses[ i ] == main.FALSE:
3921 # not in set, probably fine
3922 pass
3923 elif removeAllResponses[ i ] == main.ERROR:
3924 # Error in execution
3925 removeAllResults = main.FALSE
3926 else:
3927 # unexpected result
3928 removeAllResults = main.FALSE
3929 if removeAllResults != main.TRUE:
3930 main.log.error( "Error executing set removeAll" )
3931
3932 # Check if set is still correct
3933 size = len( onosSet )
3934 getResponses = []
3935 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003936 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003937 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003938 name="setTestGet-" + str( i ),
3939 args=[ onosSetName ] )
3940 threads.append( t )
3941 t.start()
3942 for t in threads:
3943 t.join()
3944 getResponses.append( t.result )
3945 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003946 for i in range( len( main.activeNodes ) ):
3947 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003948 if isinstance( getResponses[ i ], list):
3949 current = set( getResponses[ i ] )
3950 if len( current ) == len( getResponses[ i ] ):
3951 # no repeats
3952 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003953 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003954 " has incorrect view" +
3955 " of set " + onosSetName + ":\n" +
3956 str( getResponses[ i ] ) )
3957 main.log.debug( "Expected: " + str( onosSet ) )
3958 main.log.debug( "Actual: " + str( current ) )
3959 getResults = main.FALSE
3960 else:
3961 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003962 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003963 " has repeat elements in" +
3964 " set " + onosSetName + ":\n" +
3965 str( getResponses[ i ] ) )
3966 getResults = main.FALSE
3967 elif getResponses[ i ] == main.ERROR:
3968 getResults = main.FALSE
3969 sizeResponses = []
3970 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003971 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003972 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003973 name="setTestSize-" + str( i ),
3974 args=[ onosSetName ] )
3975 threads.append( t )
3976 t.start()
3977 for t in threads:
3978 t.join()
3979 sizeResponses.append( t.result )
3980 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003981 for i in range( len( main.activeNodes ) ):
3982 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003983 if size != sizeResponses[ i ]:
3984 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003985 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003986 " expected a size of " + str( size ) +
3987 " for set " + onosSetName +
3988 " but got " + str( sizeResponses[ i ] ) )
3989 removeAllResults = removeAllResults and getResults and sizeResults
3990 utilities.assert_equals( expect=main.TRUE,
3991 actual=removeAllResults,
3992 onpass="Set removeAll correct",
3993 onfail="Set removeAll was incorrect" )
3994
3995 main.step( "Distributed Set addAll()" )
3996 onosSet.update( addAllValue.split() )
3997 addResponses = []
3998 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003999 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004000 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004001 name="setTestAddAll-" + str( i ),
4002 args=[ onosSetName, addAllValue ] )
4003 threads.append( t )
4004 t.start()
4005 for t in threads:
4006 t.join()
4007 addResponses.append( t.result )
4008
4009 # main.TRUE = successfully changed the set
4010 # main.FALSE = action resulted in no change in set
4011 # main.ERROR - Some error in executing the function
4012 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004013 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004014 if addResponses[ i ] == main.TRUE:
4015 # All is well
4016 pass
4017 elif addResponses[ i ] == main.FALSE:
4018 # Already in set, probably fine
4019 pass
4020 elif addResponses[ i ] == main.ERROR:
4021 # Error in execution
4022 addAllResults = main.FALSE
4023 else:
4024 # unexpected result
4025 addAllResults = main.FALSE
4026 if addAllResults != main.TRUE:
4027 main.log.error( "Error executing set addAll" )
4028
4029 # Check if set is still correct
4030 size = len( onosSet )
4031 getResponses = []
4032 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004033 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004034 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004035 name="setTestGet-" + str( i ),
4036 args=[ onosSetName ] )
4037 threads.append( t )
4038 t.start()
4039 for t in threads:
4040 t.join()
4041 getResponses.append( t.result )
4042 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004043 for i in range( len( main.activeNodes ) ):
4044 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004045 if isinstance( getResponses[ i ], list):
4046 current = set( getResponses[ i ] )
4047 if len( current ) == len( getResponses[ i ] ):
4048 # no repeats
4049 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004050 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004051 " has incorrect view" +
4052 " of set " + onosSetName + ":\n" +
4053 str( getResponses[ i ] ) )
4054 main.log.debug( "Expected: " + str( onosSet ) )
4055 main.log.debug( "Actual: " + str( current ) )
4056 getResults = main.FALSE
4057 else:
4058 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004059 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004060 " has repeat elements in" +
4061 " set " + onosSetName + ":\n" +
4062 str( getResponses[ i ] ) )
4063 getResults = main.FALSE
4064 elif getResponses[ i ] == main.ERROR:
4065 getResults = main.FALSE
4066 sizeResponses = []
4067 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004068 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004069 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004070 name="setTestSize-" + str( i ),
4071 args=[ onosSetName ] )
4072 threads.append( t )
4073 t.start()
4074 for t in threads:
4075 t.join()
4076 sizeResponses.append( t.result )
4077 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004078 for i in range( len( main.activeNodes ) ):
4079 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004080 if size != sizeResponses[ i ]:
4081 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004082 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004083 " expected a size of " + str( size ) +
4084 " for set " + onosSetName +
4085 " but got " + str( sizeResponses[ i ] ) )
4086 addAllResults = addAllResults and getResults and sizeResults
4087 utilities.assert_equals( expect=main.TRUE,
4088 actual=addAllResults,
4089 onpass="Set addAll correct",
4090 onfail="Set addAll was incorrect" )
4091
4092 main.step( "Distributed Set clear()" )
4093 onosSet.clear()
4094 clearResponses = []
4095 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004096 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004097 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004098 name="setTestClear-" + str( i ),
4099 args=[ onosSetName, " "], # Values doesn't matter
4100 kwargs={ "clear": True } )
4101 threads.append( t )
4102 t.start()
4103 for t in threads:
4104 t.join()
4105 clearResponses.append( t.result )
4106
4107 # main.TRUE = successfully changed the set
4108 # main.FALSE = action resulted in no change in set
4109 # main.ERROR - Some error in executing the function
4110 clearResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004111 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004112 if clearResponses[ i ] == main.TRUE:
4113 # All is well
4114 pass
4115 elif clearResponses[ i ] == main.FALSE:
4116 # Nothing set, probably fine
4117 pass
4118 elif clearResponses[ i ] == main.ERROR:
4119 # Error in execution
4120 clearResults = main.FALSE
4121 else:
4122 # unexpected result
4123 clearResults = main.FALSE
4124 if clearResults != main.TRUE:
4125 main.log.error( "Error executing set clear" )
4126
4127 # Check if set is still correct
4128 size = len( onosSet )
4129 getResponses = []
4130 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004131 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004132 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004133 name="setTestGet-" + str( i ),
4134 args=[ onosSetName ] )
4135 threads.append( t )
4136 t.start()
4137 for t in threads:
4138 t.join()
4139 getResponses.append( t.result )
4140 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004141 for i in range( len( main.activeNodes ) ):
4142 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004143 if isinstance( getResponses[ i ], list):
4144 current = set( getResponses[ i ] )
4145 if len( current ) == len( getResponses[ i ] ):
4146 # no repeats
4147 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004148 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004149 " has incorrect view" +
4150 " of set " + onosSetName + ":\n" +
4151 str( getResponses[ i ] ) )
4152 main.log.debug( "Expected: " + str( onosSet ) )
4153 main.log.debug( "Actual: " + str( current ) )
4154 getResults = main.FALSE
4155 else:
4156 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004157 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004158 " has repeat elements in" +
4159 " set " + onosSetName + ":\n" +
4160 str( getResponses[ i ] ) )
4161 getResults = main.FALSE
4162 elif getResponses[ i ] == main.ERROR:
4163 getResults = main.FALSE
4164 sizeResponses = []
4165 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004166 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004167 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004168 name="setTestSize-" + str( i ),
4169 args=[ onosSetName ] )
4170 threads.append( t )
4171 t.start()
4172 for t in threads:
4173 t.join()
4174 sizeResponses.append( t.result )
4175 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004176 for i in range( len( main.activeNodes ) ):
4177 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004178 if size != sizeResponses[ i ]:
4179 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004180 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004181 " expected a size of " + str( size ) +
4182 " for set " + onosSetName +
4183 " but got " + str( sizeResponses[ i ] ) )
4184 clearResults = clearResults and getResults and sizeResults
4185 utilities.assert_equals( expect=main.TRUE,
4186 actual=clearResults,
4187 onpass="Set clear correct",
4188 onfail="Set clear was incorrect" )
4189
4190 main.step( "Distributed Set addAll()" )
4191 onosSet.update( addAllValue.split() )
4192 addResponses = []
4193 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004194 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004195 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004196 name="setTestAddAll-" + str( i ),
4197 args=[ onosSetName, addAllValue ] )
4198 threads.append( t )
4199 t.start()
4200 for t in threads:
4201 t.join()
4202 addResponses.append( t.result )
4203
4204 # main.TRUE = successfully changed the set
4205 # main.FALSE = action resulted in no change in set
4206 # main.ERROR - Some error in executing the function
4207 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004208 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004209 if addResponses[ i ] == main.TRUE:
4210 # All is well
4211 pass
4212 elif addResponses[ i ] == main.FALSE:
4213 # Already in set, probably fine
4214 pass
4215 elif addResponses[ i ] == main.ERROR:
4216 # Error in execution
4217 addAllResults = main.FALSE
4218 else:
4219 # unexpected result
4220 addAllResults = main.FALSE
4221 if addAllResults != main.TRUE:
4222 main.log.error( "Error executing set addAll" )
4223
4224 # Check if set is still correct
4225 size = len( onosSet )
4226 getResponses = []
4227 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004228 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004229 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004230 name="setTestGet-" + str( i ),
4231 args=[ onosSetName ] )
4232 threads.append( t )
4233 t.start()
4234 for t in threads:
4235 t.join()
4236 getResponses.append( t.result )
4237 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004238 for i in range( len( main.activeNodes ) ):
4239 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004240 if isinstance( getResponses[ i ], list):
4241 current = set( getResponses[ i ] )
4242 if len( current ) == len( getResponses[ i ] ):
4243 # no repeats
4244 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004245 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004246 " has incorrect view" +
4247 " of set " + onosSetName + ":\n" +
4248 str( getResponses[ i ] ) )
4249 main.log.debug( "Expected: " + str( onosSet ) )
4250 main.log.debug( "Actual: " + str( current ) )
4251 getResults = main.FALSE
4252 else:
4253 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004254 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004255 " has repeat elements in" +
4256 " set " + onosSetName + ":\n" +
4257 str( getResponses[ i ] ) )
4258 getResults = main.FALSE
4259 elif getResponses[ i ] == main.ERROR:
4260 getResults = main.FALSE
4261 sizeResponses = []
4262 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004263 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004264 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004265 name="setTestSize-" + str( i ),
4266 args=[ onosSetName ] )
4267 threads.append( t )
4268 t.start()
4269 for t in threads:
4270 t.join()
4271 sizeResponses.append( t.result )
4272 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004273 for i in range( len( main.activeNodes ) ):
4274 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004275 if size != sizeResponses[ i ]:
4276 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004277 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004278 " expected a size of " + str( size ) +
4279 " for set " + onosSetName +
4280 " but got " + str( sizeResponses[ i ] ) )
4281 addAllResults = addAllResults and getResults and sizeResults
4282 utilities.assert_equals( expect=main.TRUE,
4283 actual=addAllResults,
4284 onpass="Set addAll correct",
4285 onfail="Set addAll was incorrect" )
4286
4287 main.step( "Distributed Set retain()" )
4288 onosSet.intersection_update( retainValue.split() )
4289 retainResponses = []
4290 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004291 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004292 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004293 name="setTestRetain-" + str( i ),
4294 args=[ onosSetName, retainValue ],
4295 kwargs={ "retain": True } )
4296 threads.append( t )
4297 t.start()
4298 for t in threads:
4299 t.join()
4300 retainResponses.append( t.result )
4301
4302 # main.TRUE = successfully changed the set
4303 # main.FALSE = action resulted in no change in set
4304 # main.ERROR - Some error in executing the function
4305 retainResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004306 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004307 if retainResponses[ i ] == main.TRUE:
4308 # All is well
4309 pass
4310 elif retainResponses[ i ] == main.FALSE:
4311 # Already in set, probably fine
4312 pass
4313 elif retainResponses[ i ] == main.ERROR:
4314 # Error in execution
4315 retainResults = main.FALSE
4316 else:
4317 # unexpected result
4318 retainResults = main.FALSE
4319 if retainResults != main.TRUE:
4320 main.log.error( "Error executing set retain" )
4321
4322 # Check if set is still correct
4323 size = len( onosSet )
4324 getResponses = []
4325 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004326 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004327 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004328 name="setTestGet-" + str( i ),
4329 args=[ onosSetName ] )
4330 threads.append( t )
4331 t.start()
4332 for t in threads:
4333 t.join()
4334 getResponses.append( t.result )
4335 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004336 for i in range( len( main.activeNodes ) ):
4337 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004338 if isinstance( getResponses[ i ], list):
4339 current = set( getResponses[ i ] )
4340 if len( current ) == len( getResponses[ i ] ):
4341 # no repeats
4342 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004343 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004344 " has incorrect view" +
4345 " of set " + onosSetName + ":\n" +
4346 str( getResponses[ i ] ) )
4347 main.log.debug( "Expected: " + str( onosSet ) )
4348 main.log.debug( "Actual: " + str( current ) )
4349 getResults = main.FALSE
4350 else:
4351 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004352 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004353 " has repeat elements in" +
4354 " set " + onosSetName + ":\n" +
4355 str( getResponses[ i ] ) )
4356 getResults = main.FALSE
4357 elif getResponses[ i ] == main.ERROR:
4358 getResults = main.FALSE
4359 sizeResponses = []
4360 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004361 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004362 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004363 name="setTestSize-" + str( i ),
4364 args=[ onosSetName ] )
4365 threads.append( t )
4366 t.start()
4367 for t in threads:
4368 t.join()
4369 sizeResponses.append( t.result )
4370 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004371 for i in range( len( main.activeNodes ) ):
4372 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004373 if size != sizeResponses[ i ]:
4374 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004375 main.log.error( "ONOS" + node + " expected a size of " +
Jon Hall5cf14d52015-07-16 12:15:19 -07004376 str( size ) + " for set " + onosSetName +
4377 " but got " + str( sizeResponses[ i ] ) )
4378 retainResults = retainResults and getResults and sizeResults
4379 utilities.assert_equals( expect=main.TRUE,
4380 actual=retainResults,
4381 onpass="Set retain correct",
4382 onfail="Set retain was incorrect" )
4383
Jon Hall2a5002c2015-08-21 16:49:11 -07004384 # Transactional maps
4385 main.step( "Partitioned Transactional maps put" )
4386 tMapValue = "Testing"
4387 numKeys = 100
4388 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004389 node = main.activeNodes[0]
4390 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall2a5002c2015-08-21 16:49:11 -07004391 if len( putResponses ) == 100:
4392 for i in putResponses:
4393 if putResponses[ i ][ 'value' ] != tMapValue:
4394 putResult = False
4395 else:
4396 putResult = False
4397 if not putResult:
4398 main.log.debug( "Put response values: " + str( putResponses ) )
4399 utilities.assert_equals( expect=True,
4400 actual=putResult,
4401 onpass="Partitioned Transactional Map put successful",
4402 onfail="Partitioned Transactional Map put values are incorrect" )
4403
4404 main.step( "Partitioned Transactional maps get" )
4405 getCheck = True
4406 for n in range( 1, numKeys + 1 ):
4407 getResponses = []
4408 threads = []
4409 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004410 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004411 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4412 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004413 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004414 threads.append( t )
4415 t.start()
4416 for t in threads:
4417 t.join()
4418 getResponses.append( t.result )
4419 for node in getResponses:
4420 if node != tMapValue:
4421 valueCheck = False
4422 if not valueCheck:
4423 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4424 main.log.warn( getResponses )
4425 getCheck = getCheck and valueCheck
4426 utilities.assert_equals( expect=True,
4427 actual=getCheck,
4428 onpass="Partitioned Transactional Map get values were correct",
4429 onfail="Partitioned Transactional Map values incorrect" )
4430
4431 main.step( "In-memory Transactional maps put" )
4432 tMapValue = "Testing"
4433 numKeys = 100
4434 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004435 node = main.activeNodes[0]
4436 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
Jon Hall2a5002c2015-08-21 16:49:11 -07004437 if len( putResponses ) == 100:
4438 for i in putResponses:
4439 if putResponses[ i ][ 'value' ] != tMapValue:
4440 putResult = False
4441 else:
4442 putResult = False
4443 if not putResult:
4444 main.log.debug( "Put response values: " + str( putResponses ) )
4445 utilities.assert_equals( expect=True,
4446 actual=putResult,
4447 onpass="In-Memory Transactional Map put successful",
4448 onfail="In-Memory Transactional Map put values are incorrect" )
4449
4450 main.step( "In-Memory Transactional maps get" )
4451 getCheck = True
4452 for n in range( 1, numKeys + 1 ):
4453 getResponses = []
4454 threads = []
4455 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004456 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004457 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4458 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004459 args=[ "Key" + str( n ) ],
Jon Hall2a5002c2015-08-21 16:49:11 -07004460 kwargs={ "inMemory": True } )
4461 threads.append( t )
4462 t.start()
4463 for t in threads:
4464 t.join()
4465 getResponses.append( t.result )
4466 for node in getResponses:
4467 if node != tMapValue:
4468 valueCheck = False
4469 if not valueCheck:
4470 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4471 main.log.warn( getResponses )
4472 getCheck = getCheck and valueCheck
4473 utilities.assert_equals( expect=True,
4474 actual=getCheck,
4475 onpass="In-Memory Transactional Map get values were correct",
4476 onfail="In-Memory Transactional Map values incorrect" )