blob: edcc4d1d29dd6fd0f8a61db232fb7a7edbea6e85 [file] [log] [blame]
Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if ONOS can handle
3 a minority of its nodes restarting
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
Jon Hallb3ed8ed2015-10-28 16:43:55 -070012CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
Jon Hall5cf14d52015-07-16 12:15:19 -070014CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
Jon Hallb3ed8ed2015-10-28 16:43:55 -070028class HAstopNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -070029
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hall3b489db2015-10-05 14:38:37 -070052 import pexpect
Jon Hallf3d16e72015-12-16 17:45:08 -080053 import time
Jon Hallb3ed8ed2015-10-28 16:43:55 -070054 main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
Jon Hall5cf14d52015-07-16 12:15:19 -070055 "initialization" )
56 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070057 main.caseExplanation = "Setup the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070058 "installing ONOS, starting Mininet and ONOS" +\
59 "cli sessions."
60 # TODO: save all the timers and output them for plotting
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
Jon Halle1a3b752015-07-22 13:02:46 -070069 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070070 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070071 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070074 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
81
82 # FIXME: just get controller port from params?
83 # TODO: do we really need all these?
84 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
85 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
86 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
87 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
88 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
89 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
90 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
91
Jon Halle1a3b752015-07-22 13:02:46 -070092 try:
93 fileName = "Counters"
94 # TODO: Maybe make a library folder somewhere?
95 path = main.params[ 'imports' ][ 'path' ]
96 main.Counters = imp.load_source( fileName,
97 path + fileName + ".py" )
98 except Exception as e:
99 main.log.exception( e )
100 main.cleanup()
101 main.exit()
102
103 main.CLIs = []
104 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700105 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700106 for i in range( 1, main.numCtrls + 1 ):
107 try:
108 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
109 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
110 ipList.append( main.nodes[ -1 ].ip_address )
111 except AttributeError:
112 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700113
114 main.step( "Create cell file" )
115 cellAppString = main.params[ 'ENV' ][ 'appString' ]
116 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
117 main.Mininet1.ip_address,
118 cellAppString, ipList )
119 main.step( "Applying cell variable to environment" )
120 cellResult = main.ONOSbench.setCell( cellName )
121 verifyResult = main.ONOSbench.verifyCell()
122
123 # FIXME:this is short term fix
124 main.log.info( "Removing raft logs" )
125 main.ONOSbench.onosRemoveRaftLogs()
126
127 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700128 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700129 main.ONOSbench.onosUninstall( node.ip_address )
130
131 # Make sure ONOS is DEAD
132 main.log.info( "Killing any ONOS processes" )
133 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700134 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700135 killed = main.ONOSbench.onosKill( node.ip_address )
136 killResults = killResults and killed
137
138 cleanInstallResult = main.TRUE
139 gitPullResult = main.TRUE
140
141 main.step( "Starting Mininet" )
142 # scp topo file to mininet
143 # TODO: move to params?
144 topoName = "obelisk.py"
145 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700146 main.ONOSbench.scp( main.Mininet1,
147 filePath + topoName,
148 main.Mininet1.home,
149 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700150 mnResult = main.Mininet1.startNet( )
151 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
152 onpass="Mininet Started",
153 onfail="Error starting Mininet" )
154
155 main.step( "Git checkout and pull " + gitBranch )
156 if PULLCODE:
157 main.ONOSbench.gitCheckout( gitBranch )
158 gitPullResult = main.ONOSbench.gitPull()
159 # values of 1 or 3 are good
160 utilities.assert_lesser( expect=0, actual=gitPullResult,
161 onpass="Git pull successful",
162 onfail="Git pull failed" )
163 main.ONOSbench.getVersion( report=True )
164
165 main.step( "Using mvn clean install" )
166 cleanInstallResult = main.TRUE
167 if PULLCODE and gitPullResult == main.TRUE:
168 cleanInstallResult = main.ONOSbench.cleanInstall()
169 else:
170 main.log.warn( "Did not pull new code so skipping mvn " +
171 "clean install" )
172 utilities.assert_equals( expect=main.TRUE,
173 actual=cleanInstallResult,
174 onpass="MCI successful",
175 onfail="MCI failed" )
176 # GRAPHS
177 # NOTE: important params here:
178 # job = name of Jenkins job
179 # Plot Name = Plot-HA, only can be used if multiple plots
180 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700181 job = "HAstopNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700182 plotName = "Plot-HA"
Jon Hallff566d52016-01-15 14:45:36 -0800183 index = "1"
Jon Hall5cf14d52015-07-16 12:15:19 -0700184 graphs = '<ac:structured-macro ac:name="html">\n'
185 graphs += '<ac:plain-text-body><![CDATA[\n'
186 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
Jon Halla9845df2016-01-15 14:55:58 -0800187 '/plot/' + plotName + '/getPlot?index=' + index +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700188 '&width=500&height=300"' +\
189 'noborder="0" width="500" height="300" scrolling="yes" ' +\
190 'seamless="seamless"></iframe>\n'
191 graphs += ']]></ac:plain-text-body>\n'
192 graphs += '</ac:structured-macro>\n'
193 main.log.wiki(graphs)
194
195 main.step( "Creating ONOS package" )
Jon Hall3b489db2015-10-05 14:38:37 -0700196 # copy gen-partions file to ONOS
197 # NOTE: this assumes TestON and ONOS are on the same machine
198 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
199 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
200 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
201 main.ONOSbench.ip_address,
202 srcFile,
203 dstDir,
204 pwd=main.ONOSbench.pwd,
205 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700206 packageResult = main.ONOSbench.onosPackage()
207 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
208 onpass="ONOS package successful",
209 onfail="ONOS package failed" )
210
211 main.step( "Installing ONOS package" )
212 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700213 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700214 tmpResult = main.ONOSbench.onosInstall( options="-f",
215 node=node.ip_address )
216 onosInstallResult = onosInstallResult and tmpResult
217 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
218 onpass="ONOS install successful",
219 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700220 # clean up gen-partitions file
221 try:
222 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
223 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
224 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
225 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
226 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
227 str( main.ONOSbench.handle.before ) )
228 except ( pexpect.TIMEOUT, pexpect.EOF ):
229 main.log.exception( "ONOSbench: pexpect exception found:" +
230 main.ONOSbench.handle.before )
231 main.cleanup()
232 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700233
234 main.step( "Checking if ONOS is up yet" )
235 for i in range( 2 ):
236 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700237 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700238 started = main.ONOSbench.isup( node.ip_address )
239 if not started:
Jon Hallc6793552016-01-19 14:18:37 -0800240 main.log.error( node.name + " hasn't started" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700241 onosIsupResult = onosIsupResult and started
242 if onosIsupResult == main.TRUE:
243 break
244 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
245 onpass="ONOS startup successful",
246 onfail="ONOS startup failed" )
247
248 main.log.step( "Starting ONOS CLI sessions" )
249 cliResults = main.TRUE
250 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700251 for i in range( main.numCtrls ):
252 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700253 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700254 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700255 threads.append( t )
256 t.start()
257
258 for t in threads:
259 t.join()
260 cliResults = cliResults and t.result
261 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
262 onpass="ONOS cli startup successful",
263 onfail="ONOS cli startup failed" )
264
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700265 # Create a list of active nodes for use when some nodes are stopped
266 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
267
Jon Hall5cf14d52015-07-16 12:15:19 -0700268 if main.params[ 'tcpdump' ].lower() == "true":
269 main.step( "Start Packet Capture MN" )
270 main.Mininet2.startTcpdump(
271 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
272 + "-MN.pcap",
273 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
274 port=main.params[ 'MNtcpdump' ][ 'port' ] )
275
276 main.step( "App Ids check" )
Jon Hallf3d16e72015-12-16 17:45:08 -0800277 time.sleep(60)
Jon Hall5cf14d52015-07-16 12:15:19 -0700278 appCheck = main.TRUE
279 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700280 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700281 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700282 name="appToIDCheck-" + str( i ),
283 args=[] )
284 threads.append( t )
285 t.start()
286
287 for t in threads:
288 t.join()
289 appCheck = appCheck and t.result
290 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700291 node = main.activeNodes[0]
292 main.log.warn( main.CLIs[node].apps() )
293 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700294 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
295 onpass="App Ids seem to be correct",
296 onfail="Something is wrong with app Ids" )
297
298 if cliResults == main.FALSE:
299 main.log.error( "Failed to start ONOS, stopping test" )
300 main.cleanup()
301 main.exit()
302
303 def CASE2( self, main ):
304 """
305 Assign devices to controllers
306 """
307 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700308 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700309 assert main, "main not defined"
310 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700311 assert main.CLIs, "main.CLIs not defined"
312 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700313 assert ONOS1Port, "ONOS1Port not defined"
314 assert ONOS2Port, "ONOS2Port not defined"
315 assert ONOS3Port, "ONOS3Port not defined"
316 assert ONOS4Port, "ONOS4Port not defined"
317 assert ONOS5Port, "ONOS5Port not defined"
318 assert ONOS6Port, "ONOS6Port not defined"
319 assert ONOS7Port, "ONOS7Port not defined"
320
321 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700322 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700323 "and check that an ONOS node becomes the " +\
324 "master of the device."
325 main.step( "Assign switches to controllers" )
326
327 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700328 for i in range( main.numCtrls ):
329 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700330 swList = []
331 for i in range( 1, 29 ):
332 swList.append( "s" + str( i ) )
333 main.Mininet1.assignSwController( sw=swList, ip=ipList )
334
335 mastershipCheck = main.TRUE
336 for i in range( 1, 29 ):
337 response = main.Mininet1.getSwController( "s" + str( i ) )
338 try:
339 main.log.info( str( response ) )
340 except Exception:
341 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700342 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700343 if re.search( "tcp:" + node.ip_address, response ):
344 mastershipCheck = mastershipCheck and main.TRUE
345 else:
346 main.log.error( "Error, node " + node.ip_address + " is " +
347 "not in the list of controllers s" +
348 str( i ) + " is connecting to." )
349 mastershipCheck = main.FALSE
350 utilities.assert_equals(
351 expect=main.TRUE,
352 actual=mastershipCheck,
353 onpass="Switch mastership assigned correctly",
354 onfail="Switches not assigned correctly to controllers" )
355
356 def CASE21( self, main ):
357 """
358 Assign mastership to controllers
359 """
Jon Hall5cf14d52015-07-16 12:15:19 -0700360 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700361 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700362 assert main, "main not defined"
363 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700364 assert main.CLIs, "main.CLIs not defined"
365 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700366 assert ONOS1Port, "ONOS1Port not defined"
367 assert ONOS2Port, "ONOS2Port not defined"
368 assert ONOS3Port, "ONOS3Port not defined"
369 assert ONOS4Port, "ONOS4Port not defined"
370 assert ONOS5Port, "ONOS5Port not defined"
371 assert ONOS6Port, "ONOS6Port not defined"
372 assert ONOS7Port, "ONOS7Port not defined"
373
374 main.case( "Assigning Controller roles for switches" )
Jon Hall783bbf92015-07-23 14:33:19 -0700375 main.caseExplanation = "Check that ONOS is connected to each " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700376 "device. Then manually assign" +\
377 " mastership to specific ONOS nodes using" +\
378 " 'device-role'"
379 main.step( "Assign mastership of switches to specific controllers" )
380 # Manually assign mastership to the controller we want
381 roleCall = main.TRUE
382
383 ipList = [ ]
384 deviceList = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700385 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700386 try:
387 # Assign mastership to specific controllers. This assignment was
388 # determined for a 7 node cluser, but will work with any sized
389 # cluster
390 for i in range( 1, 29 ): # switches 1 through 28
391 # set up correct variables:
392 if i == 1:
393 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700394 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700395 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700396 elif i == 2:
Jon Halle1a3b752015-07-22 13:02:46 -0700397 c = 1 % main.numCtrls
398 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700399 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700400 elif i == 3:
Jon Halle1a3b752015-07-22 13:02:46 -0700401 c = 1 % main.numCtrls
402 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700403 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700404 elif i == 4:
Jon Halle1a3b752015-07-22 13:02:46 -0700405 c = 3 % main.numCtrls
406 ip = main.nodes[ c ].ip_address # ONOS4
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700407 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700408 elif i == 5:
Jon Halle1a3b752015-07-22 13:02:46 -0700409 c = 2 % main.numCtrls
410 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700411 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700412 elif i == 6:
Jon Halle1a3b752015-07-22 13:02:46 -0700413 c = 2 % main.numCtrls
414 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700415 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700416 elif i == 7:
Jon Halle1a3b752015-07-22 13:02:46 -0700417 c = 5 % main.numCtrls
418 ip = main.nodes[ c ].ip_address # ONOS6
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700419 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700420 elif i >= 8 and i <= 17:
Jon Halle1a3b752015-07-22 13:02:46 -0700421 c = 4 % main.numCtrls
422 ip = main.nodes[ c ].ip_address # ONOS5
Jon Hall5cf14d52015-07-16 12:15:19 -0700423 dpid = '3' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700424 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700425 elif i >= 18 and i <= 27:
Jon Halle1a3b752015-07-22 13:02:46 -0700426 c = 6 % main.numCtrls
427 ip = main.nodes[ c ].ip_address # ONOS7
Jon Hall5cf14d52015-07-16 12:15:19 -0700428 dpid = '6' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700429 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700430 elif i == 28:
431 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700432 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700433 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700434 else:
435 main.log.error( "You didn't write an else statement for " +
436 "switch s" + str( i ) )
437 roleCall = main.FALSE
438 # Assign switch
439 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
440 # TODO: make this controller dynamic
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700441 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
Jon Hall5cf14d52015-07-16 12:15:19 -0700442 ipList.append( ip )
443 deviceList.append( deviceId )
444 except ( AttributeError, AssertionError ):
445 main.log.exception( "Something is wrong with ONOS device view" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700446 main.log.info( onosCli.devices() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700447 utilities.assert_equals(
448 expect=main.TRUE,
449 actual=roleCall,
450 onpass="Re-assigned switch mastership to designated controller",
451 onfail="Something wrong with deviceRole calls" )
452
453 main.step( "Check mastership was correctly assigned" )
454 roleCheck = main.TRUE
455 # NOTE: This is due to the fact that device mastership change is not
456 # atomic and is actually a multi step process
457 time.sleep( 5 )
458 for i in range( len( ipList ) ):
459 ip = ipList[i]
460 deviceId = deviceList[i]
461 # Check assignment
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700462 master = onosCli.getRole( deviceId ).get( 'master' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700463 if ip in master:
464 roleCheck = roleCheck and main.TRUE
465 else:
466 roleCheck = roleCheck and main.FALSE
467 main.log.error( "Error, controller " + ip + " is not" +
468 " master " + "of device " +
469 str( deviceId ) + ". Master is " +
470 repr( master ) + "." )
471 utilities.assert_equals(
472 expect=main.TRUE,
473 actual=roleCheck,
474 onpass="Switches were successfully reassigned to designated " +
475 "controller",
476 onfail="Switches were not successfully reassigned" )
477
478 def CASE3( self, main ):
479 """
480 Assign intents
481 """
482 import time
483 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700484 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700485 assert main, "main not defined"
486 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700487 assert main.CLIs, "main.CLIs not defined"
488 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700489 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700490 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700491 "assign predetermined host-to-host intents." +\
492 " After installation, check that the intent" +\
493 " is distributed to all nodes and the state" +\
494 " is INSTALLED"
495
496 # install onos-app-fwd
497 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700498 onosCli = main.CLIs[ main.activeNodes[0] ]
499 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700500 utilities.assert_equals( expect=main.TRUE, actual=installResults,
501 onpass="Install fwd successful",
502 onfail="Install fwd failed" )
503
504 main.step( "Check app ids" )
505 appCheck = main.TRUE
506 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700507 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700508 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700509 name="appToIDCheck-" + str( i ),
510 args=[] )
511 threads.append( t )
512 t.start()
513
514 for t in threads:
515 t.join()
516 appCheck = appCheck and t.result
517 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700518 main.log.warn( onosCli.apps() )
519 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700520 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
521 onpass="App Ids seem to be correct",
522 onfail="Something is wrong with app Ids" )
523
524 main.step( "Discovering Hosts( Via pingall for now )" )
525 # FIXME: Once we have a host discovery mechanism, use that instead
526 # REACTIVE FWD test
527 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700528 passMsg = "Reactive Pingall test passed"
529 time1 = time.time()
530 pingResult = main.Mininet1.pingall()
531 time2 = time.time()
532 if not pingResult:
533 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700534 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700535 passMsg += " on the second try"
536 utilities.assert_equals(
537 expect=main.TRUE,
538 actual=pingResult,
539 onpass= passMsg,
540 onfail="Reactive Pingall failed, " +
541 "one or more ping pairs failed" )
542 main.log.info( "Time for pingall: %2f seconds" %
543 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700544 # timeout for fwd flows
545 time.sleep( 11 )
546 # uninstall onos-app-fwd
547 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700548 node = main.activeNodes[0]
549 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700550 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
551 onpass="Uninstall fwd successful",
552 onfail="Uninstall fwd failed" )
553
554 main.step( "Check app ids" )
555 threads = []
556 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700557 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700558 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700559 name="appToIDCheck-" + str( i ),
560 args=[] )
561 threads.append( t )
562 t.start()
563
564 for t in threads:
565 t.join()
566 appCheck2 = appCheck2 and t.result
567 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700568 node = main.activeNodes[0]
569 main.log.warn( main.CLIs[node].apps() )
570 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700571 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
572 onpass="App Ids seem to be correct",
573 onfail="Something is wrong with app Ids" )
574
575 main.step( "Add host intents via cli" )
576 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700577 # TODO: move the host numbers to params
578 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700579 intentAddResult = True
580 hostResult = main.TRUE
581 for i in range( 8, 18 ):
582 main.log.info( "Adding host intent between h" + str( i ) +
583 " and h" + str( i + 10 ) )
584 host1 = "00:00:00:00:00:" + \
585 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
586 host2 = "00:00:00:00:00:" + \
587 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
588 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700589 host1Dict = onosCli.getHost( host1 )
590 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700591 host1Id = None
592 host2Id = None
593 if host1Dict and host2Dict:
594 host1Id = host1Dict.get( 'id', None )
595 host2Id = host2Dict.get( 'id', None )
596 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700597 nodeNum = ( i % len( main.activeNodes ) )
598 node = main.activeNodes[nodeNum]
599 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700600 if tmpId:
601 main.log.info( "Added intent with id: " + tmpId )
602 intentIds.append( tmpId )
603 else:
604 main.log.error( "addHostIntent returned: " +
605 repr( tmpId ) )
606 else:
607 main.log.error( "Error, getHost() failed for h" + str( i ) +
608 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700609 node = main.activeNodes[0]
610 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700611 main.log.warn( "Hosts output: " )
612 try:
613 main.log.warn( json.dumps( json.loads( hosts ),
614 sort_keys=True,
615 indent=4,
616 separators=( ',', ': ' ) ) )
617 except ( ValueError, TypeError ):
618 main.log.warn( repr( hosts ) )
619 hostResult = main.FALSE
620 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
621 onpass="Found a host id for each host",
622 onfail="Error looking up host ids" )
623
624 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700625 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700626 main.log.info( "Submitted intents: " + str( intentIds ) )
627 main.log.info( "Intents in ONOS: " + str( onosIds ) )
628 for intent in intentIds:
629 if intent in onosIds:
630 pass # intent submitted is in onos
631 else:
632 intentAddResult = False
633 if intentAddResult:
634 intentStop = time.time()
635 else:
636 intentStop = None
637 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700638 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700639 intentStates = []
640 installedCheck = True
641 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
642 count = 0
643 try:
644 for intent in json.loads( intents ):
645 state = intent.get( 'state', None )
646 if "INSTALLED" not in state:
647 installedCheck = False
648 intentId = intent.get( 'id', None )
649 intentStates.append( ( intentId, state ) )
650 except ( ValueError, TypeError ):
651 main.log.exception( "Error parsing intents" )
652 # add submitted intents not in the store
653 tmplist = [ i for i, s in intentStates ]
654 missingIntents = False
655 for i in intentIds:
656 if i not in tmplist:
657 intentStates.append( ( i, " - " ) )
658 missingIntents = True
659 intentStates.sort()
660 for i, s in intentStates:
661 count += 1
662 main.log.info( "%-6s%-15s%-15s" %
663 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700664 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700665 try:
666 missing = False
667 if leaders:
668 parsedLeaders = json.loads( leaders )
669 main.log.warn( json.dumps( parsedLeaders,
670 sort_keys=True,
671 indent=4,
672 separators=( ',', ': ' ) ) )
673 # check for all intent partitions
674 topics = []
675 for i in range( 14 ):
676 topics.append( "intent-partition-" + str( i ) )
677 main.log.debug( topics )
678 ONOStopics = [ j['topic'] for j in parsedLeaders ]
679 for topic in topics:
680 if topic not in ONOStopics:
681 main.log.error( "Error: " + topic +
682 " not in leaders" )
683 missing = True
684 else:
685 main.log.error( "leaders() returned None" )
686 except ( ValueError, TypeError ):
687 main.log.exception( "Error parsing leaders" )
688 main.log.error( repr( leaders ) )
689 # Check all nodes
690 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700691 for i in main.activeNodes:
692 response = main.CLIs[i].leaders( jsonFormat=False)
693 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700694 str( response ) )
695
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700696 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700697 try:
698 if partitions :
699 parsedPartitions = json.loads( partitions )
700 main.log.warn( json.dumps( parsedPartitions,
701 sort_keys=True,
702 indent=4,
703 separators=( ',', ': ' ) ) )
704 # TODO check for a leader in all paritions
705 # TODO check for consistency among nodes
706 else:
707 main.log.error( "partitions() returned None" )
708 except ( ValueError, TypeError ):
709 main.log.exception( "Error parsing partitions" )
710 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700711 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700712 try:
713 if pendingMap :
714 parsedPending = json.loads( pendingMap )
715 main.log.warn( json.dumps( parsedPending,
716 sort_keys=True,
717 indent=4,
718 separators=( ',', ': ' ) ) )
719 # TODO check something here?
720 else:
721 main.log.error( "pendingMap() returned None" )
722 except ( ValueError, TypeError ):
723 main.log.exception( "Error parsing pending map" )
724 main.log.error( repr( pendingMap ) )
725
726 intentAddResult = bool( intentAddResult and not missingIntents and
727 installedCheck )
728 if not intentAddResult:
729 main.log.error( "Error in pushing host intents to ONOS" )
730
731 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700732 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700733 correct = True
734 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700735 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700736 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700737 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700738 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700739 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700740 str( sorted( onosIds ) ) )
741 if sorted( ids ) != sorted( intentIds ):
742 main.log.warn( "Set of intent IDs doesn't match" )
743 correct = False
744 break
745 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700746 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700747 for intent in intents:
748 if intent[ 'state' ] != "INSTALLED":
749 main.log.warn( "Intent " + intent[ 'id' ] +
750 " is " + intent[ 'state' ] )
751 correct = False
752 break
753 if correct:
754 break
755 else:
756 time.sleep(1)
757 if not intentStop:
758 intentStop = time.time()
759 global gossipTime
760 gossipTime = intentStop - intentStart
761 main.log.info( "It took about " + str( gossipTime ) +
762 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700763 gossipPeriod = int( main.params['timers']['gossip'] )
764 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700765 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700766 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700767 onpass="ECM anti-entropy for intents worked within " +
768 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700769 onfail="Intent ECM anti-entropy took too long. " +
770 "Expected time:{}, Actual time:{}".format( maxGossipTime,
771 gossipTime ) )
772 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700773 intentAddResult = True
774
775 if not intentAddResult or "key" in pendingMap:
776 import time
777 installedCheck = True
778 main.log.info( "Sleeping 60 seconds to see if intents are found" )
779 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700780 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700781 main.log.info( "Submitted intents: " + str( intentIds ) )
782 main.log.info( "Intents in ONOS: " + str( onosIds ) )
783 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700784 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700785 intentStates = []
786 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
787 count = 0
788 try:
789 for intent in json.loads( intents ):
790 # Iter through intents of a node
791 state = intent.get( 'state', None )
792 if "INSTALLED" not in state:
793 installedCheck = False
794 intentId = intent.get( 'id', None )
795 intentStates.append( ( intentId, state ) )
796 except ( ValueError, TypeError ):
797 main.log.exception( "Error parsing intents" )
798 # add submitted intents not in the store
799 tmplist = [ i for i, s in intentStates ]
800 for i in intentIds:
801 if i not in tmplist:
802 intentStates.append( ( i, " - " ) )
803 intentStates.sort()
804 for i, s in intentStates:
805 count += 1
806 main.log.info( "%-6s%-15s%-15s" %
807 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700808 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700809 try:
810 missing = False
811 if leaders:
812 parsedLeaders = json.loads( leaders )
813 main.log.warn( json.dumps( parsedLeaders,
814 sort_keys=True,
815 indent=4,
816 separators=( ',', ': ' ) ) )
817 # check for all intent partitions
818 # check for election
819 topics = []
820 for i in range( 14 ):
821 topics.append( "intent-partition-" + str( i ) )
822 # FIXME: this should only be after we start the app
823 topics.append( "org.onosproject.election" )
824 main.log.debug( topics )
825 ONOStopics = [ j['topic'] for j in parsedLeaders ]
826 for topic in topics:
827 if topic not in ONOStopics:
828 main.log.error( "Error: " + topic +
829 " not in leaders" )
830 missing = True
831 else:
832 main.log.error( "leaders() returned None" )
833 except ( ValueError, TypeError ):
834 main.log.exception( "Error parsing leaders" )
835 main.log.error( repr( leaders ) )
836 # Check all nodes
837 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700838 for i in main.activeNodes:
839 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700840 response = node.leaders( jsonFormat=False)
841 main.log.warn( str( node.name ) + " leaders output: \n" +
842 str( response ) )
843
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700844 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700845 try:
846 if partitions :
847 parsedPartitions = json.loads( partitions )
848 main.log.warn( json.dumps( parsedPartitions,
849 sort_keys=True,
850 indent=4,
851 separators=( ',', ': ' ) ) )
852 # TODO check for a leader in all paritions
853 # TODO check for consistency among nodes
854 else:
855 main.log.error( "partitions() returned None" )
856 except ( ValueError, TypeError ):
857 main.log.exception( "Error parsing partitions" )
858 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700859 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700860 try:
861 if pendingMap :
862 parsedPending = json.loads( pendingMap )
863 main.log.warn( json.dumps( parsedPending,
864 sort_keys=True,
865 indent=4,
866 separators=( ',', ': ' ) ) )
867 # TODO check something here?
868 else:
869 main.log.error( "pendingMap() returned None" )
870 except ( ValueError, TypeError ):
871 main.log.exception( "Error parsing pending map" )
872 main.log.error( repr( pendingMap ) )
873
874 def CASE4( self, main ):
875 """
876 Ping across added host intents
877 """
878 import json
879 import time
Jon Halle1a3b752015-07-22 13:02:46 -0700880 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700881 assert main, "main not defined"
882 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700883 assert main.CLIs, "main.CLIs not defined"
884 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700885 main.case( "Verify connectivity by sending traffic across Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700886 main.caseExplanation = "Ping across added host intents to check " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700887 "functionality and check the state of " +\
888 "the intent"
889 main.step( "Ping across added host intents" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700890 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -0700891 PingResult = main.TRUE
892 for i in range( 8, 18 ):
893 ping = main.Mininet1.pingHost( src="h" + str( i ),
894 target="h" + str( i + 10 ) )
895 PingResult = PingResult and ping
896 if ping == main.FALSE:
897 main.log.warn( "Ping failed between h" + str( i ) +
898 " and h" + str( i + 10 ) )
899 elif ping == main.TRUE:
900 main.log.info( "Ping test passed!" )
901 # Don't set PingResult or you'd override failures
902 if PingResult == main.FALSE:
903 main.log.error(
904 "Intents have not been installed correctly, pings failed." )
905 # TODO: pretty print
906 main.log.warn( "ONOS1 intents: " )
907 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700908 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700909 main.log.warn( json.dumps( json.loads( tmpIntents ),
910 sort_keys=True,
911 indent=4,
912 separators=( ',', ': ' ) ) )
913 except ( ValueError, TypeError ):
914 main.log.warn( repr( tmpIntents ) )
915 utilities.assert_equals(
916 expect=main.TRUE,
917 actual=PingResult,
918 onpass="Intents have been installed correctly and pings work",
919 onfail="Intents have not been installed correctly, pings failed." )
920
921 main.step( "Check Intent state" )
922 installedCheck = False
923 loopCount = 0
924 while not installedCheck and loopCount < 40:
925 installedCheck = True
926 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700927 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700928 intentStates = []
929 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
930 count = 0
931 # Iter through intents of a node
932 try:
933 for intent in json.loads( intents ):
934 state = intent.get( 'state', None )
935 if "INSTALLED" not in state:
936 installedCheck = False
937 intentId = intent.get( 'id', None )
938 intentStates.append( ( intentId, state ) )
939 except ( ValueError, TypeError ):
940 main.log.exception( "Error parsing intents." )
941 # Print states
942 intentStates.sort()
943 for i, s in intentStates:
944 count += 1
945 main.log.info( "%-6s%-15s%-15s" %
946 ( str( count ), str( i ), str( s ) ) )
947 if not installedCheck:
948 time.sleep( 1 )
949 loopCount += 1
950 utilities.assert_equals( expect=True, actual=installedCheck,
951 onpass="Intents are all INSTALLED",
952 onfail="Intents are not all in " +
953 "INSTALLED state" )
954
955 main.step( "Check leadership of topics" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700956 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700957 topicCheck = main.TRUE
958 try:
959 if leaders:
960 parsedLeaders = json.loads( leaders )
961 main.log.warn( json.dumps( parsedLeaders,
962 sort_keys=True,
963 indent=4,
964 separators=( ',', ': ' ) ) )
965 # check for all intent partitions
966 # check for election
967 # TODO: Look at Devices as topics now that it uses this system
968 topics = []
969 for i in range( 14 ):
970 topics.append( "intent-partition-" + str( i ) )
971 # FIXME: this should only be after we start the app
972 # FIXME: topics.append( "org.onosproject.election" )
973 # Print leaders output
974 main.log.debug( topics )
975 ONOStopics = [ j['topic'] for j in parsedLeaders ]
976 for topic in topics:
977 if topic not in ONOStopics:
978 main.log.error( "Error: " + topic +
979 " not in leaders" )
980 topicCheck = main.FALSE
981 else:
982 main.log.error( "leaders() returned None" )
983 topicCheck = main.FALSE
984 except ( ValueError, TypeError ):
985 topicCheck = main.FALSE
986 main.log.exception( "Error parsing leaders" )
987 main.log.error( repr( leaders ) )
988 # TODO: Check for a leader of these topics
989 # Check all nodes
990 if topicCheck:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700991 for i in main.activeNodes:
992 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700993 response = node.leaders( jsonFormat=False)
994 main.log.warn( str( node.name ) + " leaders output: \n" +
995 str( response ) )
996
997 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
998 onpass="intent Partitions is in leaders",
999 onfail="Some topics were lost " )
1000 # Print partitions
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001001 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001002 try:
1003 if partitions :
1004 parsedPartitions = json.loads( partitions )
1005 main.log.warn( json.dumps( parsedPartitions,
1006 sort_keys=True,
1007 indent=4,
1008 separators=( ',', ': ' ) ) )
1009 # TODO check for a leader in all paritions
1010 # TODO check for consistency among nodes
1011 else:
1012 main.log.error( "partitions() returned None" )
1013 except ( ValueError, TypeError ):
1014 main.log.exception( "Error parsing partitions" )
1015 main.log.error( repr( partitions ) )
1016 # Print Pending Map
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001017 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001018 try:
1019 if pendingMap :
1020 parsedPending = json.loads( pendingMap )
1021 main.log.warn( json.dumps( parsedPending,
1022 sort_keys=True,
1023 indent=4,
1024 separators=( ',', ': ' ) ) )
1025 # TODO check something here?
1026 else:
1027 main.log.error( "pendingMap() returned None" )
1028 except ( ValueError, TypeError ):
1029 main.log.exception( "Error parsing pending map" )
1030 main.log.error( repr( pendingMap ) )
1031
1032 if not installedCheck:
1033 main.log.info( "Waiting 60 seconds to see if the state of " +
1034 "intents change" )
1035 time.sleep( 60 )
1036 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001037 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001038 intentStates = []
1039 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1040 count = 0
1041 # Iter through intents of a node
1042 try:
1043 for intent in json.loads( intents ):
1044 state = intent.get( 'state', None )
1045 if "INSTALLED" not in state:
1046 installedCheck = False
1047 intentId = intent.get( 'id', None )
1048 intentStates.append( ( intentId, state ) )
1049 except ( ValueError, TypeError ):
1050 main.log.exception( "Error parsing intents." )
1051 intentStates.sort()
1052 for i, s in intentStates:
1053 count += 1
1054 main.log.info( "%-6s%-15s%-15s" %
1055 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001056 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001057 try:
1058 missing = False
1059 if leaders:
1060 parsedLeaders = json.loads( leaders )
1061 main.log.warn( json.dumps( parsedLeaders,
1062 sort_keys=True,
1063 indent=4,
1064 separators=( ',', ': ' ) ) )
1065 # check for all intent partitions
1066 # check for election
1067 topics = []
1068 for i in range( 14 ):
1069 topics.append( "intent-partition-" + str( i ) )
1070 # FIXME: this should only be after we start the app
1071 topics.append( "org.onosproject.election" )
1072 main.log.debug( topics )
1073 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1074 for topic in topics:
1075 if topic not in ONOStopics:
1076 main.log.error( "Error: " + topic +
1077 " not in leaders" )
1078 missing = True
1079 else:
1080 main.log.error( "leaders() returned None" )
1081 except ( ValueError, TypeError ):
1082 main.log.exception( "Error parsing leaders" )
1083 main.log.error( repr( leaders ) )
1084 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001085 for i in main.activeNodes:
1086 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001087 response = node.leaders( jsonFormat=False)
1088 main.log.warn( str( node.name ) + " leaders output: \n" +
1089 str( response ) )
1090
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001091 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001092 try:
1093 if partitions :
1094 parsedPartitions = json.loads( partitions )
1095 main.log.warn( json.dumps( parsedPartitions,
1096 sort_keys=True,
1097 indent=4,
1098 separators=( ',', ': ' ) ) )
1099 # TODO check for a leader in all paritions
1100 # TODO check for consistency among nodes
1101 else:
1102 main.log.error( "partitions() returned None" )
1103 except ( ValueError, TypeError ):
1104 main.log.exception( "Error parsing partitions" )
1105 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001106 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001107 try:
1108 if pendingMap :
1109 parsedPending = json.loads( pendingMap )
1110 main.log.warn( json.dumps( parsedPending,
1111 sort_keys=True,
1112 indent=4,
1113 separators=( ',', ': ' ) ) )
1114 # TODO check something here?
1115 else:
1116 main.log.error( "pendingMap() returned None" )
1117 except ( ValueError, TypeError ):
1118 main.log.exception( "Error parsing pending map" )
1119 main.log.error( repr( pendingMap ) )
1120 # Print flowrules
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001121 node = main.activeNodes[0]
1122 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001123 main.step( "Wait a minute then ping again" )
1124 # the wait is above
1125 PingResult = main.TRUE
1126 for i in range( 8, 18 ):
1127 ping = main.Mininet1.pingHost( src="h" + str( i ),
1128 target="h" + str( i + 10 ) )
1129 PingResult = PingResult and ping
1130 if ping == main.FALSE:
1131 main.log.warn( "Ping failed between h" + str( i ) +
1132 " and h" + str( i + 10 ) )
1133 elif ping == main.TRUE:
1134 main.log.info( "Ping test passed!" )
1135 # Don't set PingResult or you'd override failures
1136 if PingResult == main.FALSE:
1137 main.log.error(
1138 "Intents have not been installed correctly, pings failed." )
1139 # TODO: pretty print
1140 main.log.warn( "ONOS1 intents: " )
1141 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001142 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001143 main.log.warn( json.dumps( json.loads( tmpIntents ),
1144 sort_keys=True,
1145 indent=4,
1146 separators=( ',', ': ' ) ) )
1147 except ( ValueError, TypeError ):
1148 main.log.warn( repr( tmpIntents ) )
1149 utilities.assert_equals(
1150 expect=main.TRUE,
1151 actual=PingResult,
1152 onpass="Intents have been installed correctly and pings work",
1153 onfail="Intents have not been installed correctly, pings failed." )
1154
1155 def CASE5( self, main ):
1156 """
1157 Reading state of ONOS
1158 """
1159 import json
1160 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001161 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001162 assert main, "main not defined"
1163 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001164 assert main.CLIs, "main.CLIs not defined"
1165 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001166
1167 main.case( "Setting up and gathering data for current state" )
1168 # The general idea for this test case is to pull the state of
1169 # ( intents,flows, topology,... ) from each ONOS node
1170 # We can then compare them with each other and also with past states
1171
1172 main.step( "Check that each switch has a master" )
1173 global mastershipState
1174 mastershipState = '[]'
1175
1176 # Assert that each device has a master
1177 rolesNotNull = main.TRUE
1178 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001179 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001180 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001181 name="rolesNotNull-" + str( i ),
1182 args=[] )
1183 threads.append( t )
1184 t.start()
1185
1186 for t in threads:
1187 t.join()
1188 rolesNotNull = rolesNotNull and t.result
1189 utilities.assert_equals(
1190 expect=main.TRUE,
1191 actual=rolesNotNull,
1192 onpass="Each device has a master",
1193 onfail="Some devices don't have a master assigned" )
1194
1195 main.step( "Get the Mastership of each switch from each controller" )
1196 ONOSMastership = []
1197 mastershipCheck = main.FALSE
1198 consistentMastership = True
1199 rolesResults = True
1200 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001201 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001202 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001203 name="roles-" + str( i ),
1204 args=[] )
1205 threads.append( t )
1206 t.start()
1207
1208 for t in threads:
1209 t.join()
1210 ONOSMastership.append( t.result )
1211
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001212 for i in range( len( ONOSMastership ) ):
1213 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001214 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001215 main.log.error( "Error in getting ONOS" + node + " roles" )
1216 main.log.warn( "ONOS" + node + " mastership response: " +
1217 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001218 rolesResults = False
1219 utilities.assert_equals(
1220 expect=True,
1221 actual=rolesResults,
1222 onpass="No error in reading roles output",
1223 onfail="Error in reading roles from ONOS" )
1224
1225 main.step( "Check for consistency in roles from each controller" )
1226 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1227 main.log.info(
1228 "Switch roles are consistent across all ONOS nodes" )
1229 else:
1230 consistentMastership = False
1231 utilities.assert_equals(
1232 expect=True,
1233 actual=consistentMastership,
1234 onpass="Switch roles are consistent across all ONOS nodes",
1235 onfail="ONOS nodes have different views of switch roles" )
1236
1237 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001238 for i in range( len( main.activeNodes ) ):
1239 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001240 try:
1241 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001242 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001243 json.dumps(
1244 json.loads( ONOSMastership[ i ] ),
1245 sort_keys=True,
1246 indent=4,
1247 separators=( ',', ': ' ) ) )
1248 except ( ValueError, TypeError ):
1249 main.log.warn( repr( ONOSMastership[ i ] ) )
1250 elif rolesResults and consistentMastership:
1251 mastershipCheck = main.TRUE
1252 mastershipState = ONOSMastership[ 0 ]
1253
1254 main.step( "Get the intents from each controller" )
1255 global intentState
1256 intentState = []
1257 ONOSIntents = []
1258 intentCheck = main.FALSE
1259 consistentIntents = True
1260 intentsResults = True
1261 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001262 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001263 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001264 name="intents-" + str( i ),
1265 args=[],
1266 kwargs={ 'jsonFormat': True } )
1267 threads.append( t )
1268 t.start()
1269
1270 for t in threads:
1271 t.join()
1272 ONOSIntents.append( t.result )
1273
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001274 for i in range( len( ONOSIntents ) ):
1275 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001276 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001277 main.log.error( "Error in getting ONOS" + node + " intents" )
1278 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001279 repr( ONOSIntents[ i ] ) )
1280 intentsResults = False
1281 utilities.assert_equals(
1282 expect=True,
1283 actual=intentsResults,
1284 onpass="No error in reading intents output",
1285 onfail="Error in reading intents from ONOS" )
1286
1287 main.step( "Check for consistency in Intents from each controller" )
1288 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1289 main.log.info( "Intents are consistent across all ONOS " +
1290 "nodes" )
1291 else:
1292 consistentIntents = False
1293 main.log.error( "Intents not consistent" )
1294 utilities.assert_equals(
1295 expect=True,
1296 actual=consistentIntents,
1297 onpass="Intents are consistent across all ONOS nodes",
1298 onfail="ONOS nodes have different views of intents" )
1299
1300 if intentsResults:
1301 # Try to make it easy to figure out what is happening
1302 #
1303 # Intent ONOS1 ONOS2 ...
1304 # 0x01 INSTALLED INSTALLING
1305 # ... ... ...
1306 # ... ... ...
1307 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001308 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001309 title += " " * 10 + "ONOS" + str( n + 1 )
1310 main.log.warn( title )
1311 # get all intent keys in the cluster
1312 keys = []
1313 for nodeStr in ONOSIntents:
1314 node = json.loads( nodeStr )
1315 for intent in node:
1316 keys.append( intent.get( 'id' ) )
1317 keys = set( keys )
1318 for key in keys:
1319 row = "%-13s" % key
1320 for nodeStr in ONOSIntents:
1321 node = json.loads( nodeStr )
1322 for intent in node:
1323 if intent.get( 'id', "Error" ) == key:
1324 row += "%-15s" % intent.get( 'state' )
1325 main.log.warn( row )
1326 # End table view
1327
1328 if intentsResults and not consistentIntents:
1329 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001330 n = str( main.activeNodes[-1] + 1 )
1331 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001332 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1333 sort_keys=True,
1334 indent=4,
1335 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001336 for i in range( len( ONOSIntents ) ):
1337 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001338 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001339 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001340 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1341 sort_keys=True,
1342 indent=4,
1343 separators=( ',', ': ' ) ) )
1344 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001345 main.log.debug( "ONOS" + node + " intents match ONOS" +
1346 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001347 elif intentsResults and consistentIntents:
1348 intentCheck = main.TRUE
1349 intentState = ONOSIntents[ 0 ]
1350
1351 main.step( "Get the flows from each controller" )
1352 global flowState
1353 flowState = []
1354 ONOSFlows = []
1355 ONOSFlowsJson = []
1356 flowCheck = main.FALSE
1357 consistentFlows = True
1358 flowsResults = True
1359 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001360 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001361 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001362 name="flows-" + str( i ),
1363 args=[],
1364 kwargs={ 'jsonFormat': True } )
1365 threads.append( t )
1366 t.start()
1367
1368 # NOTE: Flows command can take some time to run
1369 time.sleep(30)
1370 for t in threads:
1371 t.join()
1372 result = t.result
1373 ONOSFlows.append( result )
1374
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001375 for i in range( len( ONOSFlows ) ):
1376 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001377 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1378 main.log.error( "Error in getting ONOS" + num + " flows" )
1379 main.log.warn( "ONOS" + num + " flows response: " +
1380 repr( ONOSFlows[ i ] ) )
1381 flowsResults = False
1382 ONOSFlowsJson.append( None )
1383 else:
1384 try:
1385 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1386 except ( ValueError, TypeError ):
1387 # FIXME: change this to log.error?
1388 main.log.exception( "Error in parsing ONOS" + num +
1389 " response as json." )
1390 main.log.error( repr( ONOSFlows[ i ] ) )
1391 ONOSFlowsJson.append( None )
1392 flowsResults = False
1393 utilities.assert_equals(
1394 expect=True,
1395 actual=flowsResults,
1396 onpass="No error in reading flows output",
1397 onfail="Error in reading flows from ONOS" )
1398
1399 main.step( "Check for consistency in Flows from each controller" )
1400 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1401 if all( tmp ):
1402 main.log.info( "Flow count is consistent across all ONOS nodes" )
1403 else:
1404 consistentFlows = False
1405 utilities.assert_equals(
1406 expect=True,
1407 actual=consistentFlows,
1408 onpass="The flow count is consistent across all ONOS nodes",
1409 onfail="ONOS nodes have different flow counts" )
1410
1411 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001412 for i in range( len( ONOSFlows ) ):
1413 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001414 try:
1415 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001416 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001417 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1418 indent=4, separators=( ',', ': ' ) ) )
1419 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001420 main.log.warn( "ONOS" + node + " flows: " +
1421 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001422 elif flowsResults and consistentFlows:
1423 flowCheck = main.TRUE
1424 flowState = ONOSFlows[ 0 ]
1425
1426 main.step( "Get the OF Table entries" )
1427 global flows
1428 flows = []
1429 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001430 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001431 if flowCheck == main.FALSE:
1432 for table in flows:
1433 main.log.warn( table )
1434 # TODO: Compare switch flow tables with ONOS flow tables
1435
1436 main.step( "Start continuous pings" )
1437 main.Mininet2.pingLong(
1438 src=main.params[ 'PING' ][ 'source1' ],
1439 target=main.params[ 'PING' ][ 'target1' ],
1440 pingTime=500 )
1441 main.Mininet2.pingLong(
1442 src=main.params[ 'PING' ][ 'source2' ],
1443 target=main.params[ 'PING' ][ 'target2' ],
1444 pingTime=500 )
1445 main.Mininet2.pingLong(
1446 src=main.params[ 'PING' ][ 'source3' ],
1447 target=main.params[ 'PING' ][ 'target3' ],
1448 pingTime=500 )
1449 main.Mininet2.pingLong(
1450 src=main.params[ 'PING' ][ 'source4' ],
1451 target=main.params[ 'PING' ][ 'target4' ],
1452 pingTime=500 )
1453 main.Mininet2.pingLong(
1454 src=main.params[ 'PING' ][ 'source5' ],
1455 target=main.params[ 'PING' ][ 'target5' ],
1456 pingTime=500 )
1457 main.Mininet2.pingLong(
1458 src=main.params[ 'PING' ][ 'source6' ],
1459 target=main.params[ 'PING' ][ 'target6' ],
1460 pingTime=500 )
1461 main.Mininet2.pingLong(
1462 src=main.params[ 'PING' ][ 'source7' ],
1463 target=main.params[ 'PING' ][ 'target7' ],
1464 pingTime=500 )
1465 main.Mininet2.pingLong(
1466 src=main.params[ 'PING' ][ 'source8' ],
1467 target=main.params[ 'PING' ][ 'target8' ],
1468 pingTime=500 )
1469 main.Mininet2.pingLong(
1470 src=main.params[ 'PING' ][ 'source9' ],
1471 target=main.params[ 'PING' ][ 'target9' ],
1472 pingTime=500 )
1473 main.Mininet2.pingLong(
1474 src=main.params[ 'PING' ][ 'source10' ],
1475 target=main.params[ 'PING' ][ 'target10' ],
1476 pingTime=500 )
1477
1478 main.step( "Collecting topology information from ONOS" )
1479 devices = []
1480 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001481 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001482 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001483 name="devices-" + str( i ),
1484 args=[ ] )
1485 threads.append( t )
1486 t.start()
1487
1488 for t in threads:
1489 t.join()
1490 devices.append( t.result )
1491 hosts = []
1492 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001493 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001494 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001495 name="hosts-" + str( i ),
1496 args=[ ] )
1497 threads.append( t )
1498 t.start()
1499
1500 for t in threads:
1501 t.join()
1502 try:
1503 hosts.append( json.loads( t.result ) )
1504 except ( ValueError, TypeError ):
1505 # FIXME: better handling of this, print which node
1506 # Maybe use thread name?
1507 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001508 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001509 hosts.append( None )
1510
1511 ports = []
1512 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001513 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001514 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001515 name="ports-" + str( i ),
1516 args=[ ] )
1517 threads.append( t )
1518 t.start()
1519
1520 for t in threads:
1521 t.join()
1522 ports.append( t.result )
1523 links = []
1524 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001525 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001526 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001527 name="links-" + str( i ),
1528 args=[ ] )
1529 threads.append( t )
1530 t.start()
1531
1532 for t in threads:
1533 t.join()
1534 links.append( t.result )
1535 clusters = []
1536 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001537 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001538 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001539 name="clusters-" + str( i ),
1540 args=[ ] )
1541 threads.append( t )
1542 t.start()
1543
1544 for t in threads:
1545 t.join()
1546 clusters.append( t.result )
1547 # Compare json objects for hosts and dataplane clusters
1548
1549 # hosts
1550 main.step( "Host view is consistent across ONOS nodes" )
1551 consistentHostsResult = main.TRUE
1552 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001553 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001554 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001555 if hosts[ controller ] == hosts[ 0 ]:
1556 continue
1557 else: # hosts not consistent
1558 main.log.error( "hosts from ONOS" +
1559 controllerStr +
1560 " is inconsistent with ONOS1" )
1561 main.log.warn( repr( hosts[ controller ] ) )
1562 consistentHostsResult = main.FALSE
1563
1564 else:
1565 main.log.error( "Error in getting ONOS hosts from ONOS" +
1566 controllerStr )
1567 consistentHostsResult = main.FALSE
1568 main.log.warn( "ONOS" + controllerStr +
1569 " hosts response: " +
1570 repr( hosts[ controller ] ) )
1571 utilities.assert_equals(
1572 expect=main.TRUE,
1573 actual=consistentHostsResult,
1574 onpass="Hosts view is consistent across all ONOS nodes",
1575 onfail="ONOS nodes have different views of hosts" )
1576
1577 main.step( "Each host has an IP address" )
1578 ipResult = main.TRUE
1579 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001580 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001581 if hosts[ controller ]:
1582 for host in hosts[ controller ]:
1583 if not host.get( 'ipAddresses', [ ] ):
1584 main.log.error( "Error with host ips on controller" +
1585 controllerStr + ": " + str( host ) )
1586 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001587 utilities.assert_equals(
1588 expect=main.TRUE,
1589 actual=ipResult,
1590 onpass="The ips of the hosts aren't empty",
1591 onfail="The ip of at least one host is missing" )
1592
1593 # Strongly connected clusters of devices
1594 main.step( "Cluster view is consistent across ONOS nodes" )
1595 consistentClustersResult = main.TRUE
1596 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001597 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001598 if "Error" not in clusters[ controller ]:
1599 if clusters[ controller ] == clusters[ 0 ]:
1600 continue
1601 else: # clusters not consistent
1602 main.log.error( "clusters from ONOS" + controllerStr +
1603 " is inconsistent with ONOS1" )
1604 consistentClustersResult = main.FALSE
1605
1606 else:
1607 main.log.error( "Error in getting dataplane clusters " +
1608 "from ONOS" + controllerStr )
1609 consistentClustersResult = main.FALSE
1610 main.log.warn( "ONOS" + controllerStr +
1611 " clusters response: " +
1612 repr( clusters[ controller ] ) )
1613 utilities.assert_equals(
1614 expect=main.TRUE,
1615 actual=consistentClustersResult,
1616 onpass="Clusters view is consistent across all ONOS nodes",
1617 onfail="ONOS nodes have different views of clusters" )
1618 # there should always only be one cluster
1619 main.step( "Cluster view correct across ONOS nodes" )
1620 try:
1621 numClusters = len( json.loads( clusters[ 0 ] ) )
1622 except ( ValueError, TypeError ):
1623 main.log.exception( "Error parsing clusters[0]: " +
1624 repr( clusters[ 0 ] ) )
1625 clusterResults = main.FALSE
1626 if numClusters == 1:
1627 clusterResults = main.TRUE
1628 utilities.assert_equals(
1629 expect=1,
1630 actual=numClusters,
1631 onpass="ONOS shows 1 SCC",
1632 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1633
1634 main.step( "Comparing ONOS topology to MN" )
1635 devicesResults = main.TRUE
1636 linksResults = main.TRUE
1637 hostsResults = main.TRUE
1638 mnSwitches = main.Mininet1.getSwitches()
1639 mnLinks = main.Mininet1.getLinks()
1640 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001641 for controller in main.activeNodes:
1642 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001643 if devices[ controller ] and ports[ controller ] and\
1644 "Error" not in devices[ controller ] and\
1645 "Error" not in ports[ controller ]:
1646
1647 currentDevicesResult = main.Mininet1.compareSwitches(
1648 mnSwitches,
1649 json.loads( devices[ controller ] ),
1650 json.loads( ports[ controller ] ) )
1651 else:
1652 currentDevicesResult = main.FALSE
1653 utilities.assert_equals( expect=main.TRUE,
1654 actual=currentDevicesResult,
1655 onpass="ONOS" + controllerStr +
1656 " Switches view is correct",
1657 onfail="ONOS" + controllerStr +
1658 " Switches view is incorrect" )
1659 if links[ controller ] and "Error" not in links[ controller ]:
1660 currentLinksResult = main.Mininet1.compareLinks(
1661 mnSwitches, mnLinks,
1662 json.loads( links[ controller ] ) )
1663 else:
1664 currentLinksResult = main.FALSE
1665 utilities.assert_equals( expect=main.TRUE,
1666 actual=currentLinksResult,
1667 onpass="ONOS" + controllerStr +
1668 " links view is correct",
1669 onfail="ONOS" + controllerStr +
1670 " links view is incorrect" )
1671
Jon Hall657cdf62015-12-17 14:40:51 -08001672 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001673 currentHostsResult = main.Mininet1.compareHosts(
1674 mnHosts,
1675 hosts[ controller ] )
1676 else:
1677 currentHostsResult = main.FALSE
1678 utilities.assert_equals( expect=main.TRUE,
1679 actual=currentHostsResult,
1680 onpass="ONOS" + controllerStr +
1681 " hosts exist in Mininet",
1682 onfail="ONOS" + controllerStr +
1683 " hosts don't match Mininet" )
1684
1685 devicesResults = devicesResults and currentDevicesResult
1686 linksResults = linksResults and currentLinksResult
1687 hostsResults = hostsResults and currentHostsResult
1688
1689 main.step( "Device information is correct" )
1690 utilities.assert_equals(
1691 expect=main.TRUE,
1692 actual=devicesResults,
1693 onpass="Device information is correct",
1694 onfail="Device information is incorrect" )
1695
1696 main.step( "Links are correct" )
1697 utilities.assert_equals(
1698 expect=main.TRUE,
1699 actual=linksResults,
1700 onpass="Link are correct",
1701 onfail="Links are incorrect" )
1702
1703 main.step( "Hosts are correct" )
1704 utilities.assert_equals(
1705 expect=main.TRUE,
1706 actual=hostsResults,
1707 onpass="Hosts are correct",
1708 onfail="Hosts are incorrect" )
1709
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001710 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001711 """
1712 The Failure case.
1713 """
Jon Halle1a3b752015-07-22 13:02:46 -07001714 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001715 assert main, "main not defined"
1716 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001717 assert main.CLIs, "main.CLIs not defined"
1718 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001719 main.case( "Stop minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001720
1721 main.step( "Checking ONOS Logs for errors" )
1722 for node in main.nodes:
1723 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1724 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1725
Jon Hall3b489db2015-10-05 14:38:37 -07001726 n = len( main.nodes ) # Number of nodes
1727 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1728 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1729 if n > 3:
1730 main.kill.append( p - 1 )
1731 # NOTE: This only works for cluster sizes of 3,5, or 7.
1732
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001733 main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001734 killResults = main.TRUE
1735 for i in main.kill:
1736 killResults = killResults and\
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001737 main.ONOSbench.onosStop( main.nodes[i].ip_address )
1738 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001739 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001740 onpass="ONOS nodes stopped successfully",
1741 onfail="ONOS nodes NOT successfully stopped" )
1742
1743 def CASE62( self, main ):
1744 """
1745 The bring up stopped nodes
1746 """
1747 import time
1748 assert main.numCtrls, "main.numCtrls not defined"
1749 assert main, "main not defined"
1750 assert utilities.assert_equals, "utilities.assert_equals not defined"
1751 assert main.CLIs, "main.CLIs not defined"
1752 assert main.nodes, "main.nodes not defined"
1753 assert main.kill, "main.kill not defined"
1754 main.case( "Restart minority of ONOS nodes" )
1755
1756 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1757 startResults = main.TRUE
1758 restartTime = time.time()
1759 for i in main.kill:
1760 startResults = startResults and\
1761 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1762 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1763 onpass="ONOS nodes started successfully",
1764 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001765
1766 main.step( "Checking if ONOS is up yet" )
1767 count = 0
1768 onosIsupResult = main.FALSE
1769 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001770 onosIsupResult = main.TRUE
1771 for i in main.kill:
1772 onosIsupResult = onosIsupResult and\
1773 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001774 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001775 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1776 onpass="ONOS restarted successfully",
1777 onfail="ONOS restart NOT successful" )
1778
Jon Halle1a3b752015-07-22 13:02:46 -07001779 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001780 cliResults = main.TRUE
1781 for i in main.kill:
1782 cliResults = cliResults and\
1783 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001784 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001785 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1786 onpass="ONOS cli restarted",
1787 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001788 main.activeNodes.sort()
1789 try:
1790 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1791 "List of active nodes has duplicates, this likely indicates something was run out of order"
1792 except AssertionError:
1793 main.log.exception( "" )
1794 main.cleanup()
1795 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001796
1797 # Grab the time of restart so we chan check how long the gossip
1798 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001799 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001800 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001801 # TODO: MAke this configurable. Also, we are breaking the above timer
1802 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001803 node = main.activeNodes[0]
1804 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1805 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1806 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001807
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        Verifies that the surviving cluster (main.activeNodes) still agrees
        on device mastership, intents, flow tables and leadership after the
        nodes in main.kill were stopped in CASE61. Compares intents and OF
        flow tables against the state captured before the failure.

        NOTE(review): reads 'intentState' and 'flows', which are set by an
        earlier case (CASE5) via TestON's shared namespace — if that case
        did not run this will raise NameError. TODO confirm.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # Make sure main.kill exists even if CASE61 was skipped
        try:
            main.kill
        except AttributeError:
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # One rolesNotNull query per active node, run in parallel threads
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            # AND each node's result into the overall verdict
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect the full mastership view from every active node
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output is empty or contains "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Every node's view must match the first node's view exactly
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's roles for debugging
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        # Collect the intents (as JSON) from every active node, in parallel
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        # Flag any node whose intents output is empty or contains "Error"
        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Sorted comparison so character ordering differences don't matter
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # Print one row per intent id with each node's reported state
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Summarize intent-state counts per node (e.g. {INSTALLED: 10})
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        # On inconsistency, dump each node's intents for debugging
        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # First try a cheap exact string match against the pre-failure state
        if intentState and intentState == ONOSIntents[ 0 ]:
            sameIntents = main.TRUE
            main.log.info( "Intents are consistent with before failure" )
        # TODO: possibly the states have changed? we may need to figure out
        #       what the acceptable states are
        elif len( intentState ) == len( ONOSIntents[ 0 ] ):
            # Same length: fall back to a per-intent membership comparison
            sameIntents = main.TRUE
            try:
                before = json.loads( intentState )
                after = json.loads( ONOSIntents[ 0 ] )
                for intent in before:
                    if intent not in after:
                        sameIntents = main.FALSE
                        main.log.debug( "Intent is not currently in ONOS " +
                                        "(at least in the same form):" )
                        main.log.debug( json.dumps( intent ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        # If they differ, dump both the before and after intent sets
        if sameIntents == main.FALSE:
            try:
                main.log.debug( "ONOS intents before: " )
                main.log.debug( json.dumps( json.loads( intentState ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
                main.log.debug( "Current ONOS intents: " )
                main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        # NOTE(review): assumes the topology has exactly 28 switches
        # (s1..s28) and that 'flows' holds the pre-failure tables — confirm
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
            if FlowTables == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )

        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes that were stopped; they must not appear as leader
        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                # No leader was elected to replace the failed one
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                # A stopped node must not still be shown as leader
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        # All nodes must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2124
2125 def CASE8( self, main ):
2126 """
2127 Compare topo
2128 """
2129 import json
2130 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002131 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002132 assert main, "main not defined"
2133 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002134 assert main.CLIs, "main.CLIs not defined"
2135 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002136
2137 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002138 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002139 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002140 topoResult = main.FALSE
2141 elapsed = 0
2142 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002143 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002144 startTime = time.time()
2145 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002146 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002147 devicesResults = main.TRUE
2148 linksResults = main.TRUE
2149 hostsResults = main.TRUE
2150 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002151 count += 1
2152 cliStart = time.time()
2153 devices = []
2154 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002155 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002156 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002157 name="devices-" + str( i ),
2158 args=[ ] )
2159 threads.append( t )
2160 t.start()
2161
2162 for t in threads:
2163 t.join()
2164 devices.append( t.result )
2165 hosts = []
2166 ipResult = main.TRUE
2167 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002168 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002169 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002170 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002171 args=[ main.CLIs[i].hosts, [ None ] ],
2172 kwargs= { 'sleep': 5, 'attempts': 5,
2173 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002174 threads.append( t )
2175 t.start()
2176
2177 for t in threads:
2178 t.join()
2179 try:
2180 hosts.append( json.loads( t.result ) )
2181 except ( ValueError, TypeError ):
2182 main.log.exception( "Error parsing hosts results" )
2183 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002184 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002185 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002186 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002187 if hosts[ controller ]:
2188 for host in hosts[ controller ]:
2189 if host is None or host.get( 'ipAddresses', [] ) == []:
2190 main.log.error(
2191 "Error with host ipAddresses on controller" +
2192 controllerStr + ": " + str( host ) )
2193 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002194 ports = []
2195 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002196 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002197 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002198 name="ports-" + str( i ),
2199 args=[ ] )
2200 threads.append( t )
2201 t.start()
2202
2203 for t in threads:
2204 t.join()
2205 ports.append( t.result )
2206 links = []
2207 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002208 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002209 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002210 name="links-" + str( i ),
2211 args=[ ] )
2212 threads.append( t )
2213 t.start()
2214
2215 for t in threads:
2216 t.join()
2217 links.append( t.result )
2218 clusters = []
2219 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002220 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002221 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002222 name="clusters-" + str( i ),
2223 args=[ ] )
2224 threads.append( t )
2225 t.start()
2226
2227 for t in threads:
2228 t.join()
2229 clusters.append( t.result )
2230
2231 elapsed = time.time() - startTime
2232 cliTime = time.time() - cliStart
2233 print "Elapsed time: " + str( elapsed )
2234 print "CLI time: " + str( cliTime )
2235
2236 mnSwitches = main.Mininet1.getSwitches()
2237 mnLinks = main.Mininet1.getLinks()
2238 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002239 for controller in range( len( main.activeNodes ) ):
2240 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002241 if devices[ controller ] and ports[ controller ] and\
2242 "Error" not in devices[ controller ] and\
2243 "Error" not in ports[ controller ]:
2244
Jon Hallc6793552016-01-19 14:18:37 -08002245 try:
2246 currentDevicesResult = main.Mininet1.compareSwitches(
2247 mnSwitches,
2248 json.loads( devices[ controller ] ),
2249 json.loads( ports[ controller ] ) )
2250 except ( TypeError, ValueError ) as e:
2251 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2252 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002253 else:
2254 currentDevicesResult = main.FALSE
2255 utilities.assert_equals( expect=main.TRUE,
2256 actual=currentDevicesResult,
2257 onpass="ONOS" + controllerStr +
2258 " Switches view is correct",
2259 onfail="ONOS" + controllerStr +
2260 " Switches view is incorrect" )
2261
2262 if links[ controller ] and "Error" not in links[ controller ]:
2263 currentLinksResult = main.Mininet1.compareLinks(
2264 mnSwitches, mnLinks,
2265 json.loads( links[ controller ] ) )
2266 else:
2267 currentLinksResult = main.FALSE
2268 utilities.assert_equals( expect=main.TRUE,
2269 actual=currentLinksResult,
2270 onpass="ONOS" + controllerStr +
2271 " links view is correct",
2272 onfail="ONOS" + controllerStr +
2273 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002274 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002275 currentHostsResult = main.Mininet1.compareHosts(
2276 mnHosts,
2277 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002278 elif hosts[ controller ] == []:
2279 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002280 else:
2281 currentHostsResult = main.FALSE
2282 utilities.assert_equals( expect=main.TRUE,
2283 actual=currentHostsResult,
2284 onpass="ONOS" + controllerStr +
2285 " hosts exist in Mininet",
2286 onfail="ONOS" + controllerStr +
2287 " hosts don't match Mininet" )
2288 # CHECKING HOST ATTACHMENT POINTS
2289 hostAttachment = True
2290 zeroHosts = False
2291 # FIXME: topo-HA/obelisk specific mappings:
2292 # key is mac and value is dpid
2293 mappings = {}
2294 for i in range( 1, 29 ): # hosts 1 through 28
2295 # set up correct variables:
2296 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2297 if i == 1:
2298 deviceId = "1000".zfill(16)
2299 elif i == 2:
2300 deviceId = "2000".zfill(16)
2301 elif i == 3:
2302 deviceId = "3000".zfill(16)
2303 elif i == 4:
2304 deviceId = "3004".zfill(16)
2305 elif i == 5:
2306 deviceId = "5000".zfill(16)
2307 elif i == 6:
2308 deviceId = "6000".zfill(16)
2309 elif i == 7:
2310 deviceId = "6007".zfill(16)
2311 elif i >= 8 and i <= 17:
2312 dpid = '3' + str( i ).zfill( 3 )
2313 deviceId = dpid.zfill(16)
2314 elif i >= 18 and i <= 27:
2315 dpid = '6' + str( i ).zfill( 3 )
2316 deviceId = dpid.zfill(16)
2317 elif i == 28:
2318 deviceId = "2800".zfill(16)
2319 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002320 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002321 if hosts[ controller ] == []:
2322 main.log.warn( "There are no hosts discovered" )
2323 zeroHosts = True
2324 else:
2325 for host in hosts[ controller ]:
2326 mac = None
2327 location = None
2328 device = None
2329 port = None
2330 try:
2331 mac = host.get( 'mac' )
2332 assert mac, "mac field could not be found for this host object"
2333
2334 location = host.get( 'location' )
2335 assert location, "location field could not be found for this host object"
2336
2337 # Trim the protocol identifier off deviceId
2338 device = str( location.get( 'elementId' ) ).split(':')[1]
2339 assert device, "elementId field could not be found for this host location object"
2340
2341 port = location.get( 'port' )
2342 assert port, "port field could not be found for this host location object"
2343
2344 # Now check if this matches where they should be
2345 if mac and device and port:
2346 if str( port ) != "1":
2347 main.log.error( "The attachment port is incorrect for " +
2348 "host " + str( mac ) +
2349 ". Expected: 1 Actual: " + str( port) )
2350 hostAttachment = False
2351 if device != mappings[ str( mac ) ]:
2352 main.log.error( "The attachment device is incorrect for " +
2353 "host " + str( mac ) +
2354 ". Expected: " + mappings[ str( mac ) ] +
2355 " Actual: " + device )
2356 hostAttachment = False
2357 else:
2358 hostAttachment = False
2359 except AssertionError:
2360 main.log.exception( "Json object not as expected" )
2361 main.log.error( repr( host ) )
2362 hostAttachment = False
2363 else:
2364 main.log.error( "No hosts json output or \"Error\"" +
2365 " in output. hosts = " +
2366 repr( hosts[ controller ] ) )
2367 if zeroHosts is False:
2368 hostAttachment = True
2369
2370 # END CHECKING HOST ATTACHMENT POINTS
2371 devicesResults = devicesResults and currentDevicesResult
2372 linksResults = linksResults and currentLinksResult
2373 hostsResults = hostsResults and currentHostsResult
2374 hostAttachmentResults = hostAttachmentResults and\
2375 hostAttachment
Jon Halle9b1fa32015-12-08 15:32:21 -08002376 topoResult = devicesResults and linksResults and\
2377 hostsResults and hostAttachmentResults
2378 utilities.assert_equals( expect=True,
2379 actual=topoResult,
2380 onpass="ONOS topology matches Mininet",
2381 onfail="ONOS topology don't match Mininet" )
2382 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002383
2384 # Compare json objects for hosts and dataplane clusters
2385
2386 # hosts
2387 main.step( "Hosts view is consistent across all ONOS nodes" )
2388 consistentHostsResult = main.TRUE
2389 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002390 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002391 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002392 if hosts[ controller ] == hosts[ 0 ]:
2393 continue
2394 else: # hosts not consistent
2395 main.log.error( "hosts from ONOS" + controllerStr +
2396 " is inconsistent with ONOS1" )
2397 main.log.warn( repr( hosts[ controller ] ) )
2398 consistentHostsResult = main.FALSE
2399
2400 else:
2401 main.log.error( "Error in getting ONOS hosts from ONOS" +
2402 controllerStr )
2403 consistentHostsResult = main.FALSE
2404 main.log.warn( "ONOS" + controllerStr +
2405 " hosts response: " +
2406 repr( hosts[ controller ] ) )
2407 utilities.assert_equals(
2408 expect=main.TRUE,
2409 actual=consistentHostsResult,
2410 onpass="Hosts view is consistent across all ONOS nodes",
2411 onfail="ONOS nodes have different views of hosts" )
2412
2413 main.step( "Hosts information is correct" )
2414 hostsResults = hostsResults and ipResult
2415 utilities.assert_equals(
2416 expect=main.TRUE,
2417 actual=hostsResults,
2418 onpass="Host information is correct",
2419 onfail="Host information is incorrect" )
2420
2421 main.step( "Host attachment points to the network" )
2422 utilities.assert_equals(
2423 expect=True,
2424 actual=hostAttachmentResults,
2425 onpass="Hosts are correctly attached to the network",
2426 onfail="ONOS did not correctly attach hosts to the network" )
2427
2428 # Strongly connected clusters of devices
2429 main.step( "Clusters view is consistent across all ONOS nodes" )
2430 consistentClustersResult = main.TRUE
2431 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002432 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002433 if "Error" not in clusters[ controller ]:
2434 if clusters[ controller ] == clusters[ 0 ]:
2435 continue
2436 else: # clusters not consistent
2437 main.log.error( "clusters from ONOS" +
2438 controllerStr +
2439 " is inconsistent with ONOS1" )
2440 consistentClustersResult = main.FALSE
2441
2442 else:
2443 main.log.error( "Error in getting dataplane clusters " +
2444 "from ONOS" + controllerStr )
2445 consistentClustersResult = main.FALSE
2446 main.log.warn( "ONOS" + controllerStr +
2447 " clusters response: " +
2448 repr( clusters[ controller ] ) )
2449 utilities.assert_equals(
2450 expect=main.TRUE,
2451 actual=consistentClustersResult,
2452 onpass="Clusters view is consistent across all ONOS nodes",
2453 onfail="ONOS nodes have different views of clusters" )
2454
2455 main.step( "There is only one SCC" )
2456 # there should always only be one cluster
2457 try:
2458 numClusters = len( json.loads( clusters[ 0 ] ) )
2459 except ( ValueError, TypeError ):
2460 main.log.exception( "Error parsing clusters[0]: " +
2461 repr( clusters[0] ) )
2462 clusterResults = main.FALSE
2463 if numClusters == 1:
2464 clusterResults = main.TRUE
2465 utilities.assert_equals(
2466 expect=1,
2467 actual=numClusters,
2468 onpass="ONOS shows 1 SCC",
2469 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2470
2471 topoResult = ( devicesResults and linksResults
2472 and hostsResults and consistentHostsResult
2473 and consistentClustersResult and clusterResults
2474 and ipResult and hostAttachmentResults )
2475
2476 topoResult = topoResult and int( count <= 2 )
2477 note = "note it takes about " + str( int( cliTime ) ) + \
2478 " seconds for the test to make all the cli calls to fetch " +\
2479 "the topology from each ONOS instance"
2480 main.log.info(
2481 "Very crass estimate for topology discovery/convergence( " +
2482 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2483 str( count ) + " tries" )
2484
2485 main.step( "Device information is correct" )
2486 utilities.assert_equals(
2487 expect=main.TRUE,
2488 actual=devicesResults,
2489 onpass="Device information is correct",
2490 onfail="Device information is incorrect" )
2491
2492 main.step( "Links are correct" )
2493 utilities.assert_equals(
2494 expect=main.TRUE,
2495 actual=linksResults,
2496 onpass="Link are correct",
2497 onfail="Links are incorrect" )
2498
2499 # FIXME: move this to an ONOS state case
2500 main.step( "Checking ONOS nodes" )
2501 nodesOutput = []
2502 nodeResults = main.TRUE
2503 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002504 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002505 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002506 name="nodes-" + str( i ),
2507 args=[ ] )
2508 threads.append( t )
2509 t.start()
2510
2511 for t in threads:
2512 t.join()
2513 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002514 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002515 ips.sort()
Jon Hall5cf14d52015-07-16 12:15:19 -07002516 for i in nodesOutput:
2517 try:
2518 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002519 activeIps = []
2520 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002521 for node in current:
Jon Halle9b1fa32015-12-08 15:32:21 -08002522 if node['state'] == 'ACTIVE':
2523 activeIps.append( node['ip'] )
2524 activeIps.sort()
2525 if ips == activeIps:
2526 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002527 except ( ValueError, TypeError ):
2528 main.log.error( "Error parsing nodes output" )
2529 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002530 currentResult = main.FALSE
2531 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002532 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2533 onpass="Nodes check successful",
2534 onfail="Nodes check NOT successful" )
2535
2536 def CASE9( self, main ):
2537 """
2538 Link s3-s28 down
2539 """
2540 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002541 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002542 assert main, "main not defined"
2543 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002544 assert main.CLIs, "main.CLIs not defined"
2545 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002546 # NOTE: You should probably run a topology check after this
2547
2548 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2549
2550 description = "Turn off a link to ensure that Link Discovery " +\
2551 "is working properly"
2552 main.case( description )
2553
2554 main.step( "Kill Link between s3 and s28" )
2555 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2556 main.log.info( "Waiting " + str( linkSleep ) +
2557 " seconds for link down to be discovered" )
2558 time.sleep( linkSleep )
2559 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2560 onpass="Link down successful",
2561 onfail="Failed to bring link down" )
2562 # TODO do some sort of check here
2563
2564 def CASE10( self, main ):
2565 """
2566 Link s3-s28 up
2567 """
2568 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002569 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002570 assert main, "main not defined"
2571 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002572 assert main.CLIs, "main.CLIs not defined"
2573 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002574 # NOTE: You should probably run a topology check after this
2575
2576 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2577
2578 description = "Restore a link to ensure that Link Discovery is " + \
2579 "working properly"
2580 main.case( description )
2581
2582 main.step( "Bring link between s3 and s28 back up" )
2583 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2584 main.log.info( "Waiting " + str( linkSleep ) +
2585 " seconds for link up to be discovered" )
2586 time.sleep( linkSleep )
2587 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2588 onpass="Link up successful",
2589 onfail="Failed to bring link up" )
2590 # TODO do some sort of check here
2591
2592 def CASE11( self, main ):
2593 """
2594 Switch Down
2595 """
2596 # NOTE: You should probably run a topology check after this
2597 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002598 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002599 assert main, "main not defined"
2600 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002601 assert main.CLIs, "main.CLIs not defined"
2602 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002603
2604 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2605
2606 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002607 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002608 main.case( description )
2609 switch = main.params[ 'kill' ][ 'switch' ]
2610 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2611
2612 # TODO: Make this switch parameterizable
2613 main.step( "Kill " + switch )
2614 main.log.info( "Deleting " + switch )
2615 main.Mininet1.delSwitch( switch )
2616 main.log.info( "Waiting " + str( switchSleep ) +
2617 " seconds for switch down to be discovered" )
2618 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002619 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002620 # Peek at the deleted switch
2621 main.log.warn( str( device ) )
2622 result = main.FALSE
2623 if device and device[ 'available' ] is False:
2624 result = main.TRUE
2625 utilities.assert_equals( expect=main.TRUE, actual=result,
2626 onpass="Kill switch successful",
2627 onfail="Failed to kill switch?" )
2628
2629 def CASE12( self, main ):
2630 """
2631 Switch Up
2632 """
2633 # NOTE: You should probably run a topology check after this
2634 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002635 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002636 assert main, "main not defined"
2637 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002638 assert main.CLIs, "main.CLIs not defined"
2639 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002640 assert ONOS1Port, "ONOS1Port not defined"
2641 assert ONOS2Port, "ONOS2Port not defined"
2642 assert ONOS3Port, "ONOS3Port not defined"
2643 assert ONOS4Port, "ONOS4Port not defined"
2644 assert ONOS5Port, "ONOS5Port not defined"
2645 assert ONOS6Port, "ONOS6Port not defined"
2646 assert ONOS7Port, "ONOS7Port not defined"
2647
2648 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2649 switch = main.params[ 'kill' ][ 'switch' ]
2650 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2651 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002652 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002653 description = "Adding a switch to ensure it is discovered correctly"
2654 main.case( description )
2655
2656 main.step( "Add back " + switch )
2657 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2658 for peer in links:
2659 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002660 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002661 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2662 main.log.info( "Waiting " + str( switchSleep ) +
2663 " seconds for switch up to be discovered" )
2664 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002665 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002666 # Peek at the deleted switch
2667 main.log.warn( str( device ) )
2668 result = main.FALSE
2669 if device and device[ 'available' ]:
2670 result = main.TRUE
2671 utilities.assert_equals( expect=main.TRUE, actual=result,
2672 onpass="add switch successful",
2673 onfail="Failed to add switch?" )
2674
2675 def CASE13( self, main ):
2676 """
2677 Clean up
2678 """
2679 import os
2680 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002681 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002682 assert main, "main not defined"
2683 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002684 assert main.CLIs, "main.CLIs not defined"
2685 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002686
2687 # printing colors to terminal
2688 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2689 'blue': '\033[94m', 'green': '\033[92m',
2690 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2691 main.case( "Test Cleanup" )
2692 main.step( "Killing tcpdumps" )
2693 main.Mininet2.stopTcpdump()
2694
2695 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002696 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002697 main.step( "Copying MN pcap and ONOS log files to test station" )
2698 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2699 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002700 # NOTE: MN Pcap file is being saved to logdir.
2701 # We scp this file as MN and TestON aren't necessarily the same vm
2702
2703 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002704 # TODO: Load these from params
2705 # NOTE: must end in /
2706 logFolder = "/opt/onos/log/"
2707 logFiles = [ "karaf.log", "karaf.log.1" ]
2708 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002709 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002710 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002711 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002712 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2713 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002714 # std*.log's
2715 # NOTE: must end in /
2716 logFolder = "/opt/onos/var/"
2717 logFiles = [ "stderr.log", "stdout.log" ]
2718 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002719 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002720 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002721 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002722 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2723 logFolder + f, dstName )
2724 else:
2725 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002726
2727 main.step( "Stopping Mininet" )
2728 mnResult = main.Mininet1.stopNet()
2729 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2730 onpass="Mininet stopped",
2731 onfail="MN cleanup NOT successful" )
2732
2733 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002734 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002735 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2736 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002737
2738 try:
2739 timerLog = open( main.logdir + "/Timers.csv", 'w')
2740 # Overwrite with empty line and close
2741 labels = "Gossip Intents, Restart"
2742 data = str( gossipTime ) + ", " + str( main.restartTime )
2743 timerLog.write( labels + "\n" + data )
2744 timerLog.close()
2745 except NameError, e:
2746 main.log.exception(e)
2747
2748 def CASE14( self, main ):
2749 """
2750 start election app on all onos nodes
2751 """
Jon Halle1a3b752015-07-22 13:02:46 -07002752 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002753 assert main, "main not defined"
2754 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002755 assert main.CLIs, "main.CLIs not defined"
2756 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002757
2758 main.case("Start Leadership Election app")
2759 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002760 onosCli = main.CLIs[ main.activeNodes[0] ]
2761 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002762 utilities.assert_equals(
2763 expect=main.TRUE,
2764 actual=appResult,
2765 onpass="Election app installed",
2766 onfail="Something went wrong with installing Leadership election" )
2767
2768 main.step( "Run for election on each node" )
2769 leaderResult = main.TRUE
2770 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002771 for i in main.activeNodes:
2772 main.CLIs[i].electionTestRun()
2773 for i in main.activeNodes:
2774 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002775 leader = cli.electionTestLeader()
2776 if leader is None or leader == main.FALSE:
2777 main.log.error( cli.name + ": Leader for the election app " +
2778 "should be an ONOS node, instead got '" +
2779 str( leader ) + "'" )
2780 leaderResult = main.FALSE
2781 leaders.append( leader )
2782 utilities.assert_equals(
2783 expect=main.TRUE,
2784 actual=leaderResult,
2785 onpass="Successfully ran for leadership",
2786 onfail="Failed to run for leadership" )
2787
2788 main.step( "Check that each node shows the same leader" )
2789 sameLeader = main.TRUE
2790 if len( set( leaders ) ) != 1:
2791 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002792 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002793 str( leaders ) )
2794 utilities.assert_equals(
2795 expect=main.TRUE,
2796 actual=sameLeader,
2797 onpass="Leadership is consistent for the election topic",
2798 onfail="Nodes have different leaders" )
2799
2800 def CASE15( self, main ):
2801 """
2802 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002803 15.1 Run election on each node
2804 15.2 Check that each node has the same leaders and candidates
2805 15.3 Find current leader and withdraw
2806 15.4 Check that a new node was elected leader
2807 15.5 Check that that new leader was the candidate of old leader
2808 15.6 Run for election on old leader
2809 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2810 15.8 Make sure that the old leader was added to the candidate list
2811
2812 old and new variable prefixes refer to data from before vs after
2813 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002814 """
2815 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002816 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002817 assert main, "main not defined"
2818 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002819 assert main.CLIs, "main.CLIs not defined"
2820 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002821
Jon Hall5cf14d52015-07-16 12:15:19 -07002822 description = "Check that Leadership Election is still functional"
2823 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002824 # NOTE: Need to re-run since being a canidate is not persistant
2825 # TODO: add check for "Command not found:" in the driver, this
2826 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002827
acsmars71adceb2015-08-31 15:09:26 -07002828 oldLeaders = [] # leaders by node before withdrawl from candidates
2829 newLeaders = [] # leaders by node after withdrawl from candidates
2830 oldAllCandidates = [] # list of lists of each nodes' candidates before
2831 newAllCandidates = [] # list of lists of each nodes' candidates after
2832 oldCandidates = [] # list of candidates from node 0 before withdrawl
2833 newCandidates = [] # list of candidates from node 0 after withdrawl
2834 oldLeader = '' # the old leader from oldLeaders, None if not same
2835 newLeader = '' # the new leaders fron newLoeaders, None if not same
2836 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2837 expectNoLeader = False # True when there is only one leader
2838 if main.numCtrls == 1:
2839 expectNoLeader = True
2840
2841 main.step( "Run for election on each node" )
2842 electionResult = main.TRUE
2843
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002844 for i in main.activeNodes: # run test election on each node
2845 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002846 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002847 utilities.assert_equals(
2848 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002849 actual=electionResult,
2850 onpass="All nodes successfully ran for leadership",
2851 onfail="At least one node failed to run for leadership" )
2852
acsmars3a72bde2015-09-02 14:16:22 -07002853 if electionResult == main.FALSE:
2854 main.log.error(
2855 "Skipping Test Case because Election Test App isn't loaded" )
2856 main.skipCase()
2857
acsmars71adceb2015-08-31 15:09:26 -07002858 main.step( "Check that each node shows the same leader and candidates" )
2859 sameResult = main.TRUE
2860 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002861 for i in main.activeNodes:
2862 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002863 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2864 oldAllCandidates.append( node )
2865 oldLeaders.append( node[ 0 ] )
2866 oldCandidates = oldAllCandidates[ 0 ]
2867
2868 # Check that each node has the same leader. Defines oldLeader
2869 if len( set( oldLeaders ) ) != 1:
2870 sameResult = main.FALSE
2871 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2872 oldLeader = None
2873 else:
2874 oldLeader = oldLeaders[ 0 ]
2875
2876 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002877 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002878 for candidates in oldAllCandidates:
2879 if set( candidates ) != set( oldCandidates ):
2880 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002881 candidateDiscrepancy = True
2882
2883 if candidateDiscrepancy:
2884 failMessage += " and candidates"
2885
acsmars71adceb2015-08-31 15:09:26 -07002886 utilities.assert_equals(
2887 expect=main.TRUE,
2888 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002889 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002890 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002891
2892 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002893 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002894 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002895 if oldLeader is None:
2896 main.log.error( "Leadership isn't consistent." )
2897 withdrawResult = main.FALSE
2898 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002899 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002900 if oldLeader == main.nodes[ i ].ip_address:
2901 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002902 break
2903 else: # FOR/ELSE statement
2904 main.log.error( "Leader election, could not find current leader" )
2905 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002906 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002907 utilities.assert_equals(
2908 expect=main.TRUE,
2909 actual=withdrawResult,
2910 onpass="Node was withdrawn from election",
2911 onfail="Node was not withdrawn from election" )
2912
acsmars71adceb2015-08-31 15:09:26 -07002913 main.step( "Check that a new node was elected leader" )
2914
Jon Hall5cf14d52015-07-16 12:15:19 -07002915 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002916 newLeaderResult = main.TRUE
2917 failMessage = "Nodes have different leaders"
2918
2919 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002920 for i in main.activeNodes:
2921 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002922 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2923 # elections might no have finished yet
2924 if node[ 0 ] == 'none' and not expectNoLeader:
2925 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2926 "sure elections are complete." )
2927 time.sleep(5)
2928 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2929 # election still isn't done or there is a problem
2930 if node[ 0 ] == 'none':
2931 main.log.error( "No leader was elected on at least 1 node" )
2932 newLeaderResult = main.FALSE
2933 newAllCandidates.append( node )
2934 newLeaders.append( node[ 0 ] )
2935 newCandidates = newAllCandidates[ 0 ]
2936
2937 # Check that each node has the same leader. Defines newLeader
2938 if len( set( newLeaders ) ) != 1:
2939 newLeaderResult = main.FALSE
2940 main.log.error( "Nodes have different leaders: " +
2941 str( newLeaders ) )
2942 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002943 else:
acsmars71adceb2015-08-31 15:09:26 -07002944 newLeader = newLeaders[ 0 ]
2945
2946 # Check that each node's candidate list is the same
2947 for candidates in newAllCandidates:
2948 if set( candidates ) != set( newCandidates ):
2949 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002950 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002951
2952 # Check that the new leader is not the older leader, which was withdrawn
2953 if newLeader == oldLeader:
2954 newLeaderResult = main.FALSE
2955 main.log.error( "All nodes still see old leader: " + oldLeader +
2956 " as the current leader" )
2957
Jon Hall5cf14d52015-07-16 12:15:19 -07002958 utilities.assert_equals(
2959 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002960 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002961 onpass="Leadership election passed",
2962 onfail="Something went wrong with Leadership election" )
2963
acsmars71adceb2015-08-31 15:09:26 -07002964 main.step( "Check that that new leader was the candidate of old leader")
2965 # candidates[ 2 ] should be come the top candidate after withdrawl
2966 correctCandidateResult = main.TRUE
2967 if expectNoLeader:
2968 if newLeader == 'none':
2969 main.log.info( "No leader expected. None found. Pass" )
2970 correctCandidateResult = main.TRUE
2971 else:
2972 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2973 correctCandidateResult = main.FALSE
2974 elif newLeader != oldCandidates[ 2 ]:
2975 correctCandidateResult = main.FALSE
2976 main.log.error( "Candidate " + newLeader + " was elected. " +
2977 oldCandidates[ 2 ] + " should have had priority." )
2978
2979 utilities.assert_equals(
2980 expect=main.TRUE,
2981 actual=correctCandidateResult,
2982 onpass="Correct Candidate Elected",
2983 onfail="Incorrect Candidate Elected" )
2984
Jon Hall5cf14d52015-07-16 12:15:19 -07002985 main.step( "Run for election on old leader( just so everyone " +
2986 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002987 if oldLeaderCLI is not None:
2988 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002989 else:
acsmars71adceb2015-08-31 15:09:26 -07002990 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002991 runResult = main.FALSE
2992 utilities.assert_equals(
2993 expect=main.TRUE,
2994 actual=runResult,
2995 onpass="App re-ran for election",
2996 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07002997 main.step(
2998 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002999 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07003000 positionResult = main.TRUE
3001 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
3002
3003 # Reset and reuse the new candidate and leaders lists
3004 newAllCandidates = []
3005 newCandidates = []
3006 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003007 for i in main.activeNodes:
3008 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003009 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3010 if oldLeader not in node: # election might no have finished yet
3011 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3012 "be sure elections are complete" )
3013 time.sleep(5)
3014 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3015 if oldLeader not in node: # election still isn't done, errors
3016 main.log.error(
3017 "Old leader was not elected on at least one node" )
3018 positionResult = main.FALSE
3019 newAllCandidates.append( node )
3020 newLeaders.append( node[ 0 ] )
3021 newCandidates = newAllCandidates[ 0 ]
3022
3023 # Check that each node has the same leader. Defines newLeader
3024 if len( set( newLeaders ) ) != 1:
3025 positionResult = main.FALSE
3026 main.log.error( "Nodes have different leaders: " +
3027 str( newLeaders ) )
3028 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003029 else:
acsmars71adceb2015-08-31 15:09:26 -07003030 newLeader = newLeaders[ 0 ]
3031
3032 # Check that each node's candidate list is the same
3033 for candidates in newAllCandidates:
3034 if set( candidates ) != set( newCandidates ):
3035 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003036 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003037
3038 # Check that the re-elected node is last on the candidate List
3039 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003040 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003041 str( newCandidates ) )
3042 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003043
3044 utilities.assert_equals(
3045 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003046 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003047 onpass="Old leader successfully re-ran for election",
3048 onfail="Something went wrong with Leadership election after " +
3049 "the old leader re-ran for election" )
3050
3051 def CASE16( self, main ):
3052 """
3053 Install Distributed Primitives app
3054 """
3055 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003056 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003057 assert main, "main not defined"
3058 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003059 assert main.CLIs, "main.CLIs not defined"
3060 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003061
3062 # Variables for the distributed primitives tests
3063 global pCounterName
3064 global iCounterName
3065 global pCounterValue
3066 global iCounterValue
3067 global onosSet
3068 global onosSetName
3069 pCounterName = "TestON-Partitions"
3070 iCounterName = "TestON-inMemory"
3071 pCounterValue = 0
3072 iCounterValue = 0
3073 onosSet = set([])
3074 onosSetName = "TestON-set"
3075
3076 description = "Install Primitives app"
3077 main.case( description )
3078 main.step( "Install Primitives app" )
3079 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003080 node = main.activeNodes[0]
3081 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003082 utilities.assert_equals( expect=main.TRUE,
3083 actual=appResults,
3084 onpass="Primitives app activated",
3085 onfail="Primitives app not activated" )
3086 time.sleep( 5 ) # To allow all nodes to activate
3087
3088 def CASE17( self, main ):
3089 """
3090 Check for basic functionality with distributed primitives
3091 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003092 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003093 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003094 assert main, "main not defined"
3095 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003096 assert main.CLIs, "main.CLIs not defined"
3097 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003098 assert pCounterName, "pCounterName not defined"
3099 assert iCounterName, "iCounterName not defined"
3100 assert onosSetName, "onosSetName not defined"
3101 # NOTE: assert fails if value is 0/None/Empty/False
3102 try:
3103 pCounterValue
3104 except NameError:
3105 main.log.error( "pCounterValue not defined, setting to 0" )
3106 pCounterValue = 0
3107 try:
3108 iCounterValue
3109 except NameError:
3110 main.log.error( "iCounterValue not defined, setting to 0" )
3111 iCounterValue = 0
3112 try:
3113 onosSet
3114 except NameError:
3115 main.log.error( "onosSet not defined, setting to empty Set" )
3116 onosSet = set([])
3117 # Variables for the distributed primitives tests. These are local only
3118 addValue = "a"
3119 addAllValue = "a b c d e f"
3120 retainValue = "c d e f"
3121
3122 description = "Check for basic functionality with distributed " +\
3123 "primitives"
3124 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003125 main.caseExplanation = "Test the methods of the distributed " +\
3126 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003127 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003128 # Partitioned counters
3129 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003130 pCounters = []
3131 threads = []
3132 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003133 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003134 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3135 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003136 args=[ pCounterName ] )
3137 pCounterValue += 1
3138 addedPValues.append( pCounterValue )
3139 threads.append( t )
3140 t.start()
3141
3142 for t in threads:
3143 t.join()
3144 pCounters.append( t.result )
3145 # Check that counter incremented numController times
3146 pCounterResults = True
3147 for i in addedPValues:
3148 tmpResult = i in pCounters
3149 pCounterResults = pCounterResults and tmpResult
3150 if not tmpResult:
3151 main.log.error( str( i ) + " is not in partitioned "
3152 "counter incremented results" )
3153 utilities.assert_equals( expect=True,
3154 actual=pCounterResults,
3155 onpass="Default counter incremented",
3156 onfail="Error incrementing default" +
3157 " counter" )
3158
Jon Halle1a3b752015-07-22 13:02:46 -07003159 main.step( "Get then Increment a default counter on each node" )
3160 pCounters = []
3161 threads = []
3162 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003163 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003164 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3165 name="counterGetAndAdd-" + str( i ),
3166 args=[ pCounterName ] )
3167 addedPValues.append( pCounterValue )
3168 pCounterValue += 1
3169 threads.append( t )
3170 t.start()
3171
3172 for t in threads:
3173 t.join()
3174 pCounters.append( t.result )
3175 # Check that counter incremented numController times
3176 pCounterResults = True
3177 for i in addedPValues:
3178 tmpResult = i in pCounters
3179 pCounterResults = pCounterResults and tmpResult
3180 if not tmpResult:
3181 main.log.error( str( i ) + " is not in partitioned "
3182 "counter incremented results" )
3183 utilities.assert_equals( expect=True,
3184 actual=pCounterResults,
3185 onpass="Default counter incremented",
3186 onfail="Error incrementing default" +
3187 " counter" )
3188
3189 main.step( "Counters we added have the correct values" )
3190 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3191 utilities.assert_equals( expect=main.TRUE,
3192 actual=incrementCheck,
3193 onpass="Added counters are correct",
3194 onfail="Added counters are incorrect" )
3195
3196 main.step( "Add -8 to then get a default counter on each node" )
3197 pCounters = []
3198 threads = []
3199 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003200 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003201 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3202 name="counterIncrement-" + str( i ),
3203 args=[ pCounterName ],
3204 kwargs={ "delta": -8 } )
3205 pCounterValue += -8
3206 addedPValues.append( pCounterValue )
3207 threads.append( t )
3208 t.start()
3209
3210 for t in threads:
3211 t.join()
3212 pCounters.append( t.result )
3213 # Check that counter incremented numController times
3214 pCounterResults = True
3215 for i in addedPValues:
3216 tmpResult = i in pCounters
3217 pCounterResults = pCounterResults and tmpResult
3218 if not tmpResult:
3219 main.log.error( str( i ) + " is not in partitioned "
3220 "counter incremented results" )
3221 utilities.assert_equals( expect=True,
3222 actual=pCounterResults,
3223 onpass="Default counter incremented",
3224 onfail="Error incrementing default" +
3225 " counter" )
3226
3227 main.step( "Add 5 to then get a default counter on each node" )
3228 pCounters = []
3229 threads = []
3230 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003231 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003232 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3233 name="counterIncrement-" + str( i ),
3234 args=[ pCounterName ],
3235 kwargs={ "delta": 5 } )
3236 pCounterValue += 5
3237 addedPValues.append( pCounterValue )
3238 threads.append( t )
3239 t.start()
3240
3241 for t in threads:
3242 t.join()
3243 pCounters.append( t.result )
3244 # Check that counter incremented numController times
3245 pCounterResults = True
3246 for i in addedPValues:
3247 tmpResult = i in pCounters
3248 pCounterResults = pCounterResults and tmpResult
3249 if not tmpResult:
3250 main.log.error( str( i ) + " is not in partitioned "
3251 "counter incremented results" )
3252 utilities.assert_equals( expect=True,
3253 actual=pCounterResults,
3254 onpass="Default counter incremented",
3255 onfail="Error incrementing default" +
3256 " counter" )
3257
3258 main.step( "Get then add 5 to a default counter on each node" )
3259 pCounters = []
3260 threads = []
3261 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003262 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003263 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3264 name="counterIncrement-" + str( i ),
3265 args=[ pCounterName ],
3266 kwargs={ "delta": 5 } )
3267 addedPValues.append( pCounterValue )
3268 pCounterValue += 5
3269 threads.append( t )
3270 t.start()
3271
3272 for t in threads:
3273 t.join()
3274 pCounters.append( t.result )
3275 # Check that counter incremented numController times
3276 pCounterResults = True
3277 for i in addedPValues:
3278 tmpResult = i in pCounters
3279 pCounterResults = pCounterResults and tmpResult
3280 if not tmpResult:
3281 main.log.error( str( i ) + " is not in partitioned "
3282 "counter incremented results" )
3283 utilities.assert_equals( expect=True,
3284 actual=pCounterResults,
3285 onpass="Default counter incremented",
3286 onfail="Error incrementing default" +
3287 " counter" )
3288
3289 main.step( "Counters we added have the correct values" )
3290 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3291 utilities.assert_equals( expect=main.TRUE,
3292 actual=incrementCheck,
3293 onpass="Added counters are correct",
3294 onfail="Added counters are incorrect" )
3295
3296 # In-Memory counters
3297 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003298 iCounters = []
3299 addedIValues = []
3300 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003301 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003302 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003303 name="icounterIncrement-" + str( i ),
3304 args=[ iCounterName ],
3305 kwargs={ "inMemory": True } )
3306 iCounterValue += 1
3307 addedIValues.append( iCounterValue )
3308 threads.append( t )
3309 t.start()
3310
3311 for t in threads:
3312 t.join()
3313 iCounters.append( t.result )
3314 # Check that counter incremented numController times
3315 iCounterResults = True
3316 for i in addedIValues:
3317 tmpResult = i in iCounters
3318 iCounterResults = iCounterResults and tmpResult
3319 if not tmpResult:
3320 main.log.error( str( i ) + " is not in the in-memory "
3321 "counter incremented results" )
3322 utilities.assert_equals( expect=True,
3323 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003324 onpass="In-memory counter incremented",
3325 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003326 " counter" )
3327
Jon Halle1a3b752015-07-22 13:02:46 -07003328 main.step( "Get then Increment a in-memory counter on each node" )
3329 iCounters = []
3330 threads = []
3331 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003332 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003333 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3334 name="counterGetAndAdd-" + str( i ),
3335 args=[ iCounterName ],
3336 kwargs={ "inMemory": True } )
3337 addedIValues.append( iCounterValue )
3338 iCounterValue += 1
3339 threads.append( t )
3340 t.start()
3341
3342 for t in threads:
3343 t.join()
3344 iCounters.append( t.result )
3345 # Check that counter incremented numController times
3346 iCounterResults = True
3347 for i in addedIValues:
3348 tmpResult = i in iCounters
3349 iCounterResults = iCounterResults and tmpResult
3350 if not tmpResult:
3351 main.log.error( str( i ) + " is not in in-memory "
3352 "counter incremented results" )
3353 utilities.assert_equals( expect=True,
3354 actual=iCounterResults,
3355 onpass="In-memory counter incremented",
3356 onfail="Error incrementing in-memory" +
3357 " counter" )
3358
3359 main.step( "Counters we added have the correct values" )
3360 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3361 utilities.assert_equals( expect=main.TRUE,
3362 actual=incrementCheck,
3363 onpass="Added counters are correct",
3364 onfail="Added counters are incorrect" )
3365
3366 main.step( "Add -8 to then get a in-memory counter on each node" )
3367 iCounters = []
3368 threads = []
3369 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003370 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003371 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3372 name="counterIncrement-" + str( i ),
3373 args=[ iCounterName ],
3374 kwargs={ "delta": -8, "inMemory": True } )
3375 iCounterValue += -8
3376 addedIValues.append( iCounterValue )
3377 threads.append( t )
3378 t.start()
3379
3380 for t in threads:
3381 t.join()
3382 iCounters.append( t.result )
3383 # Check that counter incremented numController times
3384 iCounterResults = True
3385 for i in addedIValues:
3386 tmpResult = i in iCounters
3387 iCounterResults = iCounterResults and tmpResult
3388 if not tmpResult:
3389 main.log.error( str( i ) + " is not in in-memory "
3390 "counter incremented results" )
3391 utilities.assert_equals( expect=True,
3392 actual=pCounterResults,
3393 onpass="In-memory counter incremented",
3394 onfail="Error incrementing in-memory" +
3395 " counter" )
3396
3397 main.step( "Add 5 to then get a in-memory counter on each node" )
3398 iCounters = []
3399 threads = []
3400 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003401 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003402 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3403 name="counterIncrement-" + str( i ),
3404 args=[ iCounterName ],
3405 kwargs={ "delta": 5, "inMemory": True } )
3406 iCounterValue += 5
3407 addedIValues.append( iCounterValue )
3408 threads.append( t )
3409 t.start()
3410
3411 for t in threads:
3412 t.join()
3413 iCounters.append( t.result )
3414 # Check that counter incremented numController times
3415 iCounterResults = True
3416 for i in addedIValues:
3417 tmpResult = i in iCounters
3418 iCounterResults = iCounterResults and tmpResult
3419 if not tmpResult:
3420 main.log.error( str( i ) + " is not in in-memory "
3421 "counter incremented results" )
3422 utilities.assert_equals( expect=True,
3423 actual=pCounterResults,
3424 onpass="In-memory counter incremented",
3425 onfail="Error incrementing in-memory" +
3426 " counter" )
3427
3428 main.step( "Get then add 5 to a in-memory counter on each node" )
3429 iCounters = []
3430 threads = []
3431 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003432 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003433 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3434 name="counterIncrement-" + str( i ),
3435 args=[ iCounterName ],
3436 kwargs={ "delta": 5, "inMemory": True } )
3437 addedIValues.append( iCounterValue )
3438 iCounterValue += 5
3439 threads.append( t )
3440 t.start()
3441
3442 for t in threads:
3443 t.join()
3444 iCounters.append( t.result )
3445 # Check that counter incremented numController times
3446 iCounterResults = True
3447 for i in addedIValues:
3448 tmpResult = i in iCounters
3449 iCounterResults = iCounterResults and tmpResult
3450 if not tmpResult:
3451 main.log.error( str( i ) + " is not in in-memory "
3452 "counter incremented results" )
3453 utilities.assert_equals( expect=True,
3454 actual=iCounterResults,
3455 onpass="In-memory counter incremented",
3456 onfail="Error incrementing in-memory" +
3457 " counter" )
3458
3459 main.step( "Counters we added have the correct values" )
3460 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3461 utilities.assert_equals( expect=main.TRUE,
3462 actual=incrementCheck,
3463 onpass="Added counters are correct",
3464 onfail="Added counters are incorrect" )
3465
Jon Hall5cf14d52015-07-16 12:15:19 -07003466 main.step( "Check counters are consistant across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003467 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003468 utilities.assert_equals( expect=main.TRUE,
3469 actual=consistentCounterResults,
3470 onpass="ONOS counters are consistent " +
3471 "across nodes",
3472 onfail="ONOS Counters are inconsistent " +
3473 "across nodes" )
3474
3475 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003476 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3477 incrementCheck = incrementCheck and \
3478 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003479 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003480 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003481 onpass="Added counters are correct",
3482 onfail="Added counters are incorrect" )
3483 # DISTRIBUTED SETS
3484 main.step( "Distributed Set get" )
3485 size = len( onosSet )
3486 getResponses = []
3487 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003488 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003489 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003490 name="setTestGet-" + str( i ),
3491 args=[ onosSetName ] )
3492 threads.append( t )
3493 t.start()
3494 for t in threads:
3495 t.join()
3496 getResponses.append( t.result )
3497
3498 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003499 for i in range( len( main.activeNodes ) ):
3500 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003501 if isinstance( getResponses[ i ], list):
3502 current = set( getResponses[ i ] )
3503 if len( current ) == len( getResponses[ i ] ):
3504 # no repeats
3505 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003506 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003507 " has incorrect view" +
3508 " of set " + onosSetName + ":\n" +
3509 str( getResponses[ i ] ) )
3510 main.log.debug( "Expected: " + str( onosSet ) )
3511 main.log.debug( "Actual: " + str( current ) )
3512 getResults = main.FALSE
3513 else:
3514 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003515 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003516 " has repeat elements in" +
3517 " set " + onosSetName + ":\n" +
3518 str( getResponses[ i ] ) )
3519 getResults = main.FALSE
3520 elif getResponses[ i ] == main.ERROR:
3521 getResults = main.FALSE
3522 utilities.assert_equals( expect=main.TRUE,
3523 actual=getResults,
3524 onpass="Set elements are correct",
3525 onfail="Set elements are incorrect" )
3526
3527 main.step( "Distributed Set size" )
3528 sizeResponses = []
3529 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003530 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003531 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003532 name="setTestSize-" + str( i ),
3533 args=[ onosSetName ] )
3534 threads.append( t )
3535 t.start()
3536 for t in threads:
3537 t.join()
3538 sizeResponses.append( t.result )
3539
3540 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003541 for i in range( len( main.activeNodes ) ):
3542 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003543 if size != sizeResponses[ i ]:
3544 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003545 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003546 " expected a size of " + str( size ) +
3547 " for set " + onosSetName +
3548 " but got " + str( sizeResponses[ i ] ) )
3549 utilities.assert_equals( expect=main.TRUE,
3550 actual=sizeResults,
3551 onpass="Set sizes are correct",
3552 onfail="Set sizes are incorrect" )
3553
3554 main.step( "Distributed Set add()" )
3555 onosSet.add( addValue )
3556 addResponses = []
3557 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003558 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003559 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003560 name="setTestAdd-" + str( i ),
3561 args=[ onosSetName, addValue ] )
3562 threads.append( t )
3563 t.start()
3564 for t in threads:
3565 t.join()
3566 addResponses.append( t.result )
3567
3568 # main.TRUE = successfully changed the set
3569 # main.FALSE = action resulted in no change in set
3570 # main.ERROR - Some error in executing the function
3571 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003572 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003573 if addResponses[ i ] == main.TRUE:
3574 # All is well
3575 pass
3576 elif addResponses[ i ] == main.FALSE:
3577 # Already in set, probably fine
3578 pass
3579 elif addResponses[ i ] == main.ERROR:
3580 # Error in execution
3581 addResults = main.FALSE
3582 else:
3583 # unexpected result
3584 addResults = main.FALSE
3585 if addResults != main.TRUE:
3586 main.log.error( "Error executing set add" )
3587
3588 # Check if set is still correct
3589 size = len( onosSet )
3590 getResponses = []
3591 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003592 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003593 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003594 name="setTestGet-" + str( i ),
3595 args=[ onosSetName ] )
3596 threads.append( t )
3597 t.start()
3598 for t in threads:
3599 t.join()
3600 getResponses.append( t.result )
3601 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003602 for i in range( len( main.activeNodes ) ):
3603 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003604 if isinstance( getResponses[ i ], list):
3605 current = set( getResponses[ i ] )
3606 if len( current ) == len( getResponses[ i ] ):
3607 # no repeats
3608 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003609 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003610 " of set " + onosSetName + ":\n" +
3611 str( getResponses[ i ] ) )
3612 main.log.debug( "Expected: " + str( onosSet ) )
3613 main.log.debug( "Actual: " + str( current ) )
3614 getResults = main.FALSE
3615 else:
3616 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003617 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003618 " set " + onosSetName + ":\n" +
3619 str( getResponses[ i ] ) )
3620 getResults = main.FALSE
3621 elif getResponses[ i ] == main.ERROR:
3622 getResults = main.FALSE
3623 sizeResponses = []
3624 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003625 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003626 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003627 name="setTestSize-" + str( i ),
3628 args=[ onosSetName ] )
3629 threads.append( t )
3630 t.start()
3631 for t in threads:
3632 t.join()
3633 sizeResponses.append( t.result )
3634 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003635 for i in range( len( main.activeNodes ) ):
3636 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003637 if size != sizeResponses[ i ]:
3638 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003639 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003640 " expected a size of " + str( size ) +
3641 " for set " + onosSetName +
3642 " but got " + str( sizeResponses[ i ] ) )
3643 addResults = addResults and getResults and sizeResults
3644 utilities.assert_equals( expect=main.TRUE,
3645 actual=addResults,
3646 onpass="Set add correct",
3647 onfail="Set add was incorrect" )
3648
3649 main.step( "Distributed Set addAll()" )
3650 onosSet.update( addAllValue.split() )
3651 addResponses = []
3652 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003653 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003654 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003655 name="setTestAddAll-" + str( i ),
3656 args=[ onosSetName, addAllValue ] )
3657 threads.append( t )
3658 t.start()
3659 for t in threads:
3660 t.join()
3661 addResponses.append( t.result )
3662
3663 # main.TRUE = successfully changed the set
3664 # main.FALSE = action resulted in no change in set
3665 # main.ERROR - Some error in executing the function
3666 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003667 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003668 if addResponses[ i ] == main.TRUE:
3669 # All is well
3670 pass
3671 elif addResponses[ i ] == main.FALSE:
3672 # Already in set, probably fine
3673 pass
3674 elif addResponses[ i ] == main.ERROR:
3675 # Error in execution
3676 addAllResults = main.FALSE
3677 else:
3678 # unexpected result
3679 addAllResults = main.FALSE
3680 if addAllResults != main.TRUE:
3681 main.log.error( "Error executing set addAll" )
3682
3683 # Check if set is still correct
3684 size = len( onosSet )
3685 getResponses = []
3686 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003687 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003688 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003689 name="setTestGet-" + str( i ),
3690 args=[ onosSetName ] )
3691 threads.append( t )
3692 t.start()
3693 for t in threads:
3694 t.join()
3695 getResponses.append( t.result )
3696 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003697 for i in range( len( main.activeNodes ) ):
3698 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003699 if isinstance( getResponses[ i ], list):
3700 current = set( getResponses[ i ] )
3701 if len( current ) == len( getResponses[ i ] ):
3702 # no repeats
3703 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003704 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003705 " has incorrect view" +
3706 " of set " + onosSetName + ":\n" +
3707 str( getResponses[ i ] ) )
3708 main.log.debug( "Expected: " + str( onosSet ) )
3709 main.log.debug( "Actual: " + str( current ) )
3710 getResults = main.FALSE
3711 else:
3712 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003713 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003714 " has repeat elements in" +
3715 " set " + onosSetName + ":\n" +
3716 str( getResponses[ i ] ) )
3717 getResults = main.FALSE
3718 elif getResponses[ i ] == main.ERROR:
3719 getResults = main.FALSE
3720 sizeResponses = []
3721 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003722 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003723 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003724 name="setTestSize-" + str( i ),
3725 args=[ onosSetName ] )
3726 threads.append( t )
3727 t.start()
3728 for t in threads:
3729 t.join()
3730 sizeResponses.append( t.result )
3731 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003732 for i in range( len( main.activeNodes ) ):
3733 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003734 if size != sizeResponses[ i ]:
3735 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003736 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003737 " expected a size of " + str( size ) +
3738 " for set " + onosSetName +
3739 " but got " + str( sizeResponses[ i ] ) )
3740 addAllResults = addAllResults and getResults and sizeResults
3741 utilities.assert_equals( expect=main.TRUE,
3742 actual=addAllResults,
3743 onpass="Set addAll correct",
3744 onfail="Set addAll was incorrect" )
3745
3746 main.step( "Distributed Set contains()" )
3747 containsResponses = []
3748 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003749 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003750 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003751 name="setContains-" + str( i ),
3752 args=[ onosSetName ],
3753 kwargs={ "values": addValue } )
3754 threads.append( t )
3755 t.start()
3756 for t in threads:
3757 t.join()
3758 # NOTE: This is the tuple
3759 containsResponses.append( t.result )
3760
3761 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003762 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003763 if containsResponses[ i ] == main.ERROR:
3764 containsResults = main.FALSE
3765 else:
3766 containsResults = containsResults and\
3767 containsResponses[ i ][ 1 ]
3768 utilities.assert_equals( expect=main.TRUE,
3769 actual=containsResults,
3770 onpass="Set contains is functional",
3771 onfail="Set contains failed" )
3772
3773 main.step( "Distributed Set containsAll()" )
3774 containsAllResponses = []
3775 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003776 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003777 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003778 name="setContainsAll-" + str( i ),
3779 args=[ onosSetName ],
3780 kwargs={ "values": addAllValue } )
3781 threads.append( t )
3782 t.start()
3783 for t in threads:
3784 t.join()
3785 # NOTE: This is the tuple
3786 containsAllResponses.append( t.result )
3787
3788 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003789 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003790 if containsResponses[ i ] == main.ERROR:
3791 containsResults = main.FALSE
3792 else:
3793 containsResults = containsResults and\
3794 containsResponses[ i ][ 1 ]
3795 utilities.assert_equals( expect=main.TRUE,
3796 actual=containsAllResults,
3797 onpass="Set containsAll is functional",
3798 onfail="Set containsAll failed" )

        # Remove a single element from the set on one logical operation,
        # issued to every active node in parallel
        main.step( "Distributed Set remove()" )
        onosSet.remove( addValue )  # update the local reference set to match
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct: fetch the set contents and size from
        # every node and compare against the local reference set ( onosSet )
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node number for log messages ( node index is 0-based )
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the reported size on every node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )

        # Remove several elements at once on every active node in parallel
        main.step( "Distributed Set removeAll()" )
        onosSet.difference_update( addAllValue.split() )  # keep reference set in sync
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            # log but continue; missing responses are caught by the checks below
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct: compare contents and size on every
        # node against the local reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node number for log messages ( node index is 0-based )
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the reported size on every node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )

        # Re-add the removed elements on every active node in parallel
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )  # keep reference set in sync
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct: compare contents and size on every
        # node against the local reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node number for log messages ( node index is 0-based )
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the reported size on every node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

        # Clear the whole set on every active node in parallel.
        # Clearing is implemented via setTestRemove with clear=True
        main.step( "Distributed Set clear()" )
        onosSet.clear()  # keep reference set in sync
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "],  # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct: the reference set is now empty, so
        # every node should report an empty set of size 0
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node number for log messages ( node index is 0-based )
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the reported size on every node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )

        # Repopulate the cleared set on every active node in parallel
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )  # keep reference set in sync
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct: compare contents and size on every
        # node against the local reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node number for log messages ( node index is 0-based )
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the reported size on every node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

        # Retain only the retainValue elements on every active node in
        # parallel ( set intersection, via setTestRemove with retain=True )
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )  # keep reference set in sync
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct: compare contents and size on every
        # node against the local reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node number for log messages ( node index is 0-based )
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the reported size on every node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4385
Jon Hall2a5002c2015-08-21 16:49:11 -07004386 # Transactional maps
4387 main.step( "Partitioned Transactional maps put" )
4388 tMapValue = "Testing"
4389 numKeys = 100
4390 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004391 node = main.activeNodes[0]
4392 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall2a5002c2015-08-21 16:49:11 -07004393 if len( putResponses ) == 100:
4394 for i in putResponses:
4395 if putResponses[ i ][ 'value' ] != tMapValue:
4396 putResult = False
4397 else:
4398 putResult = False
4399 if not putResult:
4400 main.log.debug( "Put response values: " + str( putResponses ) )
4401 utilities.assert_equals( expect=True,
4402 actual=putResult,
4403 onpass="Partitioned Transactional Map put successful",
4404 onfail="Partitioned Transactional Map put values are incorrect" )

        # Read every key back from all active nodes and confirm each node
        # returns the value that was put
        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE: 'node' here is a per-node response value, shadowing the
            # node index used in the put step above
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )
4432
4433 main.step( "In-memory Transactional maps put" )
4434 tMapValue = "Testing"
4435 numKeys = 100
4436 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004437 node = main.activeNodes[0]
4438 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
Jon Hall2a5002c2015-08-21 16:49:11 -07004439 if len( putResponses ) == 100:
4440 for i in putResponses:
4441 if putResponses[ i ][ 'value' ] != tMapValue:
4442 putResult = False
4443 else:
4444 putResult = False
4445 if not putResult:
4446 main.log.debug( "Put response values: " + str( putResponses ) )
4447 utilities.assert_equals( expect=True,
4448 actual=putResult,
4449 onpass="In-Memory Transactional Map put successful",
4450 onfail="In-Memory Transactional Map put values are incorrect" )

        # Read every key back from all active nodes and confirm each node
        # returns the value that was put into the in-memory map
        main.step( "In-Memory Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ],
                                 kwargs={ "inMemory": True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE: 'node' here is a per-node response value, shadowing the
            # node index used in the put step above
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="In-Memory Transactional Map get values were correct",
                                 onfail="In-Memory Transactional Map values incorrect" )