"""
Description: This test is to determine if ONOS can handle
    a minority of its nodes restarting

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""


class HAstopNodes:

    def __init__( self ):
        self.default = ''

    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import pexpect
        import time
        main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS " +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        try:
            fileName = "Counters"
            # TODO: Maybe make a library folder somewhere?
            path = main.params[ 'imports' ][ 'path' ]
            main.Counters = imp.load_source( fileName,
                                             path + fileName + ".py" )
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        main.CLIs = []
        main.nodes = []
        ipList = []
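        # Gather the ONOS CLI and node driver components configured for this
        # test; the loop below stops at the first index with no matching
        # component, so only the nodes that actually exist are used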
Jon Halle1a3b752015-07-22 13:02:46 -0700105 for i in range( 1, main.numCtrls + 1 ):
106 try:
107 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
108 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
109 ipList.append( main.nodes[ -1 ].ip_address )
110 except AttributeError:
111 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700112
113 main.step( "Create cell file" )
114 cellAppString = main.params[ 'ENV' ][ 'appString' ]
115 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
116 main.Mininet1.ip_address,
117 cellAppString, ipList )
118 main.step( "Applying cell variable to environment" )
119 cellResult = main.ONOSbench.setCell( cellName )
120 verifyResult = main.ONOSbench.verifyCell()
121
122 # FIXME:this is short term fix
123 main.log.info( "Removing raft logs" )
124 main.ONOSbench.onosRemoveRaftLogs()
125
126 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700127 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700128 main.ONOSbench.onosUninstall( node.ip_address )
129
130 # Make sure ONOS is DEAD
131 main.log.info( "Killing any ONOS processes" )
132 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700133 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700134 killed = main.ONOSbench.onosKill( node.ip_address )
135 killResults = killResults and killed
136
137 cleanInstallResult = main.TRUE
138 gitPullResult = main.TRUE
139
140 main.step( "Starting Mininet" )
141 # scp topo file to mininet
142 # TODO: move to params?
143 topoName = "obelisk.py"
144 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700145 main.ONOSbench.scp( main.Mininet1,
146 filePath + topoName,
147 main.Mininet1.home,
148 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700149 mnResult = main.Mininet1.startNet( )
150 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
151 onpass="Mininet Started",
152 onfail="Error starting Mininet" )
153
154 main.step( "Git checkout and pull " + gitBranch )
155 if PULLCODE:
156 main.ONOSbench.gitCheckout( gitBranch )
157 gitPullResult = main.ONOSbench.gitPull()
158 # values of 1 or 3 are good
159 utilities.assert_lesser( expect=0, actual=gitPullResult,
160 onpass="Git pull successful",
161 onfail="Git pull failed" )
162 main.ONOSbench.getVersion( report=True )
163
164 main.step( "Using mvn clean install" )
165 cleanInstallResult = main.TRUE
166 if PULLCODE and gitPullResult == main.TRUE:
167 cleanInstallResult = main.ONOSbench.cleanInstall()
168 else:
169 main.log.warn( "Did not pull new code so skipping mvn " +
170 "clean install" )
171 utilities.assert_equals( expect=main.TRUE,
172 actual=cleanInstallResult,
173 onpass="MCI successful",
174 onfail="MCI failed" )
175 # GRAPHS
176 # NOTE: important params here:
177 # job = name of Jenkins job
178 # Plot Name = Plot-HA, only can be used if multiple plots
179 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700180 job = "HAstopNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700181 plotName = "Plot-HA"
Jon Hallff566d52016-01-15 14:45:36 -0800182 index = "1"
Jon Hall5cf14d52015-07-16 12:15:19 -0700183 graphs = '<ac:structured-macro ac:name="html">\n'
184 graphs += '<ac:plain-text-body><![CDATA[\n'
185 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
Jon Halla9845df2016-01-15 14:55:58 -0800186 '/plot/' + plotName + '/getPlot?index=' + index +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700187 '&width=500&height=300"' +\
188 'noborder="0" width="500" height="300" scrolling="yes" ' +\
189 'seamless="seamless"></iframe>\n'
190 graphs += ']]></ac:plain-text-body>\n'
191 graphs += '</ac:structured-macro>\n'
192 main.log.wiki(graphs)
193
194 main.step( "Creating ONOS package" )
Jon Hall3b489db2015-10-05 14:38:37 -0700195 # copy gen-partions file to ONOS
196 # NOTE: this assumes TestON and ONOS are on the same machine
197 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
198 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
199 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
200 main.ONOSbench.ip_address,
201 srcFile,
202 dstDir,
203 pwd=main.ONOSbench.pwd,
204 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700205 packageResult = main.ONOSbench.onosPackage()
206 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
207 onpass="ONOS package successful",
208 onfail="ONOS package failed" )
209
210 main.step( "Installing ONOS package" )
211 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700212 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700213 tmpResult = main.ONOSbench.onosInstall( options="-f",
214 node=node.ip_address )
215 onosInstallResult = onosInstallResult and tmpResult
216 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
217 onpass="ONOS install successful",
218 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700219 # clean up gen-partitions file
220 try:
221 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
222 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
223 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
224 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
225 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
226 str( main.ONOSbench.handle.before ) )
227 except ( pexpect.TIMEOUT, pexpect.EOF ):
228 main.log.exception( "ONOSbench: pexpect exception found:" +
229 main.ONOSbench.handle.before )
230 main.cleanup()
231 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700232
233 main.step( "Checking if ONOS is up yet" )
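        # Give each node up to two chances to report that it is up before
        # this step is marked as failed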
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
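        # Start a CLI session to every node in parallel, then AND the
        # per-thread results together after joining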
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
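        # NOTE: cases that stop ONOS nodes are expected to prune the stopped
        #       indices from main.activeNodes so that later checks only talk
        #       to running instances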

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "App Ids check" )
        time.sleep( 60 )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        if cliResults == main.FALSE:
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

    def CASE2( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                               "and check that an ONOS node becomes the " +\
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
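        # Verify the assignment by reading back each switch's controller list
        # and checking that every ONOS node appears as a tcp:<ip> target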
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def CASE21( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7-node cluster, but will work with any size
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
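                # NOTE: the "% main.numCtrls" below wraps the 7-node
                #       assignment onto however many controllers are running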
391 if i == 1:
392 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700393 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700394 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700395 elif i == 2:
Jon Halle1a3b752015-07-22 13:02:46 -0700396 c = 1 % main.numCtrls
397 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700398 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700399 elif i == 3:
Jon Halle1a3b752015-07-22 13:02:46 -0700400 c = 1 % main.numCtrls
401 ip = main.nodes[ c ].ip_address # ONOS2
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700402 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700403 elif i == 4:
Jon Halle1a3b752015-07-22 13:02:46 -0700404 c = 3 % main.numCtrls
405 ip = main.nodes[ c ].ip_address # ONOS4
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700406 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700407 elif i == 5:
Jon Halle1a3b752015-07-22 13:02:46 -0700408 c = 2 % main.numCtrls
409 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700410 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700411 elif i == 6:
Jon Halle1a3b752015-07-22 13:02:46 -0700412 c = 2 % main.numCtrls
413 ip = main.nodes[ c ].ip_address # ONOS3
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700414 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700415 elif i == 7:
Jon Halle1a3b752015-07-22 13:02:46 -0700416 c = 5 % main.numCtrls
417 ip = main.nodes[ c ].ip_address # ONOS6
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700418 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700419 elif i >= 8 and i <= 17:
Jon Halle1a3b752015-07-22 13:02:46 -0700420 c = 4 % main.numCtrls
421 ip = main.nodes[ c ].ip_address # ONOS5
Jon Hall5cf14d52015-07-16 12:15:19 -0700422 dpid = '3' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700423 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700424 elif i >= 18 and i <= 27:
Jon Halle1a3b752015-07-22 13:02:46 -0700425 c = 6 % main.numCtrls
426 ip = main.nodes[ c ].ip_address # ONOS7
Jon Hall5cf14d52015-07-16 12:15:19 -0700427 dpid = '6' + str( i ).zfill( 3 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700428 deviceId = onosCli.getDevice( dpid ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700429 elif i == 28:
430 c = 0
Jon Halle1a3b752015-07-22 13:02:46 -0700431 ip = main.nodes[ c ].ip_address # ONOS1
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700432 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700433 else:
434 main.log.error( "You didn't write an else statement for " +
435 "switch s" + str( i ) )
436 roleCall = main.FALSE
437 # Assign switch
438 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
439 # TODO: make this controller dynamic
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700440 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
Jon Hall5cf14d52015-07-16 12:15:19 -0700441 ipList.append( ip )
442 deviceList.append( deviceId )
443 except ( AttributeError, AssertionError ):
444 main.log.exception( "Something is wrong with ONOS device view" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700445 main.log.info( onosCli.devices() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700446 utilities.assert_equals(
447 expect=main.TRUE,
448 actual=roleCall,
449 onpass="Re-assigned switch mastership to designated controller",
450 onfail="Something wrong with deviceRole calls" )
451
452 main.step( "Check mastership was correctly assigned" )
453 roleCheck = main.TRUE
454 # NOTE: This is due to the fact that device mastership change is not
455 # atomic and is actually a multi step process
456 time.sleep( 5 )
457 for i in range( len( ipList ) ):
458 ip = ipList[i]
459 deviceId = deviceList[i]
460 # Check assignment
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700461 master = onosCli.getRole( deviceId ).get( 'master' )
Jon Hall5cf14d52015-07-16 12:15:19 -0700462 if ip in master:
463 roleCheck = roleCheck and main.TRUE
464 else:
465 roleCheck = roleCheck and main.FALSE
466 main.log.error( "Error, controller " + ip + " is not" +
467 " master " + "of device " +
468 str( deviceId ) + ". Master is " +
469 repr( master ) + "." )
470 utilities.assert_equals(
471 expect=main.TRUE,
472 actual=roleCheck,
473 onpass="Switches were successfully reassigned to designated " +
474 "controller",
475 onfail="Switches were not successfully reassigned" )
476
477 def CASE3( self, main ):
478 """
479 Assign intents
480 """
481 import time
482 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700483 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700484 assert main, "main not defined"
485 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700486 assert main.CLIs, "main.CLIs not defined"
487 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700488 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700489 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700490 "assign predetermined host-to-host intents." +\
491 " After installation, check that the intent" +\
492 " is distributed to all nodes and the state" +\
493 " is INSTALLED"
494
495 # install onos-app-fwd
496 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700497 onosCli = main.CLIs[ main.activeNodes[0] ]
498 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700499 utilities.assert_equals( expect=main.TRUE, actual=installResults,
500 onpass="Install fwd successful",
501 onfail="Install fwd failed" )
502
503 main.step( "Check app ids" )
504 appCheck = main.TRUE
505 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700506 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700507 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700508 name="appToIDCheck-" + str( i ),
509 args=[] )
510 threads.append( t )
511 t.start()
512
513 for t in threads:
514 t.join()
515 appCheck = appCheck and t.result
516 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700517 main.log.warn( onosCli.apps() )
518 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700519 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
520 onpass="App Ids seem to be correct",
521 onfail="Something is wrong with app Ids" )
522
523 main.step( "Discovering Hosts( Via pingall for now )" )
524 # FIXME: Once we have a host discovery mechanism, use that instead
525 # REACTIVE FWD test
526 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700527 passMsg = "Reactive Pingall test passed"
528 time1 = time.time()
529 pingResult = main.Mininet1.pingall()
530 time2 = time.time()
531 if not pingResult:
532 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700533 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700534 passMsg += " on the second try"
535 utilities.assert_equals(
536 expect=main.TRUE,
537 actual=pingResult,
538 onpass= passMsg,
539 onfail="Reactive Pingall failed, " +
540 "one or more ping pairs failed" )
541 main.log.info( "Time for pingall: %2f seconds" %
542 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700543 # timeout for fwd flows
544 time.sleep( 11 )
545 # uninstall onos-app-fwd
546 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700547 node = main.activeNodes[0]
548 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700549 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
550 onpass="Uninstall fwd successful",
551 onfail="Uninstall fwd failed" )
552
553 main.step( "Check app ids" )
554 threads = []
555 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700556 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700557 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700558 name="appToIDCheck-" + str( i ),
559 args=[] )
560 threads.append( t )
561 t.start()
562
563 for t in threads:
564 t.join()
565 appCheck2 = appCheck2 and t.result
566 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700567 node = main.activeNodes[0]
568 main.log.warn( main.CLIs[node].apps() )
569 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700570 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
571 onpass="App Ids seem to be correct",
572 onfail="Something is wrong with app Ids" )
573
574 main.step( "Add host intents via cli" )
575 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700576 # TODO: move the host numbers to params
577 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700578 intentAddResult = True
579 hostResult = main.TRUE
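        # NOTE: the loop below assumes host MACs are assigned sequentially in
        #       this topology, so host hN has a MAC ending in N as two hex
        #       digits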
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                    str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                    str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
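                # The test expects 14 intent work partitions, named
                # "intent-partition-0" through "intent-partition-13", each
                # with an elected leader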
674 for i in range( 14 ):
675 topics.append( "intent-partition-" + str( i ) )
676 main.log.debug( topics )
677 ONOStopics = [ j['topic'] for j in parsedLeaders ]
678 for topic in topics:
679 if topic not in ONOStopics:
680 main.log.error( "Error: " + topic +
681 " not in leaders" )
682 missing = True
683 else:
684 main.log.error( "leaders() returned None" )
685 except ( ValueError, TypeError ):
686 main.log.exception( "Error parsing leaders" )
687 main.log.error( repr( leaders ) )
688 # Check all nodes
689 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700690 for i in main.activeNodes:
691 response = main.CLIs[i].leaders( jsonFormat=False)
692 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700693 str( response ) )
694
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700695 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700696 try:
697 if partitions :
698 parsedPartitions = json.loads( partitions )
699 main.log.warn( json.dumps( parsedPartitions,
700 sort_keys=True,
701 indent=4,
702 separators=( ',', ': ' ) ) )
703 # TODO check for a leader in all paritions
704 # TODO check for consistency among nodes
705 else:
706 main.log.error( "partitions() returned None" )
707 except ( ValueError, TypeError ):
708 main.log.exception( "Error parsing partitions" )
709 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700710 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700711 try:
712 if pendingMap :
713 parsedPending = json.loads( pendingMap )
714 main.log.warn( json.dumps( parsedPending,
715 sort_keys=True,
716 indent=4,
717 separators=( ',', ': ' ) ) )
718 # TODO check something here?
719 else:
720 main.log.error( "pendingMap() returned None" )
721 except ( ValueError, TypeError ):
722 main.log.exception( "Error parsing pending map" )
723 main.log.error( repr( pendingMap ) )
724
725 intentAddResult = bool( intentAddResult and not missingIntents and
726 installedCheck )
727 if not intentAddResult:
728 main.log.error( "Error in pushing host intents to ONOS" )
729
730 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700731 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700732 correct = True
733 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700734 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700735 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700736 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700737 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700738 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700739 str( sorted( onosIds ) ) )
740 if sorted( ids ) != sorted( intentIds ):
741 main.log.warn( "Set of intent IDs doesn't match" )
742 correct = False
743 break
744 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700745 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700746 for intent in intents:
747 if intent[ 'state' ] != "INSTALLED":
748 main.log.warn( "Intent " + intent[ 'id' ] +
749 " is " + intent[ 'state' ] )
750 correct = False
751 break
752 if correct:
753 break
754 else:
755 time.sleep(1)
756 if not intentStop:
757 intentStop = time.time()
758 global gossipTime
759 gossipTime = intentStop - intentStart
760 main.log.info( "It took about " + str( gossipTime ) +
761 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700762 gossipPeriod = int( main.params['timers']['gossip'] )
763 maxGossipTime = gossipPeriod * len( main.activeNodes )
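        # Allow up to one gossip period per active node as the worst case for
        # the eventually consistent intent store to converge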
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )

    def CASE4( self, main ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"
        main.step( "Ping across added host intents" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        PingResult = main.TRUE
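        # Ping the same h8-h17 <-> h18-h27 pairs that host intents were added
        # for in CASE3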
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
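        # Poll intent states up to 40 times, sleeping 1 second between
        # checks, until every intent reports INSTALLED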
923 while not installedCheck and loopCount < 40:
924 installedCheck = True
925 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700926 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700927 intentStates = []
928 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
929 count = 0
930 # Iter through intents of a node
931 try:
932 for intent in json.loads( intents ):
933 state = intent.get( 'state', None )
934 if "INSTALLED" not in state:
935 installedCheck = False
936 intentId = intent.get( 'id', None )
937 intentStates.append( ( intentId, state ) )
938 except ( ValueError, TypeError ):
939 main.log.exception( "Error parsing intents." )
940 # Print states
941 intentStates.sort()
942 for i, s in intentStates:
943 count += 1
944 main.log.info( "%-6s%-15s%-15s" %
945 ( str( count ), str( i ), str( s ) ) )
946 if not installedCheck:
947 time.sleep( 1 )
948 loopCount += 1
949 utilities.assert_equals( expect=True, actual=installedCheck,
950 onpass="Intents are all INSTALLED",
951 onfail="Intents are not all in " +
952 "INSTALLED state" )
953
954 main.step( "Check leadership of topics" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700955 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700956 topicCheck = main.TRUE
957 try:
958 if leaders:
959 parsedLeaders = json.loads( leaders )
960 main.log.warn( json.dumps( parsedLeaders,
961 sort_keys=True,
962 indent=4,
963 separators=( ',', ': ' ) ) )
964 # check for all intent partitions
965 # check for election
966 # TODO: Look at Devices as topics now that it uses this system
967 topics = []
968 for i in range( 14 ):
969 topics.append( "intent-partition-" + str( i ) )
970 # FIXME: this should only be after we start the app
971 # FIXME: topics.append( "org.onosproject.election" )
972 # Print leaders output
973 main.log.debug( topics )
974 ONOStopics = [ j['topic'] for j in parsedLeaders ]
975 for topic in topics:
976 if topic not in ONOStopics:
977 main.log.error( "Error: " + topic +
978 " not in leaders" )
979 topicCheck = main.FALSE
980 else:
981 main.log.error( "leaders() returned None" )
982 topicCheck = main.FALSE
983 except ( ValueError, TypeError ):
984 topicCheck = main.FALSE
985 main.log.exception( "Error parsing leaders" )
986 main.log.error( repr( leaders ) )
987 # TODO: Check for a leader of these topics
988 # Check all nodes
989 if topicCheck:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700990 for i in main.activeNodes:
991 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700992 response = node.leaders( jsonFormat=False)
993 main.log.warn( str( node.name ) + " leaders output: \n" +
994 str( response ) )
995
996 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
997 onpass="intent Partitions is in leaders",
998 onfail="Some topics were lost " )
999 # Print partitions
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001000 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001001 try:
1002 if partitions :
1003 parsedPartitions = json.loads( partitions )
1004 main.log.warn( json.dumps( parsedPartitions,
1005 sort_keys=True,
1006 indent=4,
1007 separators=( ',', ': ' ) ) )
1008 # TODO check for a leader in all paritions
1009 # TODO check for consistency among nodes
1010 else:
1011 main.log.error( "partitions() returned None" )
1012 except ( ValueError, TypeError ):
1013 main.log.exception( "Error parsing partitions" )
1014 main.log.error( repr( partitions ) )
1015 # Print Pending Map
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001016 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001017 try:
1018 if pendingMap :
1019 parsedPending = json.loads( pendingMap )
1020 main.log.warn( json.dumps( parsedPending,
1021 sort_keys=True,
1022 indent=4,
1023 separators=( ',', ': ' ) ) )
1024 # TODO check something here?
1025 else:
1026 main.log.error( "pendingMap() returned None" )
1027 except ( ValueError, TypeError ):
1028 main.log.exception( "Error parsing pending map" )
1029 main.log.error( repr( pendingMap ) )
1030
1031 if not installedCheck:
1032 main.log.info( "Waiting 60 seconds to see if the state of " +
1033 "intents change" )
1034 time.sleep( 60 )
1035 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001036 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001037 intentStates = []
1038 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1039 count = 0
1040 # Iter through intents of a node
1041 try:
1042 for intent in json.loads( intents ):
1043 state = intent.get( 'state', None )
1044 if "INSTALLED" not in state:
1045 installedCheck = False
1046 intentId = intent.get( 'id', None )
1047 intentStates.append( ( intentId, state ) )
1048 except ( ValueError, TypeError ):
1049 main.log.exception( "Error parsing intents." )
1050 intentStates.sort()
1051 for i, s in intentStates:
1052 count += 1
1053 main.log.info( "%-6s%-15s%-15s" %
1054 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001055 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -07001056 try:
1057 missing = False
1058 if leaders:
1059 parsedLeaders = json.loads( leaders )
1060 main.log.warn( json.dumps( parsedLeaders,
1061 sort_keys=True,
1062 indent=4,
1063 separators=( ',', ': ' ) ) )
1064 # check for all intent partitions
1065 # check for election
1066 topics = []
1067 for i in range( 14 ):
1068 topics.append( "intent-partition-" + str( i ) )
1069 # FIXME: this should only be after we start the app
1070 topics.append( "org.onosproject.election" )
1071 main.log.debug( topics )
1072 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1073 for topic in topics:
1074 if topic not in ONOStopics:
1075 main.log.error( "Error: " + topic +
1076 " not in leaders" )
1077 missing = True
1078 else:
1079 main.log.error( "leaders() returned None" )
1080 except ( ValueError, TypeError ):
1081 main.log.exception( "Error parsing leaders" )
1082 main.log.error( repr( leaders ) )
1083 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001084 for i in main.activeNodes:
1085 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07001086 response = node.leaders( jsonFormat=False)
1087 main.log.warn( str( node.name ) + " leaders output: \n" +
1088 str( response ) )
1089
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001090 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -07001091 try:
1092 if partitions :
1093 parsedPartitions = json.loads( partitions )
1094 main.log.warn( json.dumps( parsedPartitions,
1095 sort_keys=True,
1096 indent=4,
1097 separators=( ',', ': ' ) ) )
1098 # TODO check for a leader in all paritions
1099 # TODO check for consistency among nodes
1100 else:
1101 main.log.error( "partitions() returned None" )
1102 except ( ValueError, TypeError ):
1103 main.log.exception( "Error parsing partitions" )
1104 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001105 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -07001106 try:
1107 if pendingMap :
1108 parsedPending = json.loads( pendingMap )
1109 main.log.warn( json.dumps( parsedPending,
1110 sort_keys=True,
1111 indent=4,
1112 separators=( ',', ': ' ) ) )
1113 # TODO check something here?
1114 else:
1115 main.log.error( "pendingMap() returned None" )
1116 except ( ValueError, TypeError ):
1117 main.log.exception( "Error parsing pending map" )
1118 main.log.error( repr( pendingMap ) )
1119 # Print flowrules
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001120 node = main.activeNodes[0]
1121 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001122 main.step( "Wait a minute then ping again" )
1123 # the wait is above
1124 PingResult = main.TRUE
1125 for i in range( 8, 18 ):
1126 ping = main.Mininet1.pingHost( src="h" + str( i ),
1127 target="h" + str( i + 10 ) )
1128 PingResult = PingResult and ping
1129 if ping == main.FALSE:
1130 main.log.warn( "Ping failed between h" + str( i ) +
1131 " and h" + str( i + 10 ) )
1132 elif ping == main.TRUE:
1133 main.log.info( "Ping test passed!" )
1134 # Don't set PingResult or you'd override failures
1135 if PingResult == main.FALSE:
1136 main.log.error(
1137 "Intents have not been installed correctly, pings failed." )
1138 # TODO: pretty print
1139 main.log.warn( "ONOS1 intents: " )
1140 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001141 tmpIntents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -07001142 main.log.warn( json.dumps( json.loads( tmpIntents ),
1143 sort_keys=True,
1144 indent=4,
1145 separators=( ',', ': ' ) ) )
1146 except ( ValueError, TypeError ):
1147 main.log.warn( repr( tmpIntents ) )
1148 utilities.assert_equals(
1149 expect=main.TRUE,
1150 actual=PingResult,
1151 onpass="Intents have been installed correctly and pings work",
1152 onfail="Intents have not been installed correctly, pings failed." )
1153
1154 def CASE5( self, main ):
1155 """
1156 Reading state of ONOS
1157 """
1158 import json
1159 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001160 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001161 assert main, "main not defined"
1162 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001163 assert main.CLIs, "main.CLIs not defined"
1164 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001165
1166 main.case( "Setting up and gathering data for current state" )
1167 # The general idea for this test case is to pull the state of
1168 # ( intents,flows, topology,... ) from each ONOS node
1169 # We can then compare them with each other and also with past states
1170
1171 main.step( "Check that each switch has a master" )
1172 global mastershipState
1173 mastershipState = '[]'
1174
1175 # Assert that each device has a master
1176 rolesNotNull = main.TRUE
1177 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001178 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001179 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001180 name="rolesNotNull-" + str( i ),
1181 args=[] )
1182 threads.append( t )
1183 t.start()
1184
1185 for t in threads:
1186 t.join()
1187 rolesNotNull = rolesNotNull and t.result
1188 utilities.assert_equals(
1189 expect=main.TRUE,
1190 actual=rolesNotNull,
1191 onpass="Each device has a master",
1192 onfail="Some devices don't have a master assigned" )
1193
1194 main.step( "Get the Mastership of each switch from each controller" )
1195 ONOSMastership = []
1196 mastershipCheck = main.FALSE
1197 consistentMastership = True
1198 rolesResults = True
1199 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001200 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001201 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001202 name="roles-" + str( i ),
1203 args=[] )
1204 threads.append( t )
1205 t.start()
1206
1207 for t in threads:
1208 t.join()
1209 ONOSMastership.append( t.result )
1210
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001211 for i in range( len( ONOSMastership ) ):
1212 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001213 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001214 main.log.error( "Error in getting ONOS" + node + " roles" )
1215 main.log.warn( "ONOS" + node + " mastership response: " +
1216 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001217 rolesResults = False
1218 utilities.assert_equals(
1219 expect=True,
1220 actual=rolesResults,
1221 onpass="No error in reading roles output",
1222 onfail="Error in reading roles from ONOS" )
1223
1224 main.step( "Check for consistency in roles from each controller" )
1225 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1226 main.log.info(
1227 "Switch roles are consistent across all ONOS nodes" )
1228 else:
1229 consistentMastership = False
1230 utilities.assert_equals(
1231 expect=True,
1232 actual=consistentMastership,
1233 onpass="Switch roles are consistent across all ONOS nodes",
1234 onfail="ONOS nodes have different views of switch roles" )
1235
1236 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001237 for i in range( len( main.activeNodes ) ):
1238 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001239 try:
1240 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001241 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001242 json.dumps(
1243 json.loads( ONOSMastership[ i ] ),
1244 sort_keys=True,
1245 indent=4,
1246 separators=( ',', ': ' ) ) )
1247 except ( ValueError, TypeError ):
1248 main.log.warn( repr( ONOSMastership[ i ] ) )
1249 elif rolesResults and consistentMastership:
1250 mastershipCheck = main.TRUE
1251 mastershipState = ONOSMastership[ 0 ]
1252
1253 main.step( "Get the intents from each controller" )
1254 global intentState
1255 intentState = []
1256 ONOSIntents = []
1257 intentCheck = main.FALSE
1258 consistentIntents = True
1259 intentsResults = True
1260 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001261 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001262 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001263 name="intents-" + str( i ),
1264 args=[],
1265 kwargs={ 'jsonFormat': True } )
1266 threads.append( t )
1267 t.start()
1268
1269 for t in threads:
1270 t.join()
1271 ONOSIntents.append( t.result )
1272
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001273 for i in range( len( ONOSIntents ) ):
1274 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001275 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001276 main.log.error( "Error in getting ONOS" + node + " intents" )
1277 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001278 repr( ONOSIntents[ i ] ) )
1279 intentsResults = False
1280 utilities.assert_equals(
1281 expect=True,
1282 actual=intentsResults,
1283 onpass="No error in reading intents output",
1284 onfail="Error in reading intents from ONOS" )
1285
1286 main.step( "Check for consistency in Intents from each controller" )
1287 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1288 main.log.info( "Intents are consistent across all ONOS " +
1289 "nodes" )
1290 else:
1291 consistentIntents = False
1292 main.log.error( "Intents not consistent" )
1293 utilities.assert_equals(
1294 expect=True,
1295 actual=consistentIntents,
1296 onpass="Intents are consistent across all ONOS nodes",
1297 onfail="ONOS nodes have different views of intents" )
1298
1299 if intentsResults:
1300 # Try to make it easy to figure out what is happening
1301 #
1302 # Intent ONOS1 ONOS2 ...
1303 # 0x01 INSTALLED INSTALLING
1304 # ... ... ...
1305 # ... ... ...
1306 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001307 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001308 title += " " * 10 + "ONOS" + str( n + 1 )
1309 main.log.warn( title )
1310 # get all intent keys in the cluster
1311 keys = []
1312 for nodeStr in ONOSIntents:
1313 node = json.loads( nodeStr )
1314 for intent in node:
1315 keys.append( intent.get( 'id' ) )
1316 keys = set( keys )
1317 for key in keys:
1318 row = "%-13s" % key
1319 for nodeStr in ONOSIntents:
1320 node = json.loads( nodeStr )
1321 for intent in node:
1322 if intent.get( 'id', "Error" ) == key:
1323 row += "%-15s" % intent.get( 'state' )
1324 main.log.warn( row )
1325 # End table view
1326
1327 if intentsResults and not consistentIntents:
1328 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001329 n = str( main.activeNodes[-1] + 1 )
1330 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001331 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1332 sort_keys=True,
1333 indent=4,
1334 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001335 for i in range( len( ONOSIntents ) ):
1336 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001337 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001338 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001339 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1340 sort_keys=True,
1341 indent=4,
1342 separators=( ',', ': ' ) ) )
1343 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001344 main.log.debug( "ONOS" + node + " intents match ONOS" +
1345 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001346 elif intentsResults and consistentIntents:
1347 intentCheck = main.TRUE
1348 intentState = ONOSIntents[ 0 ]
1349
1350 main.step( "Get the flows from each controller" )
1351 global flowState
1352 flowState = []
1353 ONOSFlows = []
1354 ONOSFlowsJson = []
1355 flowCheck = main.FALSE
1356 consistentFlows = True
1357 flowsResults = True
1358 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001359 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001360 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001361 name="flows-" + str( i ),
1362 args=[],
1363 kwargs={ 'jsonFormat': True } )
1364 threads.append( t )
1365 t.start()
1366
1367 # NOTE: Flows command can take some time to run
1368 time.sleep(30)
1369 for t in threads:
1370 t.join()
1371 result = t.result
1372 ONOSFlows.append( result )
1373
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001374 for i in range( len( ONOSFlows ) ):
1375 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001376 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1377 main.log.error( "Error in getting ONOS" + num + " flows" )
1378 main.log.warn( "ONOS" + num + " flows response: " +
1379 repr( ONOSFlows[ i ] ) )
1380 flowsResults = False
1381 ONOSFlowsJson.append( None )
1382 else:
1383 try:
1384 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1385 except ( ValueError, TypeError ):
1386 # FIXME: change this to log.error?
1387 main.log.exception( "Error in parsing ONOS" + num +
1388 " response as json." )
1389 main.log.error( repr( ONOSFlows[ i ] ) )
1390 ONOSFlowsJson.append( None )
1391 flowsResults = False
1392 utilities.assert_equals(
1393 expect=True,
1394 actual=flowsResults,
1395 onpass="No error in reading flows output",
1396 onfail="Error in reading flows from ONOS" )
1397
1398 main.step( "Check for consistency in Flows from each controller" )
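        # NOTE: consistency here only means the flow count matches on every node;
        #       the flow contents themselves are not compared.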
1399 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1400 if all( tmp ):
1401 main.log.info( "Flow count is consistent across all ONOS nodes" )
1402 else:
1403 consistentFlows = False
1404 utilities.assert_equals(
1405 expect=True,
1406 actual=consistentFlows,
1407 onpass="The flow count is consistent across all ONOS nodes",
1408 onfail="ONOS nodes have different flow counts" )
1409
1410 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001411 for i in range( len( ONOSFlows ) ):
1412 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001413 try:
1414 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001415 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001416 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1417 indent=4, separators=( ',', ': ' ) ) )
1418 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001419 main.log.warn( "ONOS" + node + " flows: " +
1420 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001421 elif flowsResults and consistentFlows:
1422 flowCheck = main.TRUE
1423 flowState = ONOSFlows[ 0 ]
1424
1425 main.step( "Get the OF Table entries" )
1426 global flows
1427 flows = []
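        # Snapshot the OpenFlow (1.3) tables of switches s1-s28; CASE7 re-reads the
        # tables after the node failure and flags any differences against this snapshot.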
1428 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001429 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001430 if flowCheck == main.FALSE:
1431 for table in flows:
1432 main.log.warn( table )
1433 # TODO: Compare switch flow tables with ONOS flow tables
1434
1435 main.step( "Start continuous pings" )
1436 main.Mininet2.pingLong(
1437 src=main.params[ 'PING' ][ 'source1' ],
1438 target=main.params[ 'PING' ][ 'target1' ],
1439 pingTime=500 )
1440 main.Mininet2.pingLong(
1441 src=main.params[ 'PING' ][ 'source2' ],
1442 target=main.params[ 'PING' ][ 'target2' ],
1443 pingTime=500 )
1444 main.Mininet2.pingLong(
1445 src=main.params[ 'PING' ][ 'source3' ],
1446 target=main.params[ 'PING' ][ 'target3' ],
1447 pingTime=500 )
1448 main.Mininet2.pingLong(
1449 src=main.params[ 'PING' ][ 'source4' ],
1450 target=main.params[ 'PING' ][ 'target4' ],
1451 pingTime=500 )
1452 main.Mininet2.pingLong(
1453 src=main.params[ 'PING' ][ 'source5' ],
1454 target=main.params[ 'PING' ][ 'target5' ],
1455 pingTime=500 )
1456 main.Mininet2.pingLong(
1457 src=main.params[ 'PING' ][ 'source6' ],
1458 target=main.params[ 'PING' ][ 'target6' ],
1459 pingTime=500 )
1460 main.Mininet2.pingLong(
1461 src=main.params[ 'PING' ][ 'source7' ],
1462 target=main.params[ 'PING' ][ 'target7' ],
1463 pingTime=500 )
1464 main.Mininet2.pingLong(
1465 src=main.params[ 'PING' ][ 'source8' ],
1466 target=main.params[ 'PING' ][ 'target8' ],
1467 pingTime=500 )
1468 main.Mininet2.pingLong(
1469 src=main.params[ 'PING' ][ 'source9' ],
1470 target=main.params[ 'PING' ][ 'target9' ],
1471 pingTime=500 )
1472 main.Mininet2.pingLong(
1473 src=main.params[ 'PING' ][ 'source10' ],
1474 target=main.params[ 'PING' ][ 'target10' ],
1475 pingTime=500 )
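        # The long-running pings above provide background dataplane traffic across the
        # node failure; they are stopped in CASE7 with pingLongKill().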
1476
1477 main.step( "Collecting topology information from ONOS" )
1478 devices = []
1479 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001480 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001481 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001482 name="devices-" + str( i ),
1483 args=[ ] )
1484 threads.append( t )
1485 t.start()
1486
1487 for t in threads:
1488 t.join()
1489 devices.append( t.result )
1490 hosts = []
1491 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001492 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001493 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001494 name="hosts-" + str( i ),
1495 args=[ ] )
1496 threads.append( t )
1497 t.start()
1498
1499 for t in threads:
1500 t.join()
1501 try:
1502 hosts.append( json.loads( t.result ) )
1503 except ( ValueError, TypeError ):
1504 # FIXME: better handling of this, print which node
1505 # Maybe use thread name?
1506 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001507 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001508 hosts.append( None )
1509
1510 ports = []
1511 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001512 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001513 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001514 name="ports-" + str( i ),
1515 args=[ ] )
1516 threads.append( t )
1517 t.start()
1518
1519 for t in threads:
1520 t.join()
1521 ports.append( t.result )
1522 links = []
1523 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001524 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001525 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001526 name="links-" + str( i ),
1527 args=[ ] )
1528 threads.append( t )
1529 t.start()
1530
1531 for t in threads:
1532 t.join()
1533 links.append( t.result )
1534 clusters = []
1535 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001536 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001537 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001538 name="clusters-" + str( i ),
1539 args=[ ] )
1540 threads.append( t )
1541 t.start()
1542
1543 for t in threads:
1544 t.join()
1545 clusters.append( t.result )
1546 # Compare json objects for hosts and dataplane clusters
1547
1548 # hosts
1549 main.step( "Host view is consistent across ONOS nodes" )
1550 consistentHostsResult = main.TRUE
1551 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001552 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001553 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001554 if hosts[ controller ] == hosts[ 0 ]:
1555 continue
1556 else: # hosts not consistent
1557 main.log.error( "hosts from ONOS" +
1558 controllerStr +
1559                                          " are inconsistent with ONOS1" )
1560 main.log.warn( repr( hosts[ controller ] ) )
1561 consistentHostsResult = main.FALSE
1562
1563 else:
1564 main.log.error( "Error in getting ONOS hosts from ONOS" +
1565 controllerStr )
1566 consistentHostsResult = main.FALSE
1567 main.log.warn( "ONOS" + controllerStr +
1568 " hosts response: " +
1569 repr( hosts[ controller ] ) )
1570 utilities.assert_equals(
1571 expect=main.TRUE,
1572 actual=consistentHostsResult,
1573 onpass="Hosts view is consistent across all ONOS nodes",
1574 onfail="ONOS nodes have different views of hosts" )
1575
1576 main.step( "Each host has an IP address" )
1577 ipResult = main.TRUE
1578 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001579 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001580 if hosts[ controller ]:
1581 for host in hosts[ controller ]:
1582 if not host.get( 'ipAddresses', [ ] ):
1583 main.log.error( "Error with host ips on controller" +
1584 controllerStr + ": " + str( host ) )
1585 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001586 utilities.assert_equals(
1587 expect=main.TRUE,
1588 actual=ipResult,
1589 onpass="The ips of the hosts aren't empty",
1590 onfail="The ip of at least one host is missing" )
1591
1592 # Strongly connected clusters of devices
1593 main.step( "Cluster view is consistent across ONOS nodes" )
1594 consistentClustersResult = main.TRUE
1595 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001596 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001597 if "Error" not in clusters[ controller ]:
1598 if clusters[ controller ] == clusters[ 0 ]:
1599 continue
1600 else: # clusters not consistent
1601 main.log.error( "clusters from ONOS" + controllerStr +
1602                                      " are inconsistent with ONOS1" )
1603 consistentClustersResult = main.FALSE
1604
1605 else:
1606 main.log.error( "Error in getting dataplane clusters " +
1607 "from ONOS" + controllerStr )
1608 consistentClustersResult = main.FALSE
1609 main.log.warn( "ONOS" + controllerStr +
1610 " clusters response: " +
1611 repr( clusters[ controller ] ) )
1612 utilities.assert_equals(
1613 expect=main.TRUE,
1614 actual=consistentClustersResult,
1615 onpass="Clusters view is consistent across all ONOS nodes",
1616 onfail="ONOS nodes have different views of clusters" )
1617 # there should always only be one cluster
1618 main.step( "Cluster view correct across ONOS nodes" )
1619 try:
1620 numClusters = len( json.loads( clusters[ 0 ] ) )
1621 except ( ValueError, TypeError ):
1622 main.log.exception( "Error parsing clusters[0]: " +
1623 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001624 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001625 clusterResults = main.FALSE
1626 if numClusters == 1:
1627 clusterResults = main.TRUE
1628 utilities.assert_equals(
1629 expect=1,
1630 actual=numClusters,
1631 onpass="ONOS shows 1 SCC",
1632 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1633
1634 main.step( "Comparing ONOS topology to MN" )
1635 devicesResults = main.TRUE
1636 linksResults = main.TRUE
1637 hostsResults = main.TRUE
1638 mnSwitches = main.Mininet1.getSwitches()
1639 mnLinks = main.Mininet1.getLinks()
1640 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001641         for controller in range( len( main.activeNodes ) ):
1642 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001643 if devices[ controller ] and ports[ controller ] and\
1644 "Error" not in devices[ controller ] and\
1645 "Error" not in ports[ controller ]:
Jon Hall6e709752016-02-01 13:38:46 -08001646 currentDevicesResult = main.Mininet1.compareSwitches(
1647 mnSwitches,
1648 json.loads( devices[ controller ] ),
1649 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001650 else:
1651 currentDevicesResult = main.FALSE
1652 utilities.assert_equals( expect=main.TRUE,
1653 actual=currentDevicesResult,
1654 onpass="ONOS" + controllerStr +
1655 " Switches view is correct",
1656 onfail="ONOS" + controllerStr +
1657 " Switches view is incorrect" )
1658 if links[ controller ] and "Error" not in links[ controller ]:
1659 currentLinksResult = main.Mininet1.compareLinks(
1660 mnSwitches, mnLinks,
1661 json.loads( links[ controller ] ) )
1662 else:
1663 currentLinksResult = main.FALSE
1664 utilities.assert_equals( expect=main.TRUE,
1665 actual=currentLinksResult,
1666 onpass="ONOS" + controllerStr +
1667 " links view is correct",
1668 onfail="ONOS" + controllerStr +
1669 " links view is incorrect" )
1670
Jon Hall657cdf62015-12-17 14:40:51 -08001671 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001672 currentHostsResult = main.Mininet1.compareHosts(
1673 mnHosts,
1674 hosts[ controller ] )
1675 else:
1676 currentHostsResult = main.FALSE
1677 utilities.assert_equals( expect=main.TRUE,
1678 actual=currentHostsResult,
1679 onpass="ONOS" + controllerStr +
1680 " hosts exist in Mininet",
1681 onfail="ONOS" + controllerStr +
1682 " hosts don't match Mininet" )
1683
1684 devicesResults = devicesResults and currentDevicesResult
1685 linksResults = linksResults and currentLinksResult
1686 hostsResults = hostsResults and currentHostsResult
1687
1688 main.step( "Device information is correct" )
1689 utilities.assert_equals(
1690 expect=main.TRUE,
1691 actual=devicesResults,
1692 onpass="Device information is correct",
1693 onfail="Device information is incorrect" )
1694
1695 main.step( "Links are correct" )
1696 utilities.assert_equals(
1697 expect=main.TRUE,
1698 actual=linksResults,
1699             onpass="Links are correct",
1700 onfail="Links are incorrect" )
1701
1702 main.step( "Hosts are correct" )
1703 utilities.assert_equals(
1704 expect=main.TRUE,
1705 actual=hostsResults,
1706 onpass="Hosts are correct",
1707 onfail="Hosts are incorrect" )
1708
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001709 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001710 """
1711         The Failure-inducing case: stop a minority of the ONOS nodes.
1712 """
Jon Halle1a3b752015-07-22 13:02:46 -07001713 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001714 assert main, "main not defined"
1715 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001716 assert main.CLIs, "main.CLIs not defined"
1717 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001718 main.case( "Stop minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001719
1720 main.step( "Checking ONOS Logs for errors" )
1721 for node in main.nodes:
1722 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1723 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1724
Jon Hall3b489db2015-10-05 14:38:37 -07001725 n = len( main.nodes ) # Number of nodes
1726 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1727 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1728 if n > 3:
1729 main.kill.append( p - 1 )
1730 # NOTE: This only works for cluster sizes of 3,5, or 7.
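        # e.g. n=3 stops only ONOS1; n=5 stops ONOS1 and ONOS4; n=7 stops ONOS1 and
        # ONOS5 - a minority of the cluster in every supported size.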
1731
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001732 main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001733 killResults = main.TRUE
1734 for i in main.kill:
1735 killResults = killResults and\
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001736 main.ONOSbench.onosStop( main.nodes[i].ip_address )
1737 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001738 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001739 onpass="ONOS nodes stopped successfully",
1740 onfail="ONOS nodes NOT successfully stopped" )
1741
1742 def CASE62( self, main ):
1743 """
1744         Bring up the stopped ONOS nodes.
1745 """
1746 import time
1747 assert main.numCtrls, "main.numCtrls not defined"
1748 assert main, "main not defined"
1749 assert utilities.assert_equals, "utilities.assert_equals not defined"
1750 assert main.CLIs, "main.CLIs not defined"
1751 assert main.nodes, "main.nodes not defined"
1752 assert main.kill, "main.kill not defined"
1753 main.case( "Restart minority of ONOS nodes" )
1754
1755 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1756 startResults = main.TRUE
1757 restartTime = time.time()
1758 for i in main.kill:
1759 startResults = startResults and\
1760 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1761 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1762 onpass="ONOS nodes started successfully",
1763 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001764
1765 main.step( "Checking if ONOS is up yet" )
1766 count = 0
1767 onosIsupResult = main.FALSE
1768 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001769 onosIsupResult = main.TRUE
1770 for i in main.kill:
1771 onosIsupResult = onosIsupResult and\
1772 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001773 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001774 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1775 onpass="ONOS restarted successfully",
1776 onfail="ONOS restart NOT successful" )
1777
Jon Halle1a3b752015-07-22 13:02:46 -07001778 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001779 cliResults = main.TRUE
1780 for i in main.kill:
1781 cliResults = cliResults and\
1782 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001783 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001784 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1785 onpass="ONOS cli restarted",
1786 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001787 main.activeNodes.sort()
1788 try:
1789 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1790 "List of active nodes has duplicates, this likely indicates something was run out of order"
1791 except AssertionError:
1792 main.log.exception( "" )
1793 main.cleanup()
1794 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001795
1796         # Grab the time of restart so we can check how long the gossip
1797 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001798 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001799 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001800         # TODO: Make this configurable. Also, we are breaking the above timer
1801 time.sleep( 60 )
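        # Sanity check: dump node, leadership, and partition state from one active node
        # so post-restart cluster health is captured in the logs.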
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001802 node = main.activeNodes[0]
1803 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1804 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1805 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001806
1807 def CASE7( self, main ):
1808 """
1809 Check state after ONOS failure
1810 """
1811 import json
Jon Halle1a3b752015-07-22 13:02:46 -07001812 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001813 assert main, "main not defined"
1814 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001815 assert main.CLIs, "main.CLIs not defined"
1816 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001817 try:
1818 main.kill
1819 except AttributeError:
1820 main.kill = []
1821
Jon Hall5cf14d52015-07-16 12:15:19 -07001822 main.case( "Running ONOS Constant State Tests" )
1823
1824 main.step( "Check that each switch has a master" )
1825 # Assert that each device has a master
1826 rolesNotNull = main.TRUE
1827 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001828 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001829 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001830 name="rolesNotNull-" + str( i ),
1831 args=[ ] )
1832 threads.append( t )
1833 t.start()
1834
1835 for t in threads:
1836 t.join()
1837 rolesNotNull = rolesNotNull and t.result
1838 utilities.assert_equals(
1839 expect=main.TRUE,
1840 actual=rolesNotNull,
1841 onpass="Each device has a master",
1842 onfail="Some devices don't have a master assigned" )
1843
1844 main.step( "Read device roles from ONOS" )
1845 ONOSMastership = []
1846 consistentMastership = True
1847 rolesResults = True
1848 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001849 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001850 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001851 name="roles-" + str( i ),
1852 args=[] )
1853 threads.append( t )
1854 t.start()
1855
1856 for t in threads:
1857 t.join()
1858 ONOSMastership.append( t.result )
1859
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001860 for i in range( len( ONOSMastership ) ):
1861 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001862 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001863 main.log.error( "Error in getting ONOS" + node + " roles" )
1864 main.log.warn( "ONOS" + node + " mastership response: " +
1865 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001866 rolesResults = False
1867 utilities.assert_equals(
1868 expect=True,
1869 actual=rolesResults,
1870 onpass="No error in reading roles output",
1871 onfail="Error in reading roles from ONOS" )
1872
1873 main.step( "Check for consistency in roles from each controller" )
1874 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1875 main.log.info(
1876 "Switch roles are consistent across all ONOS nodes" )
1877 else:
1878 consistentMastership = False
1879 utilities.assert_equals(
1880 expect=True,
1881 actual=consistentMastership,
1882 onpass="Switch roles are consistent across all ONOS nodes",
1883 onfail="ONOS nodes have different views of switch roles" )
1884
1885 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001886 for i in range( len( ONOSMastership ) ):
1887 node = str( main.activeNodes[i] + 1 )
1888                 main.log.warn( "ONOS" + node + " roles: " +
1889 json.dumps( json.loads( ONOSMastership[ i ] ),
1890 sort_keys=True,
1891 indent=4,
1892 separators=( ',', ': ' ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001893
1894 # NOTE: we expect mastership to change on controller failure
Jon Hall5cf14d52015-07-16 12:15:19 -07001895
1896 main.step( "Get the intents and compare across all nodes" )
1897 ONOSIntents = []
1898 intentCheck = main.FALSE
1899 consistentIntents = True
1900 intentsResults = True
1901 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001902 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001903 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001904 name="intents-" + str( i ),
1905 args=[],
1906 kwargs={ 'jsonFormat': True } )
1907 threads.append( t )
1908 t.start()
1909
1910 for t in threads:
1911 t.join()
1912 ONOSIntents.append( t.result )
1913
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001914         for i in range( len( ONOSIntents ) ):
1915 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001916 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001917 main.log.error( "Error in getting ONOS" + node + " intents" )
1918 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001919 repr( ONOSIntents[ i ] ) )
1920 intentsResults = False
1921 utilities.assert_equals(
1922 expect=True,
1923 actual=intentsResults,
1924 onpass="No error in reading intents output",
1925 onfail="Error in reading intents from ONOS" )
1926
1927 main.step( "Check for consistency in Intents from each controller" )
1928 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1929 main.log.info( "Intents are consistent across all ONOS " +
1930 "nodes" )
1931 else:
1932 consistentIntents = False
1933
1934 # Try to make it easy to figure out what is happening
1935 #
1936 # Intent ONOS1 ONOS2 ...
1937 # 0x01 INSTALLED INSTALLING
1938 # ... ... ...
1939 # ... ... ...
1940 title = " ID"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001941 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001942 title += " " * 10 + "ONOS" + str( n + 1 )
1943 main.log.warn( title )
1944 # get all intent keys in the cluster
1945 keys = []
1946 for nodeStr in ONOSIntents:
1947 node = json.loads( nodeStr )
1948 for intent in node:
1949 keys.append( intent.get( 'id' ) )
1950 keys = set( keys )
1951 for key in keys:
1952 row = "%-13s" % key
1953 for nodeStr in ONOSIntents:
1954 node = json.loads( nodeStr )
1955 for intent in node:
1956 if intent.get( 'id' ) == key:
1957 row += "%-15s" % intent.get( 'state' )
1958 main.log.warn( row )
1959 # End table view
1960
1961 utilities.assert_equals(
1962 expect=True,
1963 actual=consistentIntents,
1964 onpass="Intents are consistent across all ONOS nodes",
1965 onfail="ONOS nodes have different views of intents" )
1966 intentStates = []
1967 for node in ONOSIntents: # Iter through ONOS nodes
1968 nodeStates = []
1969 # Iter through intents of a node
1970 try:
1971 for intent in json.loads( node ):
1972 nodeStates.append( intent[ 'state' ] )
1973 except ( ValueError, TypeError ):
1974 main.log.exception( "Error in parsing intents" )
1975 main.log.error( repr( node ) )
1976 intentStates.append( nodeStates )
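            # Log a histogram of intent states (e.g. INSTALLED vs. FAILED) for this node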
1977 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
1978 main.log.info( dict( out ) )
1979
1980 if intentsResults and not consistentIntents:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001981 for i in range( len( main.activeNodes ) ):
1982 node = str( main.activeNodes[i] + 1 )
1983 main.log.warn( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001984 main.log.warn( json.dumps(
1985 json.loads( ONOSIntents[ i ] ),
1986 sort_keys=True,
1987 indent=4,
1988 separators=( ',', ': ' ) ) )
1989 elif intentsResults and consistentIntents:
1990 intentCheck = main.TRUE
1991
1992 # NOTE: Store has no durability, so intents are lost across system
1993 # restarts
1994 main.step( "Compare current intents with intents before the failure" )
1995 # NOTE: this requires case 5 to pass for intentState to be set.
1996 # maybe we should stop the test if that fails?
1997 sameIntents = main.FALSE
1998 if intentState and intentState == ONOSIntents[ 0 ]:
1999 sameIntents = main.TRUE
2000 main.log.info( "Intents are consistent with before failure" )
2001 # TODO: possibly the states have changed? we may need to figure out
2002 # what the acceptable states are
2003 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
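            # The JSON strings are the same length but not identical; fall back to
            # checking that every pre-failure intent is still present after the failure.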
2004 sameIntents = main.TRUE
2005 try:
2006 before = json.loads( intentState )
2007 after = json.loads( ONOSIntents[ 0 ] )
2008 for intent in before:
2009 if intent not in after:
2010 sameIntents = main.FALSE
2011 main.log.debug( "Intent is not currently in ONOS " +
2012 "(at least in the same form):" )
2013 main.log.debug( json.dumps( intent ) )
2014 except ( ValueError, TypeError ):
2015 main.log.exception( "Exception printing intents" )
2016 main.log.debug( repr( ONOSIntents[0] ) )
2017 main.log.debug( repr( intentState ) )
2018 if sameIntents == main.FALSE:
2019 try:
2020 main.log.debug( "ONOS intents before: " )
2021 main.log.debug( json.dumps( json.loads( intentState ),
2022 sort_keys=True, indent=4,
2023 separators=( ',', ': ' ) ) )
2024 main.log.debug( "Current ONOS intents: " )
2025 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2026 sort_keys=True, indent=4,
2027 separators=( ',', ': ' ) ) )
2028 except ( ValueError, TypeError ):
2029 main.log.exception( "Exception printing intents" )
2030 main.log.debug( repr( ONOSIntents[0] ) )
2031 main.log.debug( repr( intentState ) )
2032 utilities.assert_equals(
2033 expect=main.TRUE,
2034 actual=sameIntents,
2035 onpass="Intents are consistent with before failure",
2036 onfail="The Intents changed during failure" )
2037 intentCheck = intentCheck and sameIntents
2038
2039 main.step( "Get the OF Table entries and compare to before " +
2040 "component failure" )
2041 FlowTables = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002042 for i in range( 28 ):
2043 main.log.info( "Checking flow table on s" + str( i + 1 ) )
GlennRC68467eb2015-11-16 18:01:01 -08002044 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2045 FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
Jon Hall5cf14d52015-07-16 12:15:19 -07002046 if FlowTables == main.FALSE:
GlennRC68467eb2015-11-16 18:01:01 -08002047 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2048
Jon Hall5cf14d52015-07-16 12:15:19 -07002049 utilities.assert_equals(
2050 expect=main.TRUE,
2051 actual=FlowTables,
2052 onpass="No changes were found in the flow tables",
2053 onfail="Changes were found in the flow tables" )
2054
2055 main.Mininet2.pingLongKill()
2056 '''
2057 main.step( "Check the continuous pings to ensure that no packets " +
2058 "were dropped during component failure" )
2059 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2060 main.params[ 'TESTONIP' ] )
2061 LossInPings = main.FALSE
2062 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2063 for i in range( 8, 18 ):
2064 main.log.info(
2065 "Checking for a loss in pings along flow from s" +
2066 str( i ) )
2067 LossInPings = main.Mininet2.checkForLoss(
2068 "/tmp/ping.h" +
2069 str( i ) ) or LossInPings
2070 if LossInPings == main.TRUE:
2071 main.log.info( "Loss in ping detected" )
2072 elif LossInPings == main.ERROR:
2073 main.log.info( "There are multiple mininet process running" )
2074 elif LossInPings == main.FALSE:
2075 main.log.info( "No Loss in the pings" )
2076 main.log.info( "No loss of dataplane connectivity" )
2077 utilities.assert_equals(
2078 expect=main.FALSE,
2079 actual=LossInPings,
2080 onpass="No Loss of connectivity",
2081 onfail="Loss of dataplane connectivity detected" )
2082 '''
2083
2084 main.step( "Leadership Election is still functional" )
2085 # Test of LeadershipElection
2086 leaderList = []
Jon Hall5cf14d52015-07-16 12:15:19 -07002087
Jon Hall3b489db2015-10-05 14:38:37 -07002088 restarted = []
2089 for i in main.kill:
2090 restarted.append( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07002091 leaderResult = main.TRUE
Jon Hall3b489db2015-10-05 14:38:37 -07002092
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002093 for i in main.activeNodes:
2094 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002095 leaderN = cli.electionTestLeader()
2096 leaderList.append( leaderN )
2097 if leaderN == main.FALSE:
2098 # error in response
2099 main.log.error( "Something is wrong with " +
2100 "electionTestLeader function, check the" +
2101 " error logs" )
2102 leaderResult = main.FALSE
2103 elif leaderN is None:
2104 main.log.error( cli.name +
2105                                  " shows no leader was elected for the" +
2106                                  " election-app after the old one died" )
2107 leaderResult = main.FALSE
2108 elif leaderN in restarted:
2109 main.log.error( cli.name + " shows " + str( leaderN ) +
2110 " as leader for the election-app, but it " +
2111 "was restarted" )
2112 leaderResult = main.FALSE
2113 if len( set( leaderList ) ) != 1:
2114 leaderResult = main.FALSE
2115 main.log.error(
2116 "Inconsistent view of leader for the election test app" )
2117 # TODO: print the list
2118 utilities.assert_equals(
2119 expect=main.TRUE,
2120 actual=leaderResult,
2121 onpass="Leadership election passed",
2122 onfail="Something went wrong with Leadership election" )
2123
2124 def CASE8( self, main ):
2125 """
2126 Compare topo
2127 """
2128 import json
2129 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002130 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002131 assert main, "main not defined"
2132 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002133 assert main.CLIs, "main.CLIs not defined"
2134 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002135
2136 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002137 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002138 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002139 topoResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002140         topoFailMsg = "ONOS topology doesn't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002141 elapsed = 0
2142 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002143 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002144 startTime = time.time()
2145 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002146 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
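            # Poll ONOS repeatedly; give topology discovery up to 60 seconds and at
            # least 3 attempts to converge with the Mininet view.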
Jon Hall96091e62015-09-21 17:34:17 -07002147 devicesResults = main.TRUE
2148 linksResults = main.TRUE
2149 hostsResults = main.TRUE
2150 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002151 count += 1
2152 cliStart = time.time()
2153 devices = []
2154 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002155 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002156 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002157 name="devices-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002158 args=[ main.CLIs[i].devices, [ None ] ],
2159 kwargs= { 'sleep': 5, 'attempts': 5,
2160 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002161 threads.append( t )
2162 t.start()
2163
2164 for t in threads:
2165 t.join()
2166 devices.append( t.result )
2167 hosts = []
2168 ipResult = main.TRUE
2169 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002170 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002171 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002172 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002173 args=[ main.CLIs[i].hosts, [ None ] ],
2174 kwargs= { 'sleep': 5, 'attempts': 5,
2175 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002176 threads.append( t )
2177 t.start()
2178
2179 for t in threads:
2180 t.join()
2181 try:
2182 hosts.append( json.loads( t.result ) )
2183 except ( ValueError, TypeError ):
2184 main.log.exception( "Error parsing hosts results" )
2185 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002186 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002187 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002188 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002189 if hosts[ controller ]:
2190 for host in hosts[ controller ]:
2191 if host is None or host.get( 'ipAddresses', [] ) == []:
2192 main.log.error(
2193 "Error with host ipAddresses on controller" +
2194 controllerStr + ": " + str( host ) )
2195 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002196 ports = []
2197 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002198 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002199 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002200 name="ports-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002201 args=[ main.CLIs[i].ports, [ None ] ],
2202 kwargs= { 'sleep': 5, 'attempts': 5,
2203 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002204 threads.append( t )
2205 t.start()
2206
2207 for t in threads:
2208 t.join()
2209 ports.append( t.result )
2210 links = []
2211 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002212 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002213 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002214 name="links-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002215 args=[ main.CLIs[i].links, [ None ] ],
2216 kwargs= { 'sleep': 5, 'attempts': 5,
2217 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002218 threads.append( t )
2219 t.start()
2220
2221 for t in threads:
2222 t.join()
2223 links.append( t.result )
2224 clusters = []
2225 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002226 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002227 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002228 name="clusters-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002229 args=[ main.CLIs[i].clusters, [ None ] ],
2230 kwargs= { 'sleep': 5, 'attempts': 5,
2231 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002232 threads.append( t )
2233 t.start()
2234
2235 for t in threads:
2236 t.join()
2237 clusters.append( t.result )
2238
2239 elapsed = time.time() - startTime
2240 cliTime = time.time() - cliStart
2241 print "Elapsed time: " + str( elapsed )
2242 print "CLI time: " + str( cliTime )
2243
Jon Hall6e709752016-02-01 13:38:46 -08002244 if all( e is None for e in devices ) and\
2245 all( e is None for e in hosts ) and\
2246 all( e is None for e in ports ) and\
2247 all( e is None for e in links ) and\
2248 all( e is None for e in clusters ):
2249 topoFailMsg = "Could not get topology from ONOS"
2250 main.log.error( topoFailMsg )
2251 continue # Try again, No use trying to compare
2252
Jon Hall5cf14d52015-07-16 12:15:19 -07002253 mnSwitches = main.Mininet1.getSwitches()
2254 mnLinks = main.Mininet1.getLinks()
2255 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002256 for controller in range( len( main.activeNodes ) ):
2257 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002258 if devices[ controller ] and ports[ controller ] and\
2259 "Error" not in devices[ controller ] and\
2260 "Error" not in ports[ controller ]:
2261
Jon Hallc6793552016-01-19 14:18:37 -08002262 try:
2263 currentDevicesResult = main.Mininet1.compareSwitches(
2264 mnSwitches,
2265 json.loads( devices[ controller ] ),
2266 json.loads( ports[ controller ] ) )
2267 except ( TypeError, ValueError ) as e:
2268 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2269                             devices[ controller ], ports[ controller ] ) )
                        currentDevicesResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002270 else:
2271 currentDevicesResult = main.FALSE
2272 utilities.assert_equals( expect=main.TRUE,
2273 actual=currentDevicesResult,
2274 onpass="ONOS" + controllerStr +
2275 " Switches view is correct",
2276 onfail="ONOS" + controllerStr +
2277 " Switches view is incorrect" )
2278
2279 if links[ controller ] and "Error" not in links[ controller ]:
2280 currentLinksResult = main.Mininet1.compareLinks(
2281 mnSwitches, mnLinks,
2282 json.loads( links[ controller ] ) )
2283 else:
2284 currentLinksResult = main.FALSE
2285 utilities.assert_equals( expect=main.TRUE,
2286 actual=currentLinksResult,
2287 onpass="ONOS" + controllerStr +
2288 " links view is correct",
2289 onfail="ONOS" + controllerStr +
2290 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002291 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002292 currentHostsResult = main.Mininet1.compareHosts(
2293 mnHosts,
2294 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002295 elif hosts[ controller ] == []:
2296 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002297 else:
2298 currentHostsResult = main.FALSE
2299 utilities.assert_equals( expect=main.TRUE,
2300 actual=currentHostsResult,
2301 onpass="ONOS" + controllerStr +
2302 " hosts exist in Mininet",
2303 onfail="ONOS" + controllerStr +
2304 " hosts don't match Mininet" )
2305 # CHECKING HOST ATTACHMENT POINTS
2306 hostAttachment = True
2307 zeroHosts = False
2308 # FIXME: topo-HA/obelisk specific mappings:
2309 # key is mac and value is dpid
2310 mappings = {}
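                # Expected attachment points for the obelisk topology: each host should
                # attach on port 1 of its home switch, e.g. host 8 (mac 00:00:00:00:00:08)
                # should be on device 0000000000003008.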
2311 for i in range( 1, 29 ): # hosts 1 through 28
2312 # set up correct variables:
2313 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2314 if i == 1:
2315 deviceId = "1000".zfill(16)
2316 elif i == 2:
2317 deviceId = "2000".zfill(16)
2318 elif i == 3:
2319 deviceId = "3000".zfill(16)
2320 elif i == 4:
2321 deviceId = "3004".zfill(16)
2322 elif i == 5:
2323 deviceId = "5000".zfill(16)
2324 elif i == 6:
2325 deviceId = "6000".zfill(16)
2326 elif i == 7:
2327 deviceId = "6007".zfill(16)
2328 elif i >= 8 and i <= 17:
2329 dpid = '3' + str( i ).zfill( 3 )
2330 deviceId = dpid.zfill(16)
2331 elif i >= 18 and i <= 27:
2332 dpid = '6' + str( i ).zfill( 3 )
2333 deviceId = dpid.zfill(16)
2334 elif i == 28:
2335 deviceId = "2800".zfill(16)
2336 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002337 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002338 if hosts[ controller ] == []:
2339 main.log.warn( "There are no hosts discovered" )
2340 zeroHosts = True
2341 else:
2342 for host in hosts[ controller ]:
2343 mac = None
2344 location = None
2345 device = None
2346 port = None
2347 try:
2348 mac = host.get( 'mac' )
2349 assert mac, "mac field could not be found for this host object"
2350
2351 location = host.get( 'location' )
2352 assert location, "location field could not be found for this host object"
2353
2354 # Trim the protocol identifier off deviceId
2355 device = str( location.get( 'elementId' ) ).split(':')[1]
2356 assert device, "elementId field could not be found for this host location object"
2357
2358 port = location.get( 'port' )
2359 assert port, "port field could not be found for this host location object"
2360
2361 # Now check if this matches where they should be
2362 if mac and device and port:
2363 if str( port ) != "1":
2364 main.log.error( "The attachment port is incorrect for " +
2365 "host " + str( mac ) +
2366 ". Expected: 1 Actual: " + str( port) )
2367 hostAttachment = False
2368 if device != mappings[ str( mac ) ]:
2369 main.log.error( "The attachment device is incorrect for " +
2370 "host " + str( mac ) +
2371 ". Expected: " + mappings[ str( mac ) ] +
2372 " Actual: " + device )
2373 hostAttachment = False
2374 else:
2375 hostAttachment = False
2376 except AssertionError:
2377 main.log.exception( "Json object not as expected" )
2378 main.log.error( repr( host ) )
2379 hostAttachment = False
2380 else:
2381 main.log.error( "No hosts json output or \"Error\"" +
2382 " in output. hosts = " +
2383 repr( hosts[ controller ] ) )
2384 if zeroHosts is False:
2385 hostAttachment = True
2386
2387 # END CHECKING HOST ATTACHMENT POINTS
2388 devicesResults = devicesResults and currentDevicesResult
2389 linksResults = linksResults and currentLinksResult
2390 hostsResults = hostsResults and currentHostsResult
2391 hostAttachmentResults = hostAttachmentResults and\
2392 hostAttachment
Jon Halle9b1fa32015-12-08 15:32:21 -08002393 topoResult = devicesResults and linksResults and\
2394 hostsResults and hostAttachmentResults
2395 utilities.assert_equals( expect=True,
2396 actual=topoResult,
2397 onpass="ONOS topology matches Mininet",
Jon Hall6e709752016-02-01 13:38:46 -08002398 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002399 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002400
2401 # Compare json objects for hosts and dataplane clusters
2402
2403 # hosts
2404 main.step( "Hosts view is consistent across all ONOS nodes" )
2405 consistentHostsResult = main.TRUE
2406 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002407 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002408 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002409 if hosts[ controller ] == hosts[ 0 ]:
2410 continue
2411 else: # hosts not consistent
2412 main.log.error( "hosts from ONOS" + controllerStr +
2413                                      " are inconsistent with ONOS1" )
2414 main.log.warn( repr( hosts[ controller ] ) )
2415 consistentHostsResult = main.FALSE
2416
2417 else:
2418 main.log.error( "Error in getting ONOS hosts from ONOS" +
2419 controllerStr )
2420 consistentHostsResult = main.FALSE
2421 main.log.warn( "ONOS" + controllerStr +
2422 " hosts response: " +
2423 repr( hosts[ controller ] ) )
2424 utilities.assert_equals(
2425 expect=main.TRUE,
2426 actual=consistentHostsResult,
2427 onpass="Hosts view is consistent across all ONOS nodes",
2428 onfail="ONOS nodes have different views of hosts" )
2429
2430 main.step( "Hosts information is correct" )
2431 hostsResults = hostsResults and ipResult
2432 utilities.assert_equals(
2433 expect=main.TRUE,
2434 actual=hostsResults,
2435 onpass="Host information is correct",
2436 onfail="Host information is incorrect" )
2437
2438 main.step( "Host attachment points to the network" )
2439 utilities.assert_equals(
2440 expect=True,
2441 actual=hostAttachmentResults,
2442 onpass="Hosts are correctly attached to the network",
2443 onfail="ONOS did not correctly attach hosts to the network" )
2444
2445 # Strongly connected clusters of devices
2446 main.step( "Clusters view is consistent across all ONOS nodes" )
2447 consistentClustersResult = main.TRUE
2448 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002449 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002450 if "Error" not in clusters[ controller ]:
2451 if clusters[ controller ] == clusters[ 0 ]:
2452 continue
2453 else: # clusters not consistent
2454 main.log.error( "clusters from ONOS" +
2455 controllerStr +
2456                                      " are inconsistent with ONOS1" )
2457 consistentClustersResult = main.FALSE
2458
2459 else:
2460 main.log.error( "Error in getting dataplane clusters " +
2461 "from ONOS" + controllerStr )
2462 consistentClustersResult = main.FALSE
2463 main.log.warn( "ONOS" + controllerStr +
2464 " clusters response: " +
2465 repr( clusters[ controller ] ) )
2466 utilities.assert_equals(
2467 expect=main.TRUE,
2468 actual=consistentClustersResult,
2469 onpass="Clusters view is consistent across all ONOS nodes",
2470 onfail="ONOS nodes have different views of clusters" )
2471
2472 main.step( "There is only one SCC" )
2473 # there should always only be one cluster
2474 try:
2475 numClusters = len( json.loads( clusters[ 0 ] ) )
2476 except ( ValueError, TypeError ):
2477 main.log.exception( "Error parsing clusters[0]: " +
2478 repr( clusters[0] ) )
2479             numClusters = "ERROR"
            clusterResults = main.FALSE
2480 if numClusters == 1:
2481 clusterResults = main.TRUE
2482 utilities.assert_equals(
2483 expect=1,
2484 actual=numClusters,
2485 onpass="ONOS shows 1 SCC",
2486 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2487
2488 topoResult = ( devicesResults and linksResults
2489 and hostsResults and consistentHostsResult
2490 and consistentClustersResult and clusterResults
2491 and ipResult and hostAttachmentResults )
2492
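        # Also require that the topology converged within the first two polls of the
        # while loop above.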
2493 topoResult = topoResult and int( count <= 2 )
2494 note = "note it takes about " + str( int( cliTime ) ) + \
2495 " seconds for the test to make all the cli calls to fetch " +\
2496 "the topology from each ONOS instance"
2497 main.log.info(
2498 "Very crass estimate for topology discovery/convergence( " +
2499 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2500 str( count ) + " tries" )
2501
2502 main.step( "Device information is correct" )
2503 utilities.assert_equals(
2504 expect=main.TRUE,
2505 actual=devicesResults,
2506 onpass="Device information is correct",
2507 onfail="Device information is incorrect" )
2508
2509 main.step( "Links are correct" )
2510 utilities.assert_equals(
2511 expect=main.TRUE,
2512 actual=linksResults,
2513             onpass="Links are correct",
2514 onfail="Links are incorrect" )
2515
2516 # FIXME: move this to an ONOS state case
2517 main.step( "Checking ONOS nodes" )
2518 nodesOutput = []
2519 nodeResults = main.TRUE
2520 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002521 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002522 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002523 name="nodes-" + str( i ),
2524 args=[ ] )
2525 threads.append( t )
2526 t.start()
2527
2528 for t in threads:
2529 t.join()
2530 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002531 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002532 ips.sort()
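        # Every instance should list exactly the active-node IPs in the ACTIVE state;
        # stopped nodes must not be reported as ACTIVE.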
Jon Hall5cf14d52015-07-16 12:15:19 -07002533 for i in nodesOutput:
2534 try:
2535 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002536 activeIps = []
2537 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002538 for node in current:
Jon Halle9b1fa32015-12-08 15:32:21 -08002539 if node['state'] == 'ACTIVE':
2540 activeIps.append( node['ip'] )
2541 activeIps.sort()
2542 if ips == activeIps:
2543 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002544 except ( ValueError, TypeError ):
2545 main.log.error( "Error parsing nodes output" )
2546 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002547 currentResult = main.FALSE
2548 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002549 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2550 onpass="Nodes check successful",
2551 onfail="Nodes check NOT successful" )
2552
2553 def CASE9( self, main ):
2554 """
2555 Link s3-s28 down
2556 """
2557 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002558 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002559 assert main, "main not defined"
2560 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002561 assert main.CLIs, "main.CLIs not defined"
2562 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002563 # NOTE: You should probably run a topology check after this
2564
2565 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2566
2567 description = "Turn off a link to ensure that Link Discovery " +\
2568 "is working properly"
2569 main.case( description )
2570
2571 main.step( "Kill Link between s3 and s28" )
2572 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2573 main.log.info( "Waiting " + str( linkSleep ) +
2574 " seconds for link down to be discovered" )
2575 time.sleep( linkSleep )
2576 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2577 onpass="Link down successful",
2578 onfail="Failed to bring link down" )
2579 # TODO do some sort of check here
2580
2581 def CASE10( self, main ):
2582 """
2583 Link s3-s28 up
2584 """
2585 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002586 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002587 assert main, "main not defined"
2588 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002589 assert main.CLIs, "main.CLIs not defined"
2590 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002591 # NOTE: You should probably run a topology check after this
2592
2593 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2594
2595 description = "Restore a link to ensure that Link Discovery is " + \
2596 "working properly"
2597 main.case( description )
2598
2599 main.step( "Bring link between s3 and s28 back up" )
2600 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2601 main.log.info( "Waiting " + str( linkSleep ) +
2602 " seconds for link up to be discovered" )
2603 time.sleep( linkSleep )
2604 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2605 onpass="Link up successful",
2606 onfail="Failed to bring link up" )
2607 # TODO do some sort of check here
2608
2609 def CASE11( self, main ):
2610 """
2611 Switch Down
2612 """
2613 # NOTE: You should probably run a topology check after this
2614 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002615 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002616 assert main, "main not defined"
2617 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002618 assert main.CLIs, "main.CLIs not defined"
2619 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002620
2621 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2622
2623 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002624 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002625 main.case( description )
2626 switch = main.params[ 'kill' ][ 'switch' ]
2627 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2628
2629 # TODO: Make this switch parameterizable
2630 main.step( "Kill " + switch )
2631 main.log.info( "Deleting " + switch )
2632 main.Mininet1.delSwitch( switch )
2633 main.log.info( "Waiting " + str( switchSleep ) +
2634 " seconds for switch down to be discovered" )
2635 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002636 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002637 # Peek at the deleted switch
2638 main.log.warn( str( device ) )
2639 result = main.FALSE
2640 if device and device[ 'available' ] is False:
2641 result = main.TRUE
2642 utilities.assert_equals( expect=main.TRUE, actual=result,
2643 onpass="Kill switch successful",
2644 onfail="Failed to kill switch?" )
2645
2646 def CASE12( self, main ):
2647 """
2648 Switch Up
2649 """
2650 # NOTE: You should probably run a topology check after this
2651 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002652 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002653 assert main, "main not defined"
2654 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002655 assert main.CLIs, "main.CLIs not defined"
2656 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002657 assert ONOS1Port, "ONOS1Port not defined"
2658 assert ONOS2Port, "ONOS2Port not defined"
2659 assert ONOS3Port, "ONOS3Port not defined"
2660 assert ONOS4Port, "ONOS4Port not defined"
2661 assert ONOS5Port, "ONOS5Port not defined"
2662 assert ONOS6Port, "ONOS6Port not defined"
2663 assert ONOS7Port, "ONOS7Port not defined"
2664
2665 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2666 switch = main.params[ 'kill' ][ 'switch' ]
2667 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2668 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002669 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002670 description = "Adding a switch to ensure it is discovered correctly"
2671 main.case( description )
2672
2673 main.step( "Add back " + switch )
2674 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2675 for peer in links:
2676 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002677 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002678 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2679 main.log.info( "Waiting " + str( switchSleep ) +
2680 " seconds for switch up to be discovered" )
2681 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002682 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002683 # Peek at the deleted switch
2684 main.log.warn( str( device ) )
2685 result = main.FALSE
2686 if device and device[ 'available' ]:
2687 result = main.TRUE
2688 utilities.assert_equals( expect=main.TRUE, actual=result,
2689 onpass="add switch successful",
2690 onfail="Failed to add switch?" )
2691
2692 def CASE13( self, main ):
2693 """
2694 Clean up
2695 """
2696 import os
2697 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002698 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002699 assert main, "main not defined"
2700 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002701 assert main.CLIs, "main.CLIs not defined"
2702 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002703
2704 # printing colors to terminal
2705 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2706 'blue': '\033[94m', 'green': '\033[92m',
2707 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2708 main.case( "Test Cleanup" )
2709 main.step( "Killing tcpdumps" )
2710 main.Mininet2.stopTcpdump()
2711
2712 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002713 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002714 main.step( "Copying MN pcap and ONOS log files to test station" )
2715 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2716 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002717 # NOTE: MN Pcap file is being saved to logdir.
2718 # We scp this file as MN and TestON aren't necessarily the same vm
2719
2720 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002721 # TODO: Load these from params
2722 # NOTE: must end in /
2723 logFolder = "/opt/onos/log/"
2724 logFiles = [ "karaf.log", "karaf.log.1" ]
2725 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002726 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002727 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002728 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002729 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2730 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002731 # std*.log's
2732 # NOTE: must end in /
2733 logFolder = "/opt/onos/var/"
2734 logFiles = [ "stderr.log", "stdout.log" ]
2735 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002736 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002737 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002738 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002739 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2740 logFolder + f, dstName )
2741 else:
2742 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002743
2744 main.step( "Stopping Mininet" )
2745 mnResult = main.Mininet1.stopNet()
2746 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2747 onpass="Mininet stopped",
2748 onfail="MN cleanup NOT successful" )
2749
2750 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002751 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002752 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2753 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002754
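        # gossipTime and main.restartTime are expected to be recorded by
        # earlier test cases; if those cases were skipped they may be
        # undefined, hence the NameError handler below.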
2755 try:
2756 timerLog = open( main.logdir + "/Timers.csv", 'w')
            # Overwrite the timers log with the latest values and close
2758 labels = "Gossip Intents, Restart"
2759 data = str( gossipTime ) + ", " + str( main.restartTime )
2760 timerLog.write( labels + "\n" + data )
2761 timerLog.close()
2762 except NameError, e:
2763 main.log.exception(e)
2764
2765 def CASE14( self, main ):
2766 """
2767 start election app on all onos nodes
2768 """
Jon Halle1a3b752015-07-22 13:02:46 -07002769 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002770 assert main, "main not defined"
2771 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002772 assert main.CLIs, "main.CLIs not defined"
2773 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002774
2775 main.case("Start Leadership Election app")
2776 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002777 onosCli = main.CLIs[ main.activeNodes[0] ]
2778 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002779 utilities.assert_equals(
2780 expect=main.TRUE,
2781 actual=appResult,
2782 onpass="Election app installed",
2783 onfail="Something went wrong with installing Leadership election" )
2784
2785 main.step( "Run for election on each node" )
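        # electionTestRun asks each active node to run for leadership of the
        # election test topic; electionTestLeader then reports which node each
        # instance believes is the current leader.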
2786 leaderResult = main.TRUE
2787 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002788 for i in main.activeNodes:
2789 main.CLIs[i].electionTestRun()
2790 for i in main.activeNodes:
2791 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002792 leader = cli.electionTestLeader()
2793 if leader is None or leader == main.FALSE:
2794 main.log.error( cli.name + ": Leader for the election app " +
2795 "should be an ONOS node, instead got '" +
2796 str( leader ) + "'" )
2797 leaderResult = main.FALSE
2798 leaders.append( leader )
2799 utilities.assert_equals(
2800 expect=main.TRUE,
2801 actual=leaderResult,
2802 onpass="Successfully ran for leadership",
2803 onfail="Failed to run for leadership" )
2804
2805 main.step( "Check that each node shows the same leader" )
2806 sameLeader = main.TRUE
2807 if len( set( leaders ) ) != 1:
2808 sameLeader = main.FALSE
            main.log.error( "Results of electionTestLeader in order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002810 str( leaders ) )
2811 utilities.assert_equals(
2812 expect=main.TRUE,
2813 actual=sameLeader,
2814 onpass="Leadership is consistent for the election topic",
2815 onfail="Nodes have different leaders" )
2816
2817 def CASE15( self, main ):
2818 """
2819 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002820 15.1 Run election on each node
2821 15.2 Check that each node has the same leaders and candidates
2822 15.3 Find current leader and withdraw
2823 15.4 Check that a new node was elected leader
        15.5 Check that the new leader was the candidate of the old leader
2825 15.6 Run for election on old leader
2826 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2827 15.8 Make sure that the old leader was added to the candidate list
2828
        old and new variable prefixes refer to data from before vs. after
        the withdrawal, and later to before the withdrawal vs. after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002831 """
2832 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002833 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002834 assert main, "main not defined"
2835 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002836 assert main.CLIs, "main.CLIs not defined"
2837 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002838
Jon Hall5cf14d52015-07-16 12:15:19 -07002839 description = "Check that Leadership Election is still functional"
2840 main.case( description )
        # NOTE: Need to re-run since being a candidate is not persistent
2842 # TODO: add check for "Command not found:" in the driver, this
2843 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002844
        oldLeaders = []  # leaders by node before withdrawal from candidates
        newLeaders = []  # leaders by node after withdrawal from candidates
        oldAllCandidates = []  # list of lists of each node's candidates before
        newAllCandidates = []  # list of lists of each node's candidates after
        oldCandidates = []  # list of candidates from node 0 before withdrawal
        newCandidates = []  # list of candidates from node 0 after withdrawal
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one node, so no leader is expected after withdrawal
2855 if main.numCtrls == 1:
2856 expectNoLeader = True
2857
2858 main.step( "Run for election on each node" )
2859 electionResult = main.TRUE
2860
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002861 for i in main.activeNodes: # run test election on each node
2862 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002863 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002864 utilities.assert_equals(
2865 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002866 actual=electionResult,
2867 onpass="All nodes successfully ran for leadership",
2868 onfail="At least one node failed to run for leadership" )
2869
acsmars3a72bde2015-09-02 14:16:22 -07002870 if electionResult == main.FALSE:
2871 main.log.error(
2872 "Skipping Test Case because Election Test App isn't loaded" )
2873 main.skipCase()
2874
acsmars71adceb2015-08-31 15:09:26 -07002875 main.step( "Check that each node shows the same leader and candidates" )
2876 sameResult = main.TRUE
2877 failMessage = "Nodes have different leaders"
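        # specificLeaderCandidate returns the leadership list for the given
        # topic: the current leader first, followed by the remaining
        # candidates, or None if the query fails.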
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002878 for i in main.activeNodes:
2879 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002880 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2881 oldAllCandidates.append( node )
Jon Hall6e709752016-02-01 13:38:46 -08002882 if node:
2883 oldLeaders.append( node[ 0 ] )
2884 else:
2885 oldLeaders.append( None )
acsmars71adceb2015-08-31 15:09:26 -07002886 oldCandidates = oldAllCandidates[ 0 ]
Jon Hall6e709752016-02-01 13:38:46 -08002887 if oldCandidates is None:
2888 oldCandidates = [ None ]
acsmars71adceb2015-08-31 15:09:26 -07002889
2890 # Check that each node has the same leader. Defines oldLeader
2891 if len( set( oldLeaders ) ) != 1:
2892 sameResult = main.FALSE
2893 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2894 oldLeader = None
2895 else:
2896 oldLeader = oldLeaders[ 0 ]
2897
2898 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002899 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002900 for candidates in oldAllCandidates:
Jon Hall6e709752016-02-01 13:38:46 -08002901 if candidates is None:
2902 main.log.warn( "Error getting candidates" )
2903 candidates = [ None ]
acsmars71adceb2015-08-31 15:09:26 -07002904 if set( candidates ) != set( oldCandidates ):
2905 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002906 candidateDiscrepancy = True
acsmars29233db2015-11-04 11:15:00 -08002907 if candidateDiscrepancy:
2908 failMessage += " and candidates"
acsmars71adceb2015-08-31 15:09:26 -07002909 utilities.assert_equals(
2910 expect=main.TRUE,
2911 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002912 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002913 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002914
2915 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002916 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002917 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002918 if oldLeader is None:
2919 main.log.error( "Leadership isn't consistent." )
2920 withdrawResult = main.FALSE
2921 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002922 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002923 if oldLeader == main.nodes[ i ].ip_address:
2924 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002925 break
        else: # FOR/ELSE: only runs if the loop completed without a break
2927 main.log.error( "Leader election, could not find current leader" )
2928 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002929 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002930 utilities.assert_equals(
2931 expect=main.TRUE,
2932 actual=withdrawResult,
2933 onpass="Node was withdrawn from election",
2934 onfail="Node was not withdrawn from election" )
2935
acsmars71adceb2015-08-31 15:09:26 -07002936 main.step( "Check that a new node was elected leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002937 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002938 newLeaderResult = main.TRUE
2939 failMessage = "Nodes have different leaders"
2940
2941 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002942 for i in main.activeNodes:
2943 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002944 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
            # elections might not have finished yet
2946 if node[ 0 ] == 'none' and not expectNoLeader:
2947 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2948 "sure elections are complete." )
2949 time.sleep(5)
2950 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2951 # election still isn't done or there is a problem
2952 if node[ 0 ] == 'none':
2953 main.log.error( "No leader was elected on at least 1 node" )
2954 newLeaderResult = main.FALSE
2955 newAllCandidates.append( node )
2956 newLeaders.append( node[ 0 ] )
2957 newCandidates = newAllCandidates[ 0 ]
2958
2959 # Check that each node has the same leader. Defines newLeader
2960 if len( set( newLeaders ) ) != 1:
2961 newLeaderResult = main.FALSE
2962 main.log.error( "Nodes have different leaders: " +
2963 str( newLeaders ) )
2964 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002965 else:
acsmars71adceb2015-08-31 15:09:26 -07002966 newLeader = newLeaders[ 0 ]
2967
2968 # Check that each node's candidate list is the same
2969 for candidates in newAllCandidates:
2970 if set( candidates ) != set( newCandidates ):
2971 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002972 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002973
2974 # Check that the new leader is not the older leader, which was withdrawn
2975 if newLeader == oldLeader:
2976 newLeaderResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002977 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07002978 " as the current leader" )
2979
Jon Hall5cf14d52015-07-16 12:15:19 -07002980 utilities.assert_equals(
2981 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002982 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002983 onpass="Leadership election passed",
2984 onfail="Something went wrong with Leadership election" )
2985
        main.step( "Check that the new leader was the candidate of the old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
acsmars71adceb2015-08-31 15:09:26 -07002988 correctCandidateResult = main.TRUE
2989 if expectNoLeader:
2990 if newLeader == 'none':
2991 main.log.info( "No leader expected. None found. Pass" )
2992 correctCandidateResult = main.TRUE
2993 else:
2994 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2995 correctCandidateResult = main.FALSE
        elif len( oldCandidates ) >= 3:
            if newLeader == oldCandidates[ 2 ]:
                # The next candidate in line was elected, as expected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldCandidates[ 2 ] ) )
        else:
            main.log.warn( "Could not determine who should be the correct leader" )
            correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07003003 utilities.assert_equals(
3004 expect=main.TRUE,
3005 actual=correctCandidateResult,
3006 onpass="Correct Candidate Elected",
3007 onfail="Incorrect Candidate Elected" )
3008
Jon Hall5cf14d52015-07-16 12:15:19 -07003009 main.step( "Run for election on old leader( just so everyone " +
3010 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07003011 if oldLeaderCLI is not None:
3012 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07003013 else:
acsmars71adceb2015-08-31 15:09:26 -07003014 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003015 runResult = main.FALSE
3016 utilities.assert_equals(
3017 expect=main.TRUE,
3018 actual=runResult,
3019 onpass="App re-ran for election",
3020 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07003021 main.step(
3022 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003023 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07003024 positionResult = main.TRUE
3025 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
3026
3027 # Reset and reuse the new candidate and leaders lists
3028 newAllCandidates = []
3029 newCandidates = []
3030 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003031 for i in main.activeNodes:
3032 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003033 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3034 if oldLeader not in node: # election might no have finished yet
3035 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3036 "be sure elections are complete" )
3037 time.sleep(5)
3038 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3039 if oldLeader not in node: # election still isn't done, errors
3040 main.log.error(
3041 "Old leader was not elected on at least one node" )
3042 positionResult = main.FALSE
3043 newAllCandidates.append( node )
3044 newLeaders.append( node[ 0 ] )
3045 newCandidates = newAllCandidates[ 0 ]
3046
3047 # Check that each node has the same leader. Defines newLeader
3048 if len( set( newLeaders ) ) != 1:
3049 positionResult = main.FALSE
3050 main.log.error( "Nodes have different leaders: " +
3051 str( newLeaders ) )
3052 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003053 else:
acsmars71adceb2015-08-31 15:09:26 -07003054 newLeader = newLeaders[ 0 ]
3055
3056 # Check that each node's candidate list is the same
3057 for candidates in newAllCandidates:
3058 if set( candidates ) != set( newCandidates ):
                positionResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003060 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003061
3062 # Check that the re-elected node is last on the candidate List
3063 if oldLeader != newCandidates[ -1 ]:
Jon Hall6e709752016-02-01 13:38:46 -08003064 main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003065 str( newCandidates ) )
3066 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003067
3068 utilities.assert_equals(
3069 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003070 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003071 onpass="Old leader successfully re-ran for election",
3072 onfail="Something went wrong with Leadership election after " +
3073 "the old leader re-ran for election" )
3074
3075 def CASE16( self, main ):
3076 """
3077 Install Distributed Primitives app
3078 """
3079 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003080 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003081 assert main, "main not defined"
3082 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003083 assert main.CLIs, "main.CLIs not defined"
3084 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003085
3086 # Variables for the distributed primitives tests
3087 global pCounterName
3088 global iCounterName
3089 global pCounterValue
3090 global iCounterValue
3091 global onosSet
3092 global onosSetName
3093 pCounterName = "TestON-Partitions"
3094 iCounterName = "TestON-inMemory"
3095 pCounterValue = 0
3096 iCounterValue = 0
3097 onosSet = set([])
3098 onosSetName = "TestON-set"
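        # These globals track the values the test expects ONOS to hold;
        # CASE17 updates them in lockstep with the ONOS primitives and then
        # compares each node's reported state against them.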
3099
3100 description = "Install Primitives app"
3101 main.case( description )
3102 main.step( "Install Primitives app" )
3103 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003104 node = main.activeNodes[0]
3105 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003106 utilities.assert_equals( expect=main.TRUE,
3107 actual=appResults,
3108 onpass="Primitives app activated",
3109 onfail="Primitives app not activated" )
3110 time.sleep( 5 ) # To allow all nodes to activate
3111
3112 def CASE17( self, main ):
3113 """
3114 Check for basic functionality with distributed primitives
3115 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003116 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003117 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003118 assert main, "main not defined"
3119 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003120 assert main.CLIs, "main.CLIs not defined"
3121 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003122 assert pCounterName, "pCounterName not defined"
3123 assert iCounterName, "iCounterName not defined"
3124 assert onosSetName, "onosSetName not defined"
3125 # NOTE: assert fails if value is 0/None/Empty/False
3126 try:
3127 pCounterValue
3128 except NameError:
3129 main.log.error( "pCounterValue not defined, setting to 0" )
3130 pCounterValue = 0
3131 try:
3132 iCounterValue
3133 except NameError:
3134 main.log.error( "iCounterValue not defined, setting to 0" )
3135 iCounterValue = 0
3136 try:
3137 onosSet
3138 except NameError:
3139 main.log.error( "onosSet not defined, setting to empty Set" )
3140 onosSet = set([])
3141 # Variables for the distributed primitives tests. These are local only
3142 addValue = "a"
3143 addAllValue = "a b c d e f"
3144 retainValue = "c d e f"
3145
3146 description = "Check for basic functionality with distributed " +\
3147 "primitives"
3148 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003149 main.caseExplanation = "Test the methods of the distributed " +\
3150 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003151 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003152 # Partitioned counters
3153 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003154 pCounters = []
3155 threads = []
3156 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003157 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003158 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3159 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003160 args=[ pCounterName ] )
3161 pCounterValue += 1
3162 addedPValues.append( pCounterValue )
3163 threads.append( t )
3164 t.start()
3165
3166 for t in threads:
3167 t.join()
3168 pCounters.append( t.result )
3169 # Check that counter incremented numController times
3170 pCounterResults = True
3171 for i in addedPValues:
3172 tmpResult = i in pCounters
3173 pCounterResults = pCounterResults and tmpResult
3174 if not tmpResult:
3175 main.log.error( str( i ) + " is not in partitioned "
3176 "counter incremented results" )
3177 utilities.assert_equals( expect=True,
3178 actual=pCounterResults,
3179 onpass="Default counter incremented",
3180 onfail="Error incrementing default" +
3181 " counter" )
3182
Jon Halle1a3b752015-07-22 13:02:46 -07003183 main.step( "Get then Increment a default counter on each node" )
3184 pCounters = []
3185 threads = []
3186 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003187 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003188 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3189 name="counterGetAndAdd-" + str( i ),
3190 args=[ pCounterName ] )
3191 addedPValues.append( pCounterValue )
3192 pCounterValue += 1
3193 threads.append( t )
3194 t.start()
3195
3196 for t in threads:
3197 t.join()
3198 pCounters.append( t.result )
3199 # Check that counter incremented numController times
3200 pCounterResults = True
3201 for i in addedPValues:
3202 tmpResult = i in pCounters
3203 pCounterResults = pCounterResults and tmpResult
3204 if not tmpResult:
3205 main.log.error( str( i ) + " is not in partitioned "
3206 "counter incremented results" )
3207 utilities.assert_equals( expect=True,
3208 actual=pCounterResults,
3209 onpass="Default counter incremented",
3210 onfail="Error incrementing default" +
3211 " counter" )
3212
3213 main.step( "Counters we added have the correct values" )
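        # counterCheck() comes from the Counters library imported in CASE1; it
        # is expected to compare the locally tracked value against the value
        # each ONOS node reports for the named counter.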
3214 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3215 utilities.assert_equals( expect=main.TRUE,
3216 actual=incrementCheck,
3217 onpass="Added counters are correct",
3218 onfail="Added counters are incorrect" )
3219
3220 main.step( "Add -8 to then get a default counter on each node" )
3221 pCounters = []
3222 threads = []
3223 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003224 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003225 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3226 name="counterIncrement-" + str( i ),
3227 args=[ pCounterName ],
3228 kwargs={ "delta": -8 } )
3229 pCounterValue += -8
3230 addedPValues.append( pCounterValue )
3231 threads.append( t )
3232 t.start()
3233
3234 for t in threads:
3235 t.join()
3236 pCounters.append( t.result )
3237 # Check that counter incremented numController times
3238 pCounterResults = True
3239 for i in addedPValues:
3240 tmpResult = i in pCounters
3241 pCounterResults = pCounterResults and tmpResult
3242 if not tmpResult:
3243 main.log.error( str( i ) + " is not in partitioned "
3244 "counter incremented results" )
3245 utilities.assert_equals( expect=True,
3246 actual=pCounterResults,
3247 onpass="Default counter incremented",
3248 onfail="Error incrementing default" +
3249 " counter" )
3250
3251 main.step( "Add 5 to then get a default counter on each node" )
3252 pCounters = []
3253 threads = []
3254 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003255 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003256 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3257 name="counterIncrement-" + str( i ),
3258 args=[ pCounterName ],
3259 kwargs={ "delta": 5 } )
3260 pCounterValue += 5
3261 addedPValues.append( pCounterValue )
3262 threads.append( t )
3263 t.start()
3264
3265 for t in threads:
3266 t.join()
3267 pCounters.append( t.result )
3268 # Check that counter incremented numController times
3269 pCounterResults = True
3270 for i in addedPValues:
3271 tmpResult = i in pCounters
3272 pCounterResults = pCounterResults and tmpResult
3273 if not tmpResult:
3274 main.log.error( str( i ) + " is not in partitioned "
3275 "counter incremented results" )
3276 utilities.assert_equals( expect=True,
3277 actual=pCounterResults,
3278 onpass="Default counter incremented",
3279 onfail="Error incrementing default" +
3280 " counter" )
3281
3282 main.step( "Get then add 5 to a default counter on each node" )
3283 pCounters = []
3284 threads = []
3285 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003286 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003287 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3288 name="counterIncrement-" + str( i ),
3289 args=[ pCounterName ],
3290 kwargs={ "delta": 5 } )
3291 addedPValues.append( pCounterValue )
3292 pCounterValue += 5
3293 threads.append( t )
3294 t.start()
3295
3296 for t in threads:
3297 t.join()
3298 pCounters.append( t.result )
3299 # Check that counter incremented numController times
3300 pCounterResults = True
3301 for i in addedPValues:
3302 tmpResult = i in pCounters
3303 pCounterResults = pCounterResults and tmpResult
3304 if not tmpResult:
3305 main.log.error( str( i ) + " is not in partitioned "
3306 "counter incremented results" )
3307 utilities.assert_equals( expect=True,
3308 actual=pCounterResults,
3309 onpass="Default counter incremented",
3310 onfail="Error incrementing default" +
3311 " counter" )
3312
3313 main.step( "Counters we added have the correct values" )
3314 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3315 utilities.assert_equals( expect=main.TRUE,
3316 actual=incrementCheck,
3317 onpass="Added counters are correct",
3318 onfail="Added counters are incorrect" )
3319
3320 # In-Memory counters
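        # The in-memory counter steps mirror the partitioned-counter steps
        # above, but pass inMemory=True so the CLI operates on the in-memory
        # counter instead.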
3321 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003322 iCounters = []
3323 addedIValues = []
3324 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003325 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003326 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003327 name="icounterIncrement-" + str( i ),
3328 args=[ iCounterName ],
3329 kwargs={ "inMemory": True } )
3330 iCounterValue += 1
3331 addedIValues.append( iCounterValue )
3332 threads.append( t )
3333 t.start()
3334
3335 for t in threads:
3336 t.join()
3337 iCounters.append( t.result )
3338 # Check that counter incremented numController times
3339 iCounterResults = True
3340 for i in addedIValues:
3341 tmpResult = i in iCounters
3342 iCounterResults = iCounterResults and tmpResult
3343 if not tmpResult:
3344 main.log.error( str( i ) + " is not in the in-memory "
3345 "counter incremented results" )
3346 utilities.assert_equals( expect=True,
3347 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003348 onpass="In-memory counter incremented",
3349 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003350 " counter" )
3351
        main.step( "Get then increment an in-memory counter on each node" )
3353 iCounters = []
3354 threads = []
3355 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003356 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003357 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3358 name="counterGetAndAdd-" + str( i ),
3359 args=[ iCounterName ],
3360 kwargs={ "inMemory": True } )
3361 addedIValues.append( iCounterValue )
3362 iCounterValue += 1
3363 threads.append( t )
3364 t.start()
3365
3366 for t in threads:
3367 t.join()
3368 iCounters.append( t.result )
3369 # Check that counter incremented numController times
3370 iCounterResults = True
3371 for i in addedIValues:
3372 tmpResult = i in iCounters
3373 iCounterResults = iCounterResults and tmpResult
3374 if not tmpResult:
3375 main.log.error( str( i ) + " is not in in-memory "
3376 "counter incremented results" )
3377 utilities.assert_equals( expect=True,
3378 actual=iCounterResults,
3379 onpass="In-memory counter incremented",
3380 onfail="Error incrementing in-memory" +
3381 " counter" )
3382
3383 main.step( "Counters we added have the correct values" )
3384 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3385 utilities.assert_equals( expect=main.TRUE,
3386 actual=incrementCheck,
3387 onpass="Added counters are correct",
3388 onfail="Added counters are incorrect" )
3389
        main.step( "Add -8 to then get an in-memory counter on each node" )
3391 iCounters = []
3392 threads = []
3393 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003394 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003395 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3396 name="counterIncrement-" + str( i ),
3397 args=[ iCounterName ],
3398 kwargs={ "delta": -8, "inMemory": True } )
3399 iCounterValue += -8
3400 addedIValues.append( iCounterValue )
3401 threads.append( t )
3402 t.start()
3403
3404 for t in threads:
3405 t.join()
3406 iCounters.append( t.result )
3407 # Check that counter incremented numController times
3408 iCounterResults = True
3409 for i in addedIValues:
3410 tmpResult = i in iCounters
3411 iCounterResults = iCounterResults and tmpResult
3412 if not tmpResult:
3413 main.log.error( str( i ) + " is not in in-memory "
3414 "counter incremented results" )
3415 utilities.assert_equals( expect=True,
                                 actual=iCounterResults,
3417 onpass="In-memory counter incremented",
3418 onfail="Error incrementing in-memory" +
3419 " counter" )
3420
        main.step( "Add 5 to then get an in-memory counter on each node" )
3422 iCounters = []
3423 threads = []
3424 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003425 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003426 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3427 name="counterIncrement-" + str( i ),
3428 args=[ iCounterName ],
3429 kwargs={ "delta": 5, "inMemory": True } )
3430 iCounterValue += 5
3431 addedIValues.append( iCounterValue )
3432 threads.append( t )
3433 t.start()
3434
3435 for t in threads:
3436 t.join()
3437 iCounters.append( t.result )
3438 # Check that counter incremented numController times
3439 iCounterResults = True
3440 for i in addedIValues:
3441 tmpResult = i in iCounters
3442 iCounterResults = iCounterResults and tmpResult
3443 if not tmpResult:
3444 main.log.error( str( i ) + " is not in in-memory "
3445 "counter incremented results" )
3446 utilities.assert_equals( expect=True,
                                 actual=iCounterResults,
3448 onpass="In-memory counter incremented",
3449 onfail="Error incrementing in-memory" +
3450 " counter" )
3451
        main.step( "Get then add 5 to an in-memory counter on each node" )
3453 iCounters = []
3454 threads = []
3455 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003456 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003457 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3458 name="counterIncrement-" + str( i ),
3459 args=[ iCounterName ],
3460 kwargs={ "delta": 5, "inMemory": True } )
3461 addedIValues.append( iCounterValue )
3462 iCounterValue += 5
3463 threads.append( t )
3464 t.start()
3465
3466 for t in threads:
3467 t.join()
3468 iCounters.append( t.result )
3469 # Check that counter incremented numController times
3470 iCounterResults = True
3471 for i in addedIValues:
3472 tmpResult = i in iCounters
3473 iCounterResults = iCounterResults and tmpResult
3474 if not tmpResult:
3475 main.log.error( str( i ) + " is not in in-memory "
3476 "counter incremented results" )
3477 utilities.assert_equals( expect=True,
3478 actual=iCounterResults,
3479 onpass="In-memory counter incremented",
3480 onfail="Error incrementing in-memory" +
3481 " counter" )
3482
3483 main.step( "Counters we added have the correct values" )
3484 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3485 utilities.assert_equals( expect=main.TRUE,
3486 actual=incrementCheck,
3487 onpass="Added counters are correct",
3488 onfail="Added counters are incorrect" )
3489
        main.step( "Check counters are consistent across nodes" )
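        # consistentCheck() (from the Counters library) is expected to return
        # the collected per-node counter output plus an overall main.TRUE or
        # main.FALSE indicating whether every node reported the same values.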
Jon Hall3b489db2015-10-05 14:38:37 -07003491 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003492 utilities.assert_equals( expect=main.TRUE,
3493 actual=consistentCounterResults,
3494 onpass="ONOS counters are consistent " +
3495 "across nodes",
3496 onfail="ONOS Counters are inconsistent " +
3497 "across nodes" )
3498
3499 main.step( "Counters we added have the correct values" )
        incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3501 incrementCheck = incrementCheck and \
3502 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003503 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003504 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003505 onpass="Added counters are correct",
3506 onfail="Added counters are incorrect" )
3507 # DISTRIBUTED SETS
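        # Each set step below applies the same mutation to the local onosSet
        # and to the ONOS set named onosSetName, then verifies that every
        # active node reports matching contents and size.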
3508 main.step( "Distributed Set get" )
3509 size = len( onosSet )
3510 getResponses = []
3511 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003512 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003513 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003514 name="setTestGet-" + str( i ),
3515 args=[ onosSetName ] )
3516 threads.append( t )
3517 t.start()
3518 for t in threads:
3519 t.join()
3520 getResponses.append( t.result )
3521
3522 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003523 for i in range( len( main.activeNodes ) ):
3524 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003525 if isinstance( getResponses[ i ], list):
3526 current = set( getResponses[ i ] )
3527 if len( current ) == len( getResponses[ i ] ):
3528 # no repeats
3529 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003530 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003531 " has incorrect view" +
3532 " of set " + onosSetName + ":\n" +
3533 str( getResponses[ i ] ) )
3534 main.log.debug( "Expected: " + str( onosSet ) )
3535 main.log.debug( "Actual: " + str( current ) )
3536 getResults = main.FALSE
3537 else:
3538 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003539 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003540 " has repeat elements in" +
3541 " set " + onosSetName + ":\n" +
3542 str( getResponses[ i ] ) )
3543 getResults = main.FALSE
3544 elif getResponses[ i ] == main.ERROR:
3545 getResults = main.FALSE
3546 utilities.assert_equals( expect=main.TRUE,
3547 actual=getResults,
3548 onpass="Set elements are correct",
3549 onfail="Set elements are incorrect" )
3550
3551 main.step( "Distributed Set size" )
3552 sizeResponses = []
3553 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003554 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003555 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003556 name="setTestSize-" + str( i ),
3557 args=[ onosSetName ] )
3558 threads.append( t )
3559 t.start()
3560 for t in threads:
3561 t.join()
3562 sizeResponses.append( t.result )
3563
3564 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003565 for i in range( len( main.activeNodes ) ):
3566 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003567 if size != sizeResponses[ i ]:
3568 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003569 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003570 " expected a size of " + str( size ) +
3571 " for set " + onosSetName +
3572 " but got " + str( sizeResponses[ i ] ) )
3573 utilities.assert_equals( expect=main.TRUE,
3574 actual=sizeResults,
3575 onpass="Set sizes are correct",
3576 onfail="Set sizes are incorrect" )
3577
3578 main.step( "Distributed Set add()" )
3579 onosSet.add( addValue )
3580 addResponses = []
3581 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003582 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003583 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003584 name="setTestAdd-" + str( i ),
3585 args=[ onosSetName, addValue ] )
3586 threads.append( t )
3587 t.start()
3588 for t in threads:
3589 t.join()
3590 addResponses.append( t.result )
3591
3592 # main.TRUE = successfully changed the set
3593 # main.FALSE = action resulted in no change in set
3594 # main.ERROR - Some error in executing the function
3595 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003596 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003597 if addResponses[ i ] == main.TRUE:
3598 # All is well
3599 pass
3600 elif addResponses[ i ] == main.FALSE:
3601 # Already in set, probably fine
3602 pass
3603 elif addResponses[ i ] == main.ERROR:
3604 # Error in execution
3605 addResults = main.FALSE
3606 else:
3607 # unexpected result
3608 addResults = main.FALSE
3609 if addResults != main.TRUE:
3610 main.log.error( "Error executing set add" )
3611
3612 # Check if set is still correct
3613 size = len( onosSet )
3614 getResponses = []
3615 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003616 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003617 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003618 name="setTestGet-" + str( i ),
3619 args=[ onosSetName ] )
3620 threads.append( t )
3621 t.start()
3622 for t in threads:
3623 t.join()
3624 getResponses.append( t.result )
3625 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003626 for i in range( len( main.activeNodes ) ):
3627 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003628 if isinstance( getResponses[ i ], list):
3629 current = set( getResponses[ i ] )
3630 if len( current ) == len( getResponses[ i ] ):
3631 # no repeats
3632 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003633 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003634 " of set " + onosSetName + ":\n" +
3635 str( getResponses[ i ] ) )
3636 main.log.debug( "Expected: " + str( onosSet ) )
3637 main.log.debug( "Actual: " + str( current ) )
3638 getResults = main.FALSE
3639 else:
3640 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003641 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003642 " set " + onosSetName + ":\n" +
3643 str( getResponses[ i ] ) )
3644 getResults = main.FALSE
3645 elif getResponses[ i ] == main.ERROR:
3646 getResults = main.FALSE
3647 sizeResponses = []
3648 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003649 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003650 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003651 name="setTestSize-" + str( i ),
3652 args=[ onosSetName ] )
3653 threads.append( t )
3654 t.start()
3655 for t in threads:
3656 t.join()
3657 sizeResponses.append( t.result )
3658 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003659 for i in range( len( main.activeNodes ) ):
3660 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003661 if size != sizeResponses[ i ]:
3662 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003663 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003664 " expected a size of " + str( size ) +
3665 " for set " + onosSetName +
3666 " but got " + str( sizeResponses[ i ] ) )
3667 addResults = addResults and getResults and sizeResults
3668 utilities.assert_equals( expect=main.TRUE,
3669 actual=addResults,
3670 onpass="Set add correct",
3671 onfail="Set add was incorrect" )
3672
3673 main.step( "Distributed Set addAll()" )
3674 onosSet.update( addAllValue.split() )
3675 addResponses = []
3676 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003677 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003678 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003679 name="setTestAddAll-" + str( i ),
3680 args=[ onosSetName, addAllValue ] )
3681 threads.append( t )
3682 t.start()
3683 for t in threads:
3684 t.join()
3685 addResponses.append( t.result )
3686
3687 # main.TRUE = successfully changed the set
3688 # main.FALSE = action resulted in no change in set
3689 # main.ERROR - Some error in executing the function
3690 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003691 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003692 if addResponses[ i ] == main.TRUE:
3693 # All is well
3694 pass
3695 elif addResponses[ i ] == main.FALSE:
3696 # Already in set, probably fine
3697 pass
3698 elif addResponses[ i ] == main.ERROR:
3699 # Error in execution
3700 addAllResults = main.FALSE
3701 else:
3702 # unexpected result
3703 addAllResults = main.FALSE
3704 if addAllResults != main.TRUE:
3705 main.log.error( "Error executing set addAll" )
3706
3707 # Check if set is still correct
3708 size = len( onosSet )
3709 getResponses = []
3710 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003711 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003712 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003713 name="setTestGet-" + str( i ),
3714 args=[ onosSetName ] )
3715 threads.append( t )
3716 t.start()
3717 for t in threads:
3718 t.join()
3719 getResponses.append( t.result )
3720 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003721 for i in range( len( main.activeNodes ) ):
3722 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003723 if isinstance( getResponses[ i ], list):
3724 current = set( getResponses[ i ] )
3725 if len( current ) == len( getResponses[ i ] ):
3726 # no repeats
3727 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003728 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003729 " has incorrect view" +
3730 " of set " + onosSetName + ":\n" +
3731 str( getResponses[ i ] ) )
3732 main.log.debug( "Expected: " + str( onosSet ) )
3733 main.log.debug( "Actual: " + str( current ) )
3734 getResults = main.FALSE
3735 else:
3736 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003737 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003738 " has repeat elements in" +
3739 " set " + onosSetName + ":\n" +
3740 str( getResponses[ i ] ) )
3741 getResults = main.FALSE
3742 elif getResponses[ i ] == main.ERROR:
3743 getResults = main.FALSE
3744 sizeResponses = []
3745 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003746 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003747 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003748 name="setTestSize-" + str( i ),
3749 args=[ onosSetName ] )
3750 threads.append( t )
3751 t.start()
3752 for t in threads:
3753 t.join()
3754 sizeResponses.append( t.result )
3755 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003756 for i in range( len( main.activeNodes ) ):
3757 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003758 if size != sizeResponses[ i ]:
3759 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003760 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003761 " expected a size of " + str( size ) +
3762 " for set " + onosSetName +
3763 " but got " + str( sizeResponses[ i ] ) )
3764 addAllResults = addAllResults and getResults and sizeResults
3765 utilities.assert_equals( expect=main.TRUE,
3766 actual=addAllResults,
3767 onpass="Set addAll correct",
3768 onfail="Set addAll was incorrect" )
3769
3770 main.step( "Distributed Set contains()" )
3771 containsResponses = []
3772 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003773 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003774 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003775 name="setContains-" + str( i ),
3776 args=[ onosSetName ],
3777 kwargs={ "values": addValue } )
3778 threads.append( t )
3779 t.start()
3780 for t in threads:
3781 t.join()
            # NOTE: the result is a tuple; element 1 holds the contains() result
3783 containsResponses.append( t.result )
3784
3785 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003786 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003787 if containsResponses[ i ] == main.ERROR:
3788 containsResults = main.FALSE
3789 else:
3790 containsResults = containsResults and\
3791 containsResponses[ i ][ 1 ]
3792 utilities.assert_equals( expect=main.TRUE,
3793 actual=containsResults,
3794 onpass="Set contains is functional",
3795 onfail="Set contains failed" )
3796
3797 main.step( "Distributed Set containsAll()" )
3798 containsAllResponses = []
3799 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003800 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003801 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003802 name="setContainsAll-" + str( i ),
3803 args=[ onosSetName ],
3804 kwargs={ "values": addAllValue } )
3805 threads.append( t )
3806 t.start()
3807 for t in threads:
3808 t.join()
            # NOTE: the result is a tuple; element 1 holds the containsAll() result
3810 containsAllResponses.append( t.result )
3811
3812 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003813 for i in range( len( main.activeNodes ) ):
            if containsAllResponses[ i ] == main.ERROR:
                containsAllResults = main.FALSE
            else:
                containsAllResults = containsAllResults and\
                    containsAllResponses[ i ][ 1 ]
3819 utilities.assert_equals( expect=main.TRUE,
3820 actual=containsAllResults,
3821 onpass="Set containsAll is functional",
3822 onfail="Set containsAll failed" )
3823
3824 main.step( "Distributed Set remove()" )
3825 onosSet.remove( addValue )
3826 removeResponses = []
3827 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003828 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003829 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003830 name="setTestRemove-" + str( i ),
3831 args=[ onosSetName, addValue ] )
3832 threads.append( t )
3833 t.start()
3834 for t in threads:
3835 t.join()
3836 removeResponses.append( t.result )
3837
3838 # main.TRUE = successfully changed the set
3839 # main.FALSE = action resulted in no change in set
3840 # main.ERROR - Some error in executing the function
3841 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003842 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003843 if removeResponses[ i ] == main.TRUE:
3844 # All is well
3845 pass
3846 elif removeResponses[ i ] == main.FALSE:
3847 # not in set, probably fine
3848 pass
3849 elif removeResponses[ i ] == main.ERROR:
3850 # Error in execution
3851 removeResults = main.FALSE
3852 else:
3853 # unexpected result
3854 removeResults = main.FALSE
3855 if removeResults != main.TRUE:
3856 main.log.error( "Error executing set remove" )
3857
3858 # Check if set is still correct
3859 size = len( onosSet )
3860 getResponses = []
3861 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003862 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003863 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003864 name="setTestGet-" + str( i ),
3865 args=[ onosSetName ] )
3866 threads.append( t )
3867 t.start()
3868 for t in threads:
3869 t.join()
3870 getResponses.append( t.result )
3871 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003872 for i in range( len( main.activeNodes ) ):
3873 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003874 if isinstance( getResponses[ i ], list):
3875 current = set( getResponses[ i ] )
3876 if len( current ) == len( getResponses[ i ] ):
3877 # no repeats
3878 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003879 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003880 " has incorrect view" +
3881 " of set " + onosSetName + ":\n" +
3882 str( getResponses[ i ] ) )
3883 main.log.debug( "Expected: " + str( onosSet ) )
3884 main.log.debug( "Actual: " + str( current ) )
3885 getResults = main.FALSE
3886 else:
3887 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003888 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003889 " has repeat elements in" +
3890 " set " + onosSetName + ":\n" +
3891 str( getResponses[ i ] ) )
3892 getResults = main.FALSE
3893 elif getResponses[ i ] == main.ERROR:
3894 getResults = main.FALSE
3895 sizeResponses = []
3896 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003897 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003898 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003899 name="setTestSize-" + str( i ),
3900 args=[ onosSetName ] )
3901 threads.append( t )
3902 t.start()
3903 for t in threads:
3904 t.join()
3905 sizeResponses.append( t.result )
3906 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003907 for i in range( len( main.activeNodes ) ):
3908 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003909 if size != sizeResponses[ i ]:
3910 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003911 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003912 " expected a size of " + str( size ) +
3913 " for set " + onosSetName +
3914 " but got " + str( sizeResponses[ i ] ) )
3915 removeResults = removeResults and getResults and sizeResults
3916 utilities.assert_equals( expect=main.TRUE,
3917 actual=removeResults,
3918 onpass="Set remove correct",
3919 onfail="Set remove was incorrect" )
3920
3921 main.step( "Distributed Set removeAll()" )
3922 onosSet.difference_update( addAllValue.split() )
3923 removeAllResponses = []
3924 threads = []
3925 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003926 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003927 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003928 name="setTestRemoveAll-" + str( i ),
3929 args=[ onosSetName, addAllValue ] )
3930 threads.append( t )
3931 t.start()
3932 for t in threads:
3933 t.join()
3934 removeAllResponses.append( t.result )
3935 except Exception, e:
3936 main.log.exception(e)
3937
3938 # main.TRUE = successfully changed the set
3939 # main.FALSE = action resulted in no change in set
3940 # main.ERROR - Some error in executing the function
3941 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003942 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003943 if removeAllResponses[ i ] == main.TRUE:
3944 # All is well
3945 pass
3946 elif removeAllResponses[ i ] == main.FALSE:
3947 # not in set, probably fine
3948 pass
3949 elif removeAllResponses[ i ] == main.ERROR:
3950 # Error in execution
3951 removeAllResults = main.FALSE
3952 else:
3953 # unexpected result
3954 removeAllResults = main.FALSE
3955 if removeAllResults != main.TRUE:
3956 main.log.error( "Error executing set removeAll" )
3957
3958 # Check if set is still correct
3959 size = len( onosSet )
3960 getResponses = []
3961 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003962 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003963 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003964 name="setTestGet-" + str( i ),
3965 args=[ onosSetName ] )
3966 threads.append( t )
3967 t.start()
3968 for t in threads:
3969 t.join()
3970 getResponses.append( t.result )
3971 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003972 for i in range( len( main.activeNodes ) ):
3973 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003974 if isinstance( getResponses[ i ], list):
3975 current = set( getResponses[ i ] )
3976 if len( current ) == len( getResponses[ i ] ):
3977 # no repeats
3978 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003979 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003980 " has incorrect view" +
3981 " of set " + onosSetName + ":\n" +
3982 str( getResponses[ i ] ) )
3983 main.log.debug( "Expected: " + str( onosSet ) )
3984 main.log.debug( "Actual: " + str( current ) )
3985 getResults = main.FALSE
3986 else:
3987 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003988 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003989 " has repeat elements in" +
3990 " set " + onosSetName + ":\n" +
3991 str( getResponses[ i ] ) )
3992 getResults = main.FALSE
3993 elif getResponses[ i ] == main.ERROR:
3994 getResults = main.FALSE
3995 sizeResponses = []
3996 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003997 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003998 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003999 name="setTestSize-" + str( i ),
4000 args=[ onosSetName ] )
4001 threads.append( t )
4002 t.start()
4003 for t in threads:
4004 t.join()
4005 sizeResponses.append( t.result )
4006 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004007 for i in range( len( main.activeNodes ) ):
4008 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004009 if size != sizeResponses[ i ]:
4010 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004011 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004012 " expected a size of " + str( size ) +
4013 " for set " + onosSetName +
4014 " but got " + str( sizeResponses[ i ] ) )
4015 removeAllResults = removeAllResults and getResults and sizeResults
4016 utilities.assert_equals( expect=main.TRUE,
4017 actual=removeAllResults,
4018 onpass="Set removeAll correct",
4019 onfail="Set removeAll was incorrect" )
4020
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

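        # NOTE: clear() is exercised through setTestRemove with the "clear"
        #       kwarg; the value argument is only a placeholder and is ignored.
        #       After clearing, the expected local set is empty and every node
        #       should report a size of 0.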
        main.step( "Distributed Set clear()" )
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Value doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )

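        # The set is repopulated after clear() so that the retain() step below
        # has elements to intersect against; this repeats the same addAll values
        # and verification as the earlier addAll step.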
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

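        # retain() keeps only the elements listed in retainValue, mirroring
        # Python's set.intersection_update(); it is driven through setTestRemove
        # with the "retain" kwarg, so a TRUE response means elements were
        # dropped and FALSE means the set was already equal to the retained set.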
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # No change to the set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )

        # Transactional maps
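        # The transactional map checks write numKeys entries ("Key1".."Key100")
        # from a single active node and then read each key back from every
        # active node. transactionalMapPut is expected to return one response
        # entry per key, each carrying the written value; anything else (or a
        # missing response) fails the put step.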
        main.step( "Partitioned Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        if putResponses and len( putResponses ) == 100:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )

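        # The in-memory variant repeats the same put/get checks against the
        # non-persistent transactional map ( inMemory=True on the CLI wrappers );
        # a missing or malformed put response is treated as a failure.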
        main.step( "In-memory Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
        if putResponses and len( putResponses ) == 100:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="In-Memory Transactional Map put successful",
                                 onfail="In-Memory Transactional Map put values are incorrect" )

        main.step( "In-Memory Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ],
                                 kwargs={ "inMemory": True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="In-Memory Transactional Map get values were correct",
                                 onfail="In-Memory Transactional Map values incorrect" )