blob: 16b8a8b0dac7c9dfdf03062cecfd2e8fd1ac2111 [file] [log] [blame]
Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if ONOS can handle
3 a minority of its nodes restarting
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
Jon Hallb3ed8ed2015-10-28 16:43:55 -070012CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
Jon Hall5cf14d52015-07-16 12:15:19 -070014CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
Jon Hallb3ed8ed2015-10-28 16:43:55 -070028class HAkillNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -070029
    def __init__( self ):
        """Initialize the test class.

        TestON instantiates the test class itself; no arguments are passed.
        """
        # NOTE(review): `default` is never read in this chunk — presumably
        # TestON boilerplate; confirm before removing.
        self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hall3b489db2015-10-05 14:38:37 -070052 import pexpect
Jon Hall6e709752016-02-01 13:38:46 -080053 import time
54 main.log.info( "ONOS HA test: Restart a minority of ONOS nodes - " +
Jon Hall5cf14d52015-07-16 12:15:19 -070055 "initialization" )
56 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070057 main.caseExplanation = "Setup the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070058 "installing ONOS, starting Mininet and ONOS" +\
59 "cli sessions."
Jon Hall5cf14d52015-07-16 12:15:19 -070060
61 # load some variables from the params file
62 PULLCODE = False
63 if main.params[ 'Git' ] == 'True':
64 PULLCODE = True
65 gitBranch = main.params[ 'branch' ]
66 cellName = main.params[ 'ENV' ][ 'cellName' ]
67
Jon Halle1a3b752015-07-22 13:02:46 -070068 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070069 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070070 if main.ONOSbench.maxNodes < main.numCtrls:
71 main.numCtrls = int( main.ONOSbench.maxNodes )
72 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070073 global ONOS1Port
74 global ONOS2Port
75 global ONOS3Port
76 global ONOS4Port
77 global ONOS5Port
78 global ONOS6Port
79 global ONOS7Port
80
81 # FIXME: just get controller port from params?
82 # TODO: do we really need all these?
83 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
84 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
85 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
86 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
87 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
88 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
89 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
90
Jon Halle1a3b752015-07-22 13:02:46 -070091 try:
92 fileName = "Counters"
93 # TODO: Maybe make a library folder somewhere?
94 path = main.params[ 'imports' ][ 'path' ]
95 main.Counters = imp.load_source( fileName,
96 path + fileName + ".py" )
97 except Exception as e:
98 main.log.exception( e )
99 main.cleanup()
100 main.exit()
101
102 main.CLIs = []
103 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700104 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700105 for i in range( 1, main.numCtrls + 1 ):
106 try:
107 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
108 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
109 ipList.append( main.nodes[ -1 ].ip_address )
110 except AttributeError:
111 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700112
113 main.step( "Create cell file" )
114 cellAppString = main.params[ 'ENV' ][ 'appString' ]
115 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
116 main.Mininet1.ip_address,
117 cellAppString, ipList )
118 main.step( "Applying cell variable to environment" )
119 cellResult = main.ONOSbench.setCell( cellName )
120 verifyResult = main.ONOSbench.verifyCell()
121
122 # FIXME:this is short term fix
123 main.log.info( "Removing raft logs" )
124 main.ONOSbench.onosRemoveRaftLogs()
125
126 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700127 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700128 main.ONOSbench.onosUninstall( node.ip_address )
129
130 # Make sure ONOS is DEAD
131 main.log.info( "Killing any ONOS processes" )
132 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700133 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700134 killed = main.ONOSbench.onosKill( node.ip_address )
135 killResults = killResults and killed
136
137 cleanInstallResult = main.TRUE
138 gitPullResult = main.TRUE
139
140 main.step( "Starting Mininet" )
141 # scp topo file to mininet
142 # TODO: move to params?
143 topoName = "obelisk.py"
144 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700145 main.ONOSbench.scp( main.Mininet1,
146 filePath + topoName,
147 main.Mininet1.home,
148 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700149 mnResult = main.Mininet1.startNet( )
150 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
151 onpass="Mininet Started",
152 onfail="Error starting Mininet" )
153
154 main.step( "Git checkout and pull " + gitBranch )
155 if PULLCODE:
156 main.ONOSbench.gitCheckout( gitBranch )
157 gitPullResult = main.ONOSbench.gitPull()
158 # values of 1 or 3 are good
159 utilities.assert_lesser( expect=0, actual=gitPullResult,
160 onpass="Git pull successful",
161 onfail="Git pull failed" )
162 main.ONOSbench.getVersion( report=True )
163
164 main.step( "Using mvn clean install" )
165 cleanInstallResult = main.TRUE
166 if PULLCODE and gitPullResult == main.TRUE:
167 cleanInstallResult = main.ONOSbench.cleanInstall()
168 else:
169 main.log.warn( "Did not pull new code so skipping mvn " +
170 "clean install" )
171 utilities.assert_equals( expect=main.TRUE,
172 actual=cleanInstallResult,
173 onpass="MCI successful",
174 onfail="MCI failed" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700175
176 main.step( "Make sure ONOS service doesn't automatically respawn" )
177 handle = main.ONOSbench.handle
178 handle.sendline( "sed -i -e 's/^respawn$/#respawn/g' tools/package/init/onos.conf" )
179 handle.expect( "\$" ) # $ from the command
180 handle.expect( "\$" ) # $ from the prompt
181
Jon Hall5cf14d52015-07-16 12:15:19 -0700182 # GRAPHS
183 # NOTE: important params here:
184 # job = name of Jenkins job
185 # Plot Name = Plot-HA, only can be used if multiple plots
186 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700187 job = "HAkillNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700188 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700189 index = "2"
Jon Hall5cf14d52015-07-16 12:15:19 -0700190 graphs = '<ac:structured-macro ac:name="html">\n'
191 graphs += '<ac:plain-text-body><![CDATA[\n'
192 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
Jon Halla9845df2016-01-15 14:55:58 -0800193 '/plot/' + plotName + '/getPlot?index=' + index +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700194 '&width=500&height=300"' +\
195 'noborder="0" width="500" height="300" scrolling="yes" ' +\
196 'seamless="seamless"></iframe>\n'
197 graphs += ']]></ac:plain-text-body>\n'
198 graphs += '</ac:structured-macro>\n'
199 main.log.wiki(graphs)
200
201 main.step( "Creating ONOS package" )
Jon Hall3b489db2015-10-05 14:38:37 -0700202 # copy gen-partions file to ONOS
203 # NOTE: this assumes TestON and ONOS are on the same machine
204 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
205 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
206 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
207 main.ONOSbench.ip_address,
208 srcFile,
209 dstDir,
210 pwd=main.ONOSbench.pwd,
211 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700212 packageResult = main.ONOSbench.onosPackage()
213 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
214 onpass="ONOS package successful",
215 onfail="ONOS package failed" )
216
217 main.step( "Installing ONOS package" )
218 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700219 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700220 tmpResult = main.ONOSbench.onosInstall( options="-f",
221 node=node.ip_address )
222 onosInstallResult = onosInstallResult and tmpResult
223 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
224 onpass="ONOS install successful",
225 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700226 # clean up gen-partitions file
227 try:
228 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
229 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
230 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
231 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
232 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
233 str( main.ONOSbench.handle.before ) )
234 except ( pexpect.TIMEOUT, pexpect.EOF ):
235 main.log.exception( "ONOSbench: pexpect exception found:" +
236 main.ONOSbench.handle.before )
237 main.cleanup()
238 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700239
240 main.step( "Checking if ONOS is up yet" )
241 for i in range( 2 ):
242 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700243 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700244 started = main.ONOSbench.isup( node.ip_address )
245 if not started:
Jon Hallc6793552016-01-19 14:18:37 -0800246 main.log.error( node.name + " hasn't started" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700247 onosIsupResult = onosIsupResult and started
248 if onosIsupResult == main.TRUE:
249 break
250 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
251 onpass="ONOS startup successful",
252 onfail="ONOS startup failed" )
253
254 main.log.step( "Starting ONOS CLI sessions" )
255 cliResults = main.TRUE
256 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700257 for i in range( main.numCtrls ):
258 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700259 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700260 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700261 threads.append( t )
262 t.start()
263
264 for t in threads:
265 t.join()
266 cliResults = cliResults and t.result
267 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
268 onpass="ONOS cli startup successful",
269 onfail="ONOS cli startup failed" )
270
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700271 # Create a list of active nodes for use when some nodes are stopped
272 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
273
Jon Hall5cf14d52015-07-16 12:15:19 -0700274 if main.params[ 'tcpdump' ].lower() == "true":
275 main.step( "Start Packet Capture MN" )
276 main.Mininet2.startTcpdump(
277 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
278 + "-MN.pcap",
279 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
280 port=main.params[ 'MNtcpdump' ][ 'port' ] )
281
282 main.step( "App Ids check" )
Jon Hallf3d16e72015-12-16 17:45:08 -0800283 time.sleep(60)
Jon Hall5cf14d52015-07-16 12:15:19 -0700284 appCheck = main.TRUE
285 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700286 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700287 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700288 name="appToIDCheck-" + str( i ),
289 args=[] )
290 threads.append( t )
291 t.start()
292
293 for t in threads:
294 t.join()
295 appCheck = appCheck and t.result
296 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700297 node = main.activeNodes[0]
298 main.log.warn( main.CLIs[node].apps() )
299 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700300 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
301 onpass="App Ids seem to be correct",
302 onfail="Something is wrong with app Ids" )
303
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700304 main.step( "Clean up ONOS service changes" )
305 handle.sendline( "git checkout -- tools/package/init/onos.conf" )
306 handle.expect( "\$" )
307
Jon Hall5cf14d52015-07-16 12:15:19 -0700308 if cliResults == main.FALSE:
309 main.log.error( "Failed to start ONOS, stopping test" )
310 main.cleanup()
311 main.exit()
312
313 def CASE2( self, main ):
314 """
315 Assign devices to controllers
316 """
317 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700318 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700319 assert main, "main not defined"
320 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700321 assert main.CLIs, "main.CLIs not defined"
322 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700323 assert ONOS1Port, "ONOS1Port not defined"
324 assert ONOS2Port, "ONOS2Port not defined"
325 assert ONOS3Port, "ONOS3Port not defined"
326 assert ONOS4Port, "ONOS4Port not defined"
327 assert ONOS5Port, "ONOS5Port not defined"
328 assert ONOS6Port, "ONOS6Port not defined"
329 assert ONOS7Port, "ONOS7Port not defined"
330
331 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700332 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700333 "and check that an ONOS node becomes the " +\
334 "master of the device."
335 main.step( "Assign switches to controllers" )
336
337 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700338 for i in range( main.numCtrls ):
339 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700340 swList = []
341 for i in range( 1, 29 ):
342 swList.append( "s" + str( i ) )
343 main.Mininet1.assignSwController( sw=swList, ip=ipList )
344
345 mastershipCheck = main.TRUE
346 for i in range( 1, 29 ):
347 response = main.Mininet1.getSwController( "s" + str( i ) )
348 try:
349 main.log.info( str( response ) )
350 except Exception:
351 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700352 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700353 if re.search( "tcp:" + node.ip_address, response ):
354 mastershipCheck = mastershipCheck and main.TRUE
355 else:
356 main.log.error( "Error, node " + node.ip_address + " is " +
357 "not in the list of controllers s" +
358 str( i ) + " is connecting to." )
359 mastershipCheck = main.FALSE
360 utilities.assert_equals(
361 expect=main.TRUE,
362 actual=mastershipCheck,
363 onpass="Switch mastership assigned correctly",
364 onfail="Switches not assigned correctly to controllers" )
365
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually pins each of the 28 obelisk switches to a specific ONOS
        node via 'device-role', then re-reads the roles to verify the
        reassignment took effect. The controller index for each switch is
        taken modulo main.numCtrls so the mapping works for any cluster size.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI commands in this case go through the first active node
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = controller index, ip = that controller's address,
                # deviceId = ONOS device id looked up by dpid suffix
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    # switches s8-s17 share one controller; dpid suffix 3008-3017
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    # switches s18-s27 share one controller; dpid suffix 6018-6027
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                # NOTE: AssertionError here is caught below along with
                # AttributeError from a failed getDevice() lookup
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment: the requested controller must now be master
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
487
488 def CASE3( self, main ):
489 """
490 Assign intents
491 """
492 import time
493 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700494 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700495 assert main, "main not defined"
496 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700497 assert main.CLIs, "main.CLIs not defined"
498 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700499 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700500 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700501 "assign predetermined host-to-host intents." +\
502 " After installation, check that the intent" +\
503 " is distributed to all nodes and the state" +\
504 " is INSTALLED"
505
506 # install onos-app-fwd
507 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700508 onosCli = main.CLIs[ main.activeNodes[0] ]
509 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700510 utilities.assert_equals( expect=main.TRUE, actual=installResults,
511 onpass="Install fwd successful",
512 onfail="Install fwd failed" )
513
514 main.step( "Check app ids" )
515 appCheck = main.TRUE
516 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700517 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700518 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700519 name="appToIDCheck-" + str( i ),
520 args=[] )
521 threads.append( t )
522 t.start()
523
524 for t in threads:
525 t.join()
526 appCheck = appCheck and t.result
527 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700528 main.log.warn( onosCli.apps() )
529 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700530 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
531 onpass="App Ids seem to be correct",
532 onfail="Something is wrong with app Ids" )
533
534 main.step( "Discovering Hosts( Via pingall for now )" )
535 # FIXME: Once we have a host discovery mechanism, use that instead
536 # REACTIVE FWD test
537 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700538 passMsg = "Reactive Pingall test passed"
539 time1 = time.time()
540 pingResult = main.Mininet1.pingall()
541 time2 = time.time()
542 if not pingResult:
543 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700544 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700545 passMsg += " on the second try"
546 utilities.assert_equals(
547 expect=main.TRUE,
548 actual=pingResult,
549 onpass= passMsg,
550 onfail="Reactive Pingall failed, " +
551 "one or more ping pairs failed" )
552 main.log.info( "Time for pingall: %2f seconds" %
553 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700554 # timeout for fwd flows
555 time.sleep( 11 )
556 # uninstall onos-app-fwd
557 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700558 node = main.activeNodes[0]
559 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700560 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
561 onpass="Uninstall fwd successful",
562 onfail="Uninstall fwd failed" )
563
564 main.step( "Check app ids" )
565 threads = []
566 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700567 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700568 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700569 name="appToIDCheck-" + str( i ),
570 args=[] )
571 threads.append( t )
572 t.start()
573
574 for t in threads:
575 t.join()
576 appCheck2 = appCheck2 and t.result
577 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700578 node = main.activeNodes[0]
579 main.log.warn( main.CLIs[node].apps() )
580 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700581 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
582 onpass="App Ids seem to be correct",
583 onfail="Something is wrong with app Ids" )
584
585 main.step( "Add host intents via cli" )
586 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700587 # TODO: move the host numbers to params
588 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700589 intentAddResult = True
590 hostResult = main.TRUE
591 for i in range( 8, 18 ):
592 main.log.info( "Adding host intent between h" + str( i ) +
593 " and h" + str( i + 10 ) )
594 host1 = "00:00:00:00:00:" + \
595 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
596 host2 = "00:00:00:00:00:" + \
597 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
598 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700599 host1Dict = onosCli.getHost( host1 )
600 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700601 host1Id = None
602 host2Id = None
603 if host1Dict and host2Dict:
604 host1Id = host1Dict.get( 'id', None )
605 host2Id = host2Dict.get( 'id', None )
606 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700607 nodeNum = ( i % len( main.activeNodes ) )
608 node = main.activeNodes[nodeNum]
609 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700610 if tmpId:
611 main.log.info( "Added intent with id: " + tmpId )
612 intentIds.append( tmpId )
613 else:
614 main.log.error( "addHostIntent returned: " +
615 repr( tmpId ) )
616 else:
617 main.log.error( "Error, getHost() failed for h" + str( i ) +
618 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700619 node = main.activeNodes[0]
620 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700621 main.log.warn( "Hosts output: " )
622 try:
623 main.log.warn( json.dumps( json.loads( hosts ),
624 sort_keys=True,
625 indent=4,
626 separators=( ',', ': ' ) ) )
627 except ( ValueError, TypeError ):
628 main.log.warn( repr( hosts ) )
629 hostResult = main.FALSE
630 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
631 onpass="Found a host id for each host",
632 onfail="Error looking up host ids" )
633
634 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700635 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700636 main.log.info( "Submitted intents: " + str( intentIds ) )
637 main.log.info( "Intents in ONOS: " + str( onosIds ) )
638 for intent in intentIds:
639 if intent in onosIds:
640 pass # intent submitted is in onos
641 else:
642 intentAddResult = False
643 if intentAddResult:
644 intentStop = time.time()
645 else:
646 intentStop = None
647 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700648 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700649 intentStates = []
650 installedCheck = True
651 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
652 count = 0
653 try:
654 for intent in json.loads( intents ):
655 state = intent.get( 'state', None )
656 if "INSTALLED" not in state:
657 installedCheck = False
658 intentId = intent.get( 'id', None )
659 intentStates.append( ( intentId, state ) )
660 except ( ValueError, TypeError ):
661 main.log.exception( "Error parsing intents" )
662 # add submitted intents not in the store
663 tmplist = [ i for i, s in intentStates ]
664 missingIntents = False
665 for i in intentIds:
666 if i not in tmplist:
667 intentStates.append( ( i, " - " ) )
668 missingIntents = True
669 intentStates.sort()
670 for i, s in intentStates:
671 count += 1
672 main.log.info( "%-6s%-15s%-15s" %
673 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700674 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700675 try:
676 missing = False
677 if leaders:
678 parsedLeaders = json.loads( leaders )
679 main.log.warn( json.dumps( parsedLeaders,
680 sort_keys=True,
681 indent=4,
682 separators=( ',', ': ' ) ) )
683 # check for all intent partitions
684 topics = []
685 for i in range( 14 ):
686 topics.append( "intent-partition-" + str( i ) )
687 main.log.debug( topics )
688 ONOStopics = [ j['topic'] for j in parsedLeaders ]
689 for topic in topics:
690 if topic not in ONOStopics:
691 main.log.error( "Error: " + topic +
692 " not in leaders" )
693 missing = True
694 else:
695 main.log.error( "leaders() returned None" )
696 except ( ValueError, TypeError ):
697 main.log.exception( "Error parsing leaders" )
698 main.log.error( repr( leaders ) )
699 # Check all nodes
700 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700701 for i in main.activeNodes:
702 response = main.CLIs[i].leaders( jsonFormat=False)
703 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700704 str( response ) )
705
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700706 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700707 try:
708 if partitions :
709 parsedPartitions = json.loads( partitions )
710 main.log.warn( json.dumps( parsedPartitions,
711 sort_keys=True,
712 indent=4,
713 separators=( ',', ': ' ) ) )
714 # TODO check for a leader in all paritions
715 # TODO check for consistency among nodes
716 else:
717 main.log.error( "partitions() returned None" )
718 except ( ValueError, TypeError ):
719 main.log.exception( "Error parsing partitions" )
720 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700721 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700722 try:
723 if pendingMap :
724 parsedPending = json.loads( pendingMap )
725 main.log.warn( json.dumps( parsedPending,
726 sort_keys=True,
727 indent=4,
728 separators=( ',', ': ' ) ) )
729 # TODO check something here?
730 else:
731 main.log.error( "pendingMap() returned None" )
732 except ( ValueError, TypeError ):
733 main.log.exception( "Error parsing pending map" )
734 main.log.error( repr( pendingMap ) )
735
736 intentAddResult = bool( intentAddResult and not missingIntents and
737 installedCheck )
738 if not intentAddResult:
739 main.log.error( "Error in pushing host intents to ONOS" )
740
741 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700742 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700743 correct = True
744 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700745 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700746 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700747 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700748 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700749 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700750 str( sorted( onosIds ) ) )
751 if sorted( ids ) != sorted( intentIds ):
752 main.log.warn( "Set of intent IDs doesn't match" )
753 correct = False
754 break
755 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700756 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700757 for intent in intents:
758 if intent[ 'state' ] != "INSTALLED":
759 main.log.warn( "Intent " + intent[ 'id' ] +
760 " is " + intent[ 'state' ] )
761 correct = False
762 break
763 if correct:
764 break
765 else:
766 time.sleep(1)
767 if not intentStop:
768 intentStop = time.time()
769 global gossipTime
770 gossipTime = intentStop - intentStart
771 main.log.info( "It took about " + str( gossipTime ) +
772 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700773 gossipPeriod = int( main.params['timers']['gossip'] )
774 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700775 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700776 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700777 onpass="ECM anti-entropy for intents worked within " +
778 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700779 onfail="Intent ECM anti-entropy took too long. " +
780 "Expected time:{}, Actual time:{}".format( maxGossipTime,
781 gossipTime ) )
782 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700783 intentAddResult = True
784
785 if not intentAddResult or "key" in pendingMap:
786 import time
787 installedCheck = True
788 main.log.info( "Sleeping 60 seconds to see if intents are found" )
789 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700790 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700791 main.log.info( "Submitted intents: " + str( intentIds ) )
792 main.log.info( "Intents in ONOS: " + str( onosIds ) )
793 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700794 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700795 intentStates = []
796 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
797 count = 0
798 try:
799 for intent in json.loads( intents ):
800 # Iter through intents of a node
801 state = intent.get( 'state', None )
802 if "INSTALLED" not in state:
803 installedCheck = False
804 intentId = intent.get( 'id', None )
805 intentStates.append( ( intentId, state ) )
806 except ( ValueError, TypeError ):
807 main.log.exception( "Error parsing intents" )
808 # add submitted intents not in the store
809 tmplist = [ i for i, s in intentStates ]
810 for i in intentIds:
811 if i not in tmplist:
812 intentStates.append( ( i, " - " ) )
813 intentStates.sort()
814 for i, s in intentStates:
815 count += 1
816 main.log.info( "%-6s%-15s%-15s" %
817 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700818 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700819 try:
820 missing = False
821 if leaders:
822 parsedLeaders = json.loads( leaders )
823 main.log.warn( json.dumps( parsedLeaders,
824 sort_keys=True,
825 indent=4,
826 separators=( ',', ': ' ) ) )
827 # check for all intent partitions
828 # check for election
829 topics = []
830 for i in range( 14 ):
831 topics.append( "intent-partition-" + str( i ) )
832 # FIXME: this should only be after we start the app
833 topics.append( "org.onosproject.election" )
834 main.log.debug( topics )
835 ONOStopics = [ j['topic'] for j in parsedLeaders ]
836 for topic in topics:
837 if topic not in ONOStopics:
838 main.log.error( "Error: " + topic +
839 " not in leaders" )
840 missing = True
841 else:
842 main.log.error( "leaders() returned None" )
843 except ( ValueError, TypeError ):
844 main.log.exception( "Error parsing leaders" )
845 main.log.error( repr( leaders ) )
846 # Check all nodes
847 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700848 for i in main.activeNodes:
849 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700850 response = node.leaders( jsonFormat=False)
851 main.log.warn( str( node.name ) + " leaders output: \n" +
852 str( response ) )
853
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700854 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700855 try:
856 if partitions :
857 parsedPartitions = json.loads( partitions )
858 main.log.warn( json.dumps( parsedPartitions,
859 sort_keys=True,
860 indent=4,
861 separators=( ',', ': ' ) ) )
862 # TODO check for a leader in all paritions
863 # TODO check for consistency among nodes
864 else:
865 main.log.error( "partitions() returned None" )
866 except ( ValueError, TypeError ):
867 main.log.exception( "Error parsing partitions" )
868 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700869 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700870 try:
871 if pendingMap :
872 parsedPending = json.loads( pendingMap )
873 main.log.warn( json.dumps( parsedPending,
874 sort_keys=True,
875 indent=4,
876 separators=( ',', ': ' ) ) )
877 # TODO check something here?
878 else:
879 main.log.error( "pendingMap() returned None" )
880 except ( ValueError, TypeError ):
881 main.log.exception( "Error parsing pending map" )
882 main.log.error( repr( pendingMap ) )
883
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Verifies dataplane connectivity for the host intents added earlier by
        pinging each h<i> -> h<i+10> host pair for i in 8..17, then polls (up
        to 40 times, 1 s apart) until every intent reported by ONOS is in the
        INSTALLED state. Also checks that each expected intent-partition topic
        has a leader. If intents are still not all INSTALLED, waits 60 seconds,
        dumps leaders/partitions/pendingMap/flow state for debugging, and
        repeats the ping sweep once more.

        All ONOS state queries go through the CLI of the first active node
        ( main.CLIs[ main.activeNodes[0] ] ).

        Required context (asserted below): main.numCtrls, main.CLIs,
        main.nodes, and the utilities assertion helpers.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                                "functionality and check the state of " +\
                                "the intent"
        main.step( "Ping across added host intents" )
        # Use the first active node's CLI for all ONOS state queries below
        onosCli = main.CLIs[ main.activeNodes[0] ]
        PingResult = main.TRUE
        # Host pairs follow the topology convention: h8<->h18, h9<->h19, ...
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # Dump the intents from ONOS1 to help diagnose the ping failures
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check Intent state" )
        # Poll until all intents are INSTALLED, retrying up to 40 times
        # with a 1 second sleep between attempts
        installedCheck = False
        loopCount = 0
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Check leadership of topics" )
        # Each of the 14 intent partitions should have an elected leader;
        # compare the expected topic names against the leaders output
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            # Log the raw leaders output from every active node for comparison
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            # Intents never converged above: give the cluster another minute,
            # dump full state for debugging, then retry the ping sweep
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                # A topic had no leader: dump leaders from every active node
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1164
1165 def CASE5( self, main ):
1166 """
1167 Reading state of ONOS
1168 """
1169 import json
1170 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001171 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001172 assert main, "main not defined"
1173 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001174 assert main.CLIs, "main.CLIs not defined"
1175 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001176
1177 main.case( "Setting up and gathering data for current state" )
1178 # The general idea for this test case is to pull the state of
1179 # ( intents,flows, topology,... ) from each ONOS node
1180 # We can then compare them with each other and also with past states
1181
1182 main.step( "Check that each switch has a master" )
1183 global mastershipState
1184 mastershipState = '[]'
1185
1186 # Assert that each device has a master
1187 rolesNotNull = main.TRUE
1188 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001189 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001190 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001191 name="rolesNotNull-" + str( i ),
1192 args=[] )
1193 threads.append( t )
1194 t.start()
1195
1196 for t in threads:
1197 t.join()
1198 rolesNotNull = rolesNotNull and t.result
1199 utilities.assert_equals(
1200 expect=main.TRUE,
1201 actual=rolesNotNull,
1202 onpass="Each device has a master",
1203 onfail="Some devices don't have a master assigned" )
1204
1205 main.step( "Get the Mastership of each switch from each controller" )
1206 ONOSMastership = []
1207 mastershipCheck = main.FALSE
1208 consistentMastership = True
1209 rolesResults = True
1210 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001211 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001212 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001213 name="roles-" + str( i ),
1214 args=[] )
1215 threads.append( t )
1216 t.start()
1217
1218 for t in threads:
1219 t.join()
1220 ONOSMastership.append( t.result )
1221
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001222 for i in range( len( ONOSMastership ) ):
1223 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001224 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001225 main.log.error( "Error in getting ONOS" + node + " roles" )
1226 main.log.warn( "ONOS" + node + " mastership response: " +
1227 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001228 rolesResults = False
1229 utilities.assert_equals(
1230 expect=True,
1231 actual=rolesResults,
1232 onpass="No error in reading roles output",
1233 onfail="Error in reading roles from ONOS" )
1234
1235 main.step( "Check for consistency in roles from each controller" )
1236 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1237 main.log.info(
1238 "Switch roles are consistent across all ONOS nodes" )
1239 else:
1240 consistentMastership = False
1241 utilities.assert_equals(
1242 expect=True,
1243 actual=consistentMastership,
1244 onpass="Switch roles are consistent across all ONOS nodes",
1245 onfail="ONOS nodes have different views of switch roles" )
1246
1247 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001248 for i in range( len( main.activeNodes ) ):
1249 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001250 try:
1251 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001252 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001253 json.dumps(
1254 json.loads( ONOSMastership[ i ] ),
1255 sort_keys=True,
1256 indent=4,
1257 separators=( ',', ': ' ) ) )
1258 except ( ValueError, TypeError ):
1259 main.log.warn( repr( ONOSMastership[ i ] ) )
1260 elif rolesResults and consistentMastership:
1261 mastershipCheck = main.TRUE
1262 mastershipState = ONOSMastership[ 0 ]
1263
1264 main.step( "Get the intents from each controller" )
1265 global intentState
1266 intentState = []
1267 ONOSIntents = []
1268 intentCheck = main.FALSE
1269 consistentIntents = True
1270 intentsResults = True
1271 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001272 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001273 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001274 name="intents-" + str( i ),
1275 args=[],
1276 kwargs={ 'jsonFormat': True } )
1277 threads.append( t )
1278 t.start()
1279
1280 for t in threads:
1281 t.join()
1282 ONOSIntents.append( t.result )
1283
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001284 for i in range( len( ONOSIntents ) ):
1285 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001286 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001287 main.log.error( "Error in getting ONOS" + node + " intents" )
1288 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001289 repr( ONOSIntents[ i ] ) )
1290 intentsResults = False
1291 utilities.assert_equals(
1292 expect=True,
1293 actual=intentsResults,
1294 onpass="No error in reading intents output",
1295 onfail="Error in reading intents from ONOS" )
1296
1297 main.step( "Check for consistency in Intents from each controller" )
1298 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1299 main.log.info( "Intents are consistent across all ONOS " +
1300 "nodes" )
1301 else:
1302 consistentIntents = False
1303 main.log.error( "Intents not consistent" )
1304 utilities.assert_equals(
1305 expect=True,
1306 actual=consistentIntents,
1307 onpass="Intents are consistent across all ONOS nodes",
1308 onfail="ONOS nodes have different views of intents" )
1309
1310 if intentsResults:
1311 # Try to make it easy to figure out what is happening
1312 #
1313 # Intent ONOS1 ONOS2 ...
1314 # 0x01 INSTALLED INSTALLING
1315 # ... ... ...
1316 # ... ... ...
1317 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001318 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001319 title += " " * 10 + "ONOS" + str( n + 1 )
1320 main.log.warn( title )
1321 # get all intent keys in the cluster
1322 keys = []
1323 for nodeStr in ONOSIntents:
1324 node = json.loads( nodeStr )
1325 for intent in node:
1326 keys.append( intent.get( 'id' ) )
1327 keys = set( keys )
1328 for key in keys:
1329 row = "%-13s" % key
1330 for nodeStr in ONOSIntents:
1331 node = json.loads( nodeStr )
1332 for intent in node:
1333 if intent.get( 'id', "Error" ) == key:
1334 row += "%-15s" % intent.get( 'state' )
1335 main.log.warn( row )
1336 # End table view
1337
1338 if intentsResults and not consistentIntents:
1339 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001340 n = str( main.activeNodes[-1] + 1 )
1341 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001342 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1343 sort_keys=True,
1344 indent=4,
1345 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001346 for i in range( len( ONOSIntents ) ):
1347 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001348 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001349 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001350 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1351 sort_keys=True,
1352 indent=4,
1353 separators=( ',', ': ' ) ) )
1354 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001355 main.log.debug( "ONOS" + node + " intents match ONOS" +
1356 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001357 elif intentsResults and consistentIntents:
1358 intentCheck = main.TRUE
1359 intentState = ONOSIntents[ 0 ]
1360
1361 main.step( "Get the flows from each controller" )
1362 global flowState
1363 flowState = []
1364 ONOSFlows = []
1365 ONOSFlowsJson = []
1366 flowCheck = main.FALSE
1367 consistentFlows = True
1368 flowsResults = True
1369 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001370 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001371 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001372 name="flows-" + str( i ),
1373 args=[],
1374 kwargs={ 'jsonFormat': True } )
1375 threads.append( t )
1376 t.start()
1377
1378 # NOTE: Flows command can take some time to run
1379 time.sleep(30)
1380 for t in threads:
1381 t.join()
1382 result = t.result
1383 ONOSFlows.append( result )
1384
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001385 for i in range( len( ONOSFlows ) ):
1386 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001387 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1388 main.log.error( "Error in getting ONOS" + num + " flows" )
1389 main.log.warn( "ONOS" + num + " flows response: " +
1390 repr( ONOSFlows[ i ] ) )
1391 flowsResults = False
1392 ONOSFlowsJson.append( None )
1393 else:
1394 try:
1395 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1396 except ( ValueError, TypeError ):
1397 # FIXME: change this to log.error?
1398 main.log.exception( "Error in parsing ONOS" + num +
1399 " response as json." )
1400 main.log.error( repr( ONOSFlows[ i ] ) )
1401 ONOSFlowsJson.append( None )
1402 flowsResults = False
1403 utilities.assert_equals(
1404 expect=True,
1405 actual=flowsResults,
1406 onpass="No error in reading flows output",
1407 onfail="Error in reading flows from ONOS" )
1408
1409 main.step( "Check for consistency in Flows from each controller" )
1410 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1411 if all( tmp ):
1412 main.log.info( "Flow count is consistent across all ONOS nodes" )
1413 else:
1414 consistentFlows = False
1415 utilities.assert_equals(
1416 expect=True,
1417 actual=consistentFlows,
1418 onpass="The flow count is consistent across all ONOS nodes",
1419 onfail="ONOS nodes have different flow counts" )
1420
1421 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001422 for i in range( len( ONOSFlows ) ):
1423 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001424 try:
1425 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001426 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001427 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1428 indent=4, separators=( ',', ': ' ) ) )
1429 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001430 main.log.warn( "ONOS" + node + " flows: " +
1431 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001432 elif flowsResults and consistentFlows:
1433 flowCheck = main.TRUE
1434 flowState = ONOSFlows[ 0 ]
1435
1436 main.step( "Get the OF Table entries" )
1437 global flows
1438 flows = []
1439 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001440 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001441 if flowCheck == main.FALSE:
1442 for table in flows:
1443 main.log.warn( table )
1444 # TODO: Compare switch flow tables with ONOS flow tables
1445
1446 main.step( "Start continuous pings" )
1447 main.Mininet2.pingLong(
1448 src=main.params[ 'PING' ][ 'source1' ],
1449 target=main.params[ 'PING' ][ 'target1' ],
1450 pingTime=500 )
1451 main.Mininet2.pingLong(
1452 src=main.params[ 'PING' ][ 'source2' ],
1453 target=main.params[ 'PING' ][ 'target2' ],
1454 pingTime=500 )
1455 main.Mininet2.pingLong(
1456 src=main.params[ 'PING' ][ 'source3' ],
1457 target=main.params[ 'PING' ][ 'target3' ],
1458 pingTime=500 )
1459 main.Mininet2.pingLong(
1460 src=main.params[ 'PING' ][ 'source4' ],
1461 target=main.params[ 'PING' ][ 'target4' ],
1462 pingTime=500 )
1463 main.Mininet2.pingLong(
1464 src=main.params[ 'PING' ][ 'source5' ],
1465 target=main.params[ 'PING' ][ 'target5' ],
1466 pingTime=500 )
1467 main.Mininet2.pingLong(
1468 src=main.params[ 'PING' ][ 'source6' ],
1469 target=main.params[ 'PING' ][ 'target6' ],
1470 pingTime=500 )
1471 main.Mininet2.pingLong(
1472 src=main.params[ 'PING' ][ 'source7' ],
1473 target=main.params[ 'PING' ][ 'target7' ],
1474 pingTime=500 )
1475 main.Mininet2.pingLong(
1476 src=main.params[ 'PING' ][ 'source8' ],
1477 target=main.params[ 'PING' ][ 'target8' ],
1478 pingTime=500 )
1479 main.Mininet2.pingLong(
1480 src=main.params[ 'PING' ][ 'source9' ],
1481 target=main.params[ 'PING' ][ 'target9' ],
1482 pingTime=500 )
1483 main.Mininet2.pingLong(
1484 src=main.params[ 'PING' ][ 'source10' ],
1485 target=main.params[ 'PING' ][ 'target10' ],
1486 pingTime=500 )
1487
1488 main.step( "Collecting topology information from ONOS" )
1489 devices = []
1490 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001491 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001492 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001493 name="devices-" + str( i ),
1494 args=[ ] )
1495 threads.append( t )
1496 t.start()
1497
1498 for t in threads:
1499 t.join()
1500 devices.append( t.result )
1501 hosts = []
1502 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001503 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001504 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001505 name="hosts-" + str( i ),
1506 args=[ ] )
1507 threads.append( t )
1508 t.start()
1509
1510 for t in threads:
1511 t.join()
1512 try:
1513 hosts.append( json.loads( t.result ) )
1514 except ( ValueError, TypeError ):
1515 # FIXME: better handling of this, print which node
1516 # Maybe use thread name?
1517 main.log.exception( "Error parsing json output of hosts" )
Jon Hallf3d16e72015-12-16 17:45:08 -08001518 main.log.warn( repr( t.result ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001519 hosts.append( None )
1520
1521 ports = []
1522 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001523 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001524 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001525 name="ports-" + str( i ),
1526 args=[ ] )
1527 threads.append( t )
1528 t.start()
1529
1530 for t in threads:
1531 t.join()
1532 ports.append( t.result )
1533 links = []
1534 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001535 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001536 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001537 name="links-" + str( i ),
1538 args=[ ] )
1539 threads.append( t )
1540 t.start()
1541
1542 for t in threads:
1543 t.join()
1544 links.append( t.result )
1545 clusters = []
1546 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001547 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001548 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001549 name="clusters-" + str( i ),
1550 args=[ ] )
1551 threads.append( t )
1552 t.start()
1553
1554 for t in threads:
1555 t.join()
1556 clusters.append( t.result )
1557 # Compare json objects for hosts and dataplane clusters
1558
1559 # hosts
1560 main.step( "Host view is consistent across ONOS nodes" )
1561 consistentHostsResult = main.TRUE
1562 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001563 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001564 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001565 if hosts[ controller ] == hosts[ 0 ]:
1566 continue
1567 else: # hosts not consistent
1568 main.log.error( "hosts from ONOS" +
1569 controllerStr +
1570 " is inconsistent with ONOS1" )
1571 main.log.warn( repr( hosts[ controller ] ) )
1572 consistentHostsResult = main.FALSE
1573
1574 else:
1575 main.log.error( "Error in getting ONOS hosts from ONOS" +
1576 controllerStr )
1577 consistentHostsResult = main.FALSE
1578 main.log.warn( "ONOS" + controllerStr +
1579 " hosts response: " +
1580 repr( hosts[ controller ] ) )
1581 utilities.assert_equals(
1582 expect=main.TRUE,
1583 actual=consistentHostsResult,
1584 onpass="Hosts view is consistent across all ONOS nodes",
1585 onfail="ONOS nodes have different views of hosts" )
1586
1587 main.step( "Each host has an IP address" )
1588 ipResult = main.TRUE
1589 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001590 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallf3d16e72015-12-16 17:45:08 -08001591 if hosts[ controller ]:
1592 for host in hosts[ controller ]:
1593 if not host.get( 'ipAddresses', [ ] ):
1594 main.log.error( "Error with host ips on controller" +
1595 controllerStr + ": " + str( host ) )
1596 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07001597 utilities.assert_equals(
1598 expect=main.TRUE,
1599 actual=ipResult,
1600 onpass="The ips of the hosts aren't empty",
1601 onfail="The ip of at least one host is missing" )
1602
1603 # Strongly connected clusters of devices
1604 main.step( "Cluster view is consistent across ONOS nodes" )
1605 consistentClustersResult = main.TRUE
1606 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001607 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001608 if "Error" not in clusters[ controller ]:
1609 if clusters[ controller ] == clusters[ 0 ]:
1610 continue
1611 else: # clusters not consistent
1612 main.log.error( "clusters from ONOS" + controllerStr +
1613 " is inconsistent with ONOS1" )
1614 consistentClustersResult = main.FALSE
1615
1616 else:
1617 main.log.error( "Error in getting dataplane clusters " +
1618 "from ONOS" + controllerStr )
1619 consistentClustersResult = main.FALSE
1620 main.log.warn( "ONOS" + controllerStr +
1621 " clusters response: " +
1622 repr( clusters[ controller ] ) )
1623 utilities.assert_equals(
1624 expect=main.TRUE,
1625 actual=consistentClustersResult,
1626 onpass="Clusters view is consistent across all ONOS nodes",
1627 onfail="ONOS nodes have different views of clusters" )
1628 # there should always only be one cluster
1629 main.step( "Cluster view correct across ONOS nodes" )
1630 try:
1631 numClusters = len( json.loads( clusters[ 0 ] ) )
1632 except ( ValueError, TypeError ):
1633 main.log.exception( "Error parsing clusters[0]: " +
1634 repr( clusters[ 0 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08001635 numClusters = "ERROR"
Jon Hall5cf14d52015-07-16 12:15:19 -07001636 clusterResults = main.FALSE
1637 if numClusters == 1:
1638 clusterResults = main.TRUE
1639 utilities.assert_equals(
1640 expect=1,
1641 actual=numClusters,
1642 onpass="ONOS shows 1 SCC",
1643 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1644
1645 main.step( "Comparing ONOS topology to MN" )
1646 devicesResults = main.TRUE
1647 linksResults = main.TRUE
1648 hostsResults = main.TRUE
1649 mnSwitches = main.Mininet1.getSwitches()
1650 mnLinks = main.Mininet1.getLinks()
1651 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001652 for controller in main.activeNodes:
1653 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001654 if devices[ controller ] and ports[ controller ] and\
1655 "Error" not in devices[ controller ] and\
1656 "Error" not in ports[ controller ]:
Jon Hall6e709752016-02-01 13:38:46 -08001657 currentDevicesResult = main.Mininet1.compareSwitches(
1658 mnSwitches,
1659 json.loads( devices[ controller ] ),
1660 json.loads( ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001661 else:
1662 currentDevicesResult = main.FALSE
1663 utilities.assert_equals( expect=main.TRUE,
1664 actual=currentDevicesResult,
1665 onpass="ONOS" + controllerStr +
1666 " Switches view is correct",
1667 onfail="ONOS" + controllerStr +
1668 " Switches view is incorrect" )
1669 if links[ controller ] and "Error" not in links[ controller ]:
1670 currentLinksResult = main.Mininet1.compareLinks(
1671 mnSwitches, mnLinks,
1672 json.loads( links[ controller ] ) )
1673 else:
1674 currentLinksResult = main.FALSE
1675 utilities.assert_equals( expect=main.TRUE,
1676 actual=currentLinksResult,
1677 onpass="ONOS" + controllerStr +
1678 " links view is correct",
1679 onfail="ONOS" + controllerStr +
1680 " links view is incorrect" )
1681
Jon Hall657cdf62015-12-17 14:40:51 -08001682 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07001683 currentHostsResult = main.Mininet1.compareHosts(
1684 mnHosts,
1685 hosts[ controller ] )
1686 else:
1687 currentHostsResult = main.FALSE
1688 utilities.assert_equals( expect=main.TRUE,
1689 actual=currentHostsResult,
1690 onpass="ONOS" + controllerStr +
1691 " hosts exist in Mininet",
1692 onfail="ONOS" + controllerStr +
1693 " hosts don't match Mininet" )
1694
1695 devicesResults = devicesResults and currentDevicesResult
1696 linksResults = linksResults and currentLinksResult
1697 hostsResults = hostsResults and currentHostsResult
1698
1699 main.step( "Device information is correct" )
1700 utilities.assert_equals(
1701 expect=main.TRUE,
1702 actual=devicesResults,
1703 onpass="Device information is correct",
1704 onfail="Device information is incorrect" )
1705
1706 main.step( "Links are correct" )
1707 utilities.assert_equals(
1708 expect=main.TRUE,
1709 actual=linksResults,
1710 onpass="Link are correct",
1711 onfail="Links are incorrect" )
1712
1713 main.step( "Hosts are correct" )
1714 utilities.assert_equals(
1715 expect=main.TRUE,
1716 actual=hostsResults,
1717 onpass="Hosts are correct",
1718 onfail="Hosts are incorrect" )
1719
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001720 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001721 """
1722 The Failure case.
1723 """
Jon Halle1a3b752015-07-22 13:02:46 -07001724 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001725 assert main, "main not defined"
1726 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001727 assert main.CLIs, "main.CLIs not defined"
1728 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001729 main.case( "Kill minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001730
1731 main.step( "Checking ONOS Logs for errors" )
1732 for node in main.nodes:
1733 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1734 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1735
Jon Hall3b489db2015-10-05 14:38:37 -07001736 n = len( main.nodes ) # Number of nodes
1737 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1738 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1739 if n > 3:
1740 main.kill.append( p - 1 )
1741 # NOTE: This only works for cluster sizes of 3,5, or 7.
1742
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001743 main.step( "Kill " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001744 killResults = main.TRUE
1745 for i in main.kill:
1746 killResults = killResults and\
1747 main.ONOSbench.onosKill( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001748 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001749 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001750 onpass="ONOS nodes killed successfully",
1751 onfail="ONOS nodes NOT successfully killed" )
1752
1753 def CASE62( self, main ):
1754 """
1755 The bring up stopped nodes
1756 """
1757 import time
1758 assert main.numCtrls, "main.numCtrls not defined"
1759 assert main, "main not defined"
1760 assert utilities.assert_equals, "utilities.assert_equals not defined"
1761 assert main.CLIs, "main.CLIs not defined"
1762 assert main.nodes, "main.nodes not defined"
1763 assert main.kill, "main.kill not defined"
1764 main.case( "Restart minority of ONOS nodes" )
1765
1766 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1767 startResults = main.TRUE
1768 restartTime = time.time()
1769 for i in main.kill:
1770 startResults = startResults and\
1771 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1772 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1773 onpass="ONOS nodes started successfully",
1774 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001775
1776 main.step( "Checking if ONOS is up yet" )
1777 count = 0
1778 onosIsupResult = main.FALSE
1779 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001780 onosIsupResult = main.TRUE
1781 for i in main.kill:
1782 onosIsupResult = onosIsupResult and\
1783 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001784 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001785 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1786 onpass="ONOS restarted successfully",
1787 onfail="ONOS restart NOT successful" )
1788
Jon Halle1a3b752015-07-22 13:02:46 -07001789 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001790 cliResults = main.TRUE
1791 for i in main.kill:
1792 cliResults = cliResults and\
1793 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001794 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001795 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1796 onpass="ONOS cli restarted",
1797 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001798 main.activeNodes.sort()
1799 try:
1800 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1801 "List of active nodes has duplicates, this likely indicates something was run out of order"
1802 except AssertionError:
1803 main.log.exception( "" )
1804 main.cleanup()
1805 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001806
1807 # Grab the time of restart so we chan check how long the gossip
1808 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001809 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001810 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001811 # TODO: MAke this configurable. Also, we are breaking the above timer
1812 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001813 node = main.activeNodes[0]
1814 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1815 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1816 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001817
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        Verifies, using only the surviving nodes in main.activeNodes, that:
        every switch still has a master; device roles and intents are
        readable and consistent across nodes; current intents match those
        recorded before the failure; switch flow tables are unchanged; and
        the leadership election app still has a single, live leader.

        NOTE(review): relies on module-level variables set by earlier cases:
        'intentState' and 'flows' (presumably captured in CASE5) and
        main.kill (set in CASE61) -- confirm those cases ran first.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # Ensure main.kill exists even if CASE61 was skipped
        try:
            main.kill
        except AttributeError:
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel; AND the results together
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect the raw roles output from each active node in parallel
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output is empty or contains "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes' raw output is compared against the first node's
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's pretty-printed roles for debug
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        # Collect JSON intents from each active node in parallel
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        # Flag any node whose intents output is empty or contains "Error"
        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare sorted raw strings so ordering differences don't matter
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

        # Try to make it easy to figure out what is happening
        #
        # Intent ONOS1 ONOS2 ...
        # 0x01 INSTALLED INSTALLING
        # ... ... ...
        # ... ... ...
        # Log a table of intent id vs. per-node intent state
        title = " ID"
        for n in main.activeNodes:
            title += " " * 10 + "ONOS" + str( n + 1 )
        main.log.warn( title )
        # get all intent keys in the cluster
        keys = []
        for nodeStr in ONOSIntents:
            node = json.loads( nodeStr )
            for intent in node:
                keys.append( intent.get( 'id' ) )
        keys = set( keys )
        for key in keys:
            row = "%-13s" % key
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    if intent.get( 'id' ) == key:
                        row += "%-15s" % intent.get( 'state' )
            main.log.warn( row )
        # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states, e.g. {INSTALLED: 10}
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            # Readable but inconsistent: dump each node's intents for debug
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        # restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        # maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # First try an exact raw-string match against the pre-failure capture
        if intentState and intentState == ONOSIntents[ 0 ]:
            sameIntents = main.TRUE
            main.log.info( "Intents are consistent with before failure" )
        # TODO: possibly the states have changed? we may need to figure out
        # what the acceptable states are
        elif len( intentState ) == len( ONOSIntents[ 0 ] ):
            # Same raw length: fall back to element-wise JSON comparison
            sameIntents = main.TRUE
            try:
                before = json.loads( intentState )
                after = json.loads( ONOSIntents[ 0 ] )
                for intent in before:
                    if intent not in after:
                        sameIntents = main.FALSE
                        main.log.debug( "Intent is not currently in ONOS " +
                                        "(at least in the same form):" )
                        main.log.debug( json.dumps( intent ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        if sameIntents == main.FALSE:
            # Dump before/after intents to help diagnose the difference
            try:
                main.log.debug( "ONOS intents before: " )
                main.log.debug( json.dumps( json.loads( intentState ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
                main.log.debug( "Current ONOS intents: " )
                main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        # NOTE(review): 'flows' is assumed to hold per-switch flow tables
        # captured before the failure (28 switches, topology-specific) --
        # confirm against the capturing case.
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
            if FlowTables == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )

        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        # Stop the long-running background ping started earlier
        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes that were killed; the leader must not be one
        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        # Ask every active node who the election-app leader is; fail on
        # errors, a missing leader, a killed leader, or disagreement
        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
        # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2134
2135 def CASE8( self, main ):
2136 """
2137 Compare topo
2138 """
2139 import json
2140 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002141 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002142 assert main, "main not defined"
2143 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002144 assert main.CLIs, "main.CLIs not defined"
2145 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002146
2147 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002148 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002149 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002150 topoResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002151 topoFailMsg = "ONOS topology don't match Mininet"
Jon Hall5cf14d52015-07-16 12:15:19 -07002152 elapsed = 0
2153 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002154 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002155 startTime = time.time()
2156 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002157 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002158 devicesResults = main.TRUE
2159 linksResults = main.TRUE
2160 hostsResults = main.TRUE
2161 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002162 count += 1
2163 cliStart = time.time()
2164 devices = []
2165 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002166 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002167 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002168 name="devices-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002169 args=[ main.CLIs[i].devices, [ None ] ],
2170 kwargs= { 'sleep': 5, 'attempts': 5,
2171 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002172 threads.append( t )
2173 t.start()
2174
2175 for t in threads:
2176 t.join()
2177 devices.append( t.result )
2178 hosts = []
2179 ipResult = main.TRUE
2180 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002181 for i in main.activeNodes:
Jon Halld8f6de82015-12-17 17:04:34 -08002182 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002183 name="hosts-" + str( i ),
Jon Halld8f6de82015-12-17 17:04:34 -08002184 args=[ main.CLIs[i].hosts, [ None ] ],
2185 kwargs= { 'sleep': 5, 'attempts': 5,
2186 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002187 threads.append( t )
2188 t.start()
2189
2190 for t in threads:
2191 t.join()
2192 try:
2193 hosts.append( json.loads( t.result ) )
2194 except ( ValueError, TypeError ):
2195 main.log.exception( "Error parsing hosts results" )
2196 main.log.error( repr( t.result ) )
Jon Hallf3d16e72015-12-16 17:45:08 -08002197 hosts.append( None )
Jon Hall5cf14d52015-07-16 12:15:19 -07002198 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002199 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hallacd1b182015-12-17 11:43:20 -08002200 if hosts[ controller ]:
2201 for host in hosts[ controller ]:
2202 if host is None or host.get( 'ipAddresses', [] ) == []:
2203 main.log.error(
2204 "Error with host ipAddresses on controller" +
2205 controllerStr + ": " + str( host ) )
2206 ipResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002207 ports = []
2208 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002209 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002210 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002211 name="ports-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002212 args=[ main.CLIs[i].ports, [ None ] ],
2213 kwargs= { 'sleep': 5, 'attempts': 5,
2214 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002215 threads.append( t )
2216 t.start()
2217
2218 for t in threads:
2219 t.join()
2220 ports.append( t.result )
2221 links = []
2222 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002223 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002224 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002225 name="links-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002226 args=[ main.CLIs[i].links, [ None ] ],
2227 kwargs= { 'sleep': 5, 'attempts': 5,
2228 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002229 threads.append( t )
2230 t.start()
2231
2232 for t in threads:
2233 t.join()
2234 links.append( t.result )
2235 clusters = []
2236 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002237 for i in main.activeNodes:
Jon Hall6e709752016-02-01 13:38:46 -08002238 t = main.Thread( target=utilities.retry,
Jon Hall5cf14d52015-07-16 12:15:19 -07002239 name="clusters-" + str( i ),
Jon Hall6e709752016-02-01 13:38:46 -08002240 args=[ main.CLIs[i].clusters, [ None ] ],
2241 kwargs= { 'sleep': 5, 'attempts': 5,
2242 'randomTime': True } )
Jon Hall5cf14d52015-07-16 12:15:19 -07002243 threads.append( t )
2244 t.start()
2245
2246 for t in threads:
2247 t.join()
2248 clusters.append( t.result )
2249
2250 elapsed = time.time() - startTime
2251 cliTime = time.time() - cliStart
2252 print "Elapsed time: " + str( elapsed )
2253 print "CLI time: " + str( cliTime )
2254
Jon Hall6e709752016-02-01 13:38:46 -08002255 if all( e is None for e in devices ) and\
2256 all( e is None for e in hosts ) and\
2257 all( e is None for e in ports ) and\
2258 all( e is None for e in links ) and\
2259 all( e is None for e in clusters ):
2260 topoFailMsg = "Could not get topology from ONOS"
2261 main.log.error( topoFailMsg )
2262 continue # Try again, No use trying to compare
2263
Jon Hall5cf14d52015-07-16 12:15:19 -07002264 mnSwitches = main.Mininet1.getSwitches()
2265 mnLinks = main.Mininet1.getLinks()
2266 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002267 for controller in range( len( main.activeNodes ) ):
2268 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002269 if devices[ controller ] and ports[ controller ] and\
2270 "Error" not in devices[ controller ] and\
2271 "Error" not in ports[ controller ]:
2272
Jon Hallc6793552016-01-19 14:18:37 -08002273 try:
2274 currentDevicesResult = main.Mininet1.compareSwitches(
2275 mnSwitches,
2276 json.loads( devices[ controller ] ),
2277 json.loads( ports[ controller ] ) )
2278 except ( TypeError, ValueError ) as e:
2279 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2280 devices[ controller ], ports[ controller ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002281 else:
2282 currentDevicesResult = main.FALSE
2283 utilities.assert_equals( expect=main.TRUE,
2284 actual=currentDevicesResult,
2285 onpass="ONOS" + controllerStr +
2286 " Switches view is correct",
2287 onfail="ONOS" + controllerStr +
2288 " Switches view is incorrect" )
2289
2290 if links[ controller ] and "Error" not in links[ controller ]:
2291 currentLinksResult = main.Mininet1.compareLinks(
2292 mnSwitches, mnLinks,
2293 json.loads( links[ controller ] ) )
2294 else:
2295 currentLinksResult = main.FALSE
2296 utilities.assert_equals( expect=main.TRUE,
2297 actual=currentLinksResult,
2298 onpass="ONOS" + controllerStr +
2299 " links view is correct",
2300 onfail="ONOS" + controllerStr +
2301 " links view is incorrect" )
Jon Hall657cdf62015-12-17 14:40:51 -08002302 if hosts[ controller ] and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002303 currentHostsResult = main.Mininet1.compareHosts(
2304 mnHosts,
2305 hosts[ controller ] )
Jon Hall13b446e2016-01-05 12:17:01 -08002306 elif hosts[ controller ] == []:
2307 currentHostsResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002308 else:
2309 currentHostsResult = main.FALSE
2310 utilities.assert_equals( expect=main.TRUE,
2311 actual=currentHostsResult,
2312 onpass="ONOS" + controllerStr +
2313 " hosts exist in Mininet",
2314 onfail="ONOS" + controllerStr +
2315 " hosts don't match Mininet" )
2316 # CHECKING HOST ATTACHMENT POINTS
2317 hostAttachment = True
2318 zeroHosts = False
2319 # FIXME: topo-HA/obelisk specific mappings:
2320 # key is mac and value is dpid
2321 mappings = {}
2322 for i in range( 1, 29 ): # hosts 1 through 28
2323 # set up correct variables:
2324 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2325 if i == 1:
2326 deviceId = "1000".zfill(16)
2327 elif i == 2:
2328 deviceId = "2000".zfill(16)
2329 elif i == 3:
2330 deviceId = "3000".zfill(16)
2331 elif i == 4:
2332 deviceId = "3004".zfill(16)
2333 elif i == 5:
2334 deviceId = "5000".zfill(16)
2335 elif i == 6:
2336 deviceId = "6000".zfill(16)
2337 elif i == 7:
2338 deviceId = "6007".zfill(16)
2339 elif i >= 8 and i <= 17:
2340 dpid = '3' + str( i ).zfill( 3 )
2341 deviceId = dpid.zfill(16)
2342 elif i >= 18 and i <= 27:
2343 dpid = '6' + str( i ).zfill( 3 )
2344 deviceId = dpid.zfill(16)
2345 elif i == 28:
2346 deviceId = "2800".zfill(16)
2347 mappings[ macId ] = deviceId
Jon Halld8f6de82015-12-17 17:04:34 -08002348 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002349 if hosts[ controller ] == []:
2350 main.log.warn( "There are no hosts discovered" )
2351 zeroHosts = True
2352 else:
2353 for host in hosts[ controller ]:
2354 mac = None
2355 location = None
2356 device = None
2357 port = None
2358 try:
2359 mac = host.get( 'mac' )
2360 assert mac, "mac field could not be found for this host object"
2361
2362 location = host.get( 'location' )
2363 assert location, "location field could not be found for this host object"
2364
2365 # Trim the protocol identifier off deviceId
2366 device = str( location.get( 'elementId' ) ).split(':')[1]
2367 assert device, "elementId field could not be found for this host location object"
2368
2369 port = location.get( 'port' )
2370 assert port, "port field could not be found for this host location object"
2371
2372 # Now check if this matches where they should be
2373 if mac and device and port:
2374 if str( port ) != "1":
2375 main.log.error( "The attachment port is incorrect for " +
2376 "host " + str( mac ) +
2377 ". Expected: 1 Actual: " + str( port) )
2378 hostAttachment = False
2379 if device != mappings[ str( mac ) ]:
2380 main.log.error( "The attachment device is incorrect for " +
2381 "host " + str( mac ) +
2382 ". Expected: " + mappings[ str( mac ) ] +
2383 " Actual: " + device )
2384 hostAttachment = False
2385 else:
2386 hostAttachment = False
2387 except AssertionError:
2388 main.log.exception( "Json object not as expected" )
2389 main.log.error( repr( host ) )
2390 hostAttachment = False
2391 else:
2392 main.log.error( "No hosts json output or \"Error\"" +
2393 " in output. hosts = " +
2394 repr( hosts[ controller ] ) )
2395 if zeroHosts is False:
2396 hostAttachment = True
2397
2398 # END CHECKING HOST ATTACHMENT POINTS
2399 devicesResults = devicesResults and currentDevicesResult
2400 linksResults = linksResults and currentLinksResult
2401 hostsResults = hostsResults and currentHostsResult
2402 hostAttachmentResults = hostAttachmentResults and\
2403 hostAttachment
Jon Halle9b1fa32015-12-08 15:32:21 -08002404 topoResult = devicesResults and linksResults and\
2405 hostsResults and hostAttachmentResults
2406 utilities.assert_equals( expect=True,
2407 actual=topoResult,
2408 onpass="ONOS topology matches Mininet",
Jon Hall6e709752016-02-01 13:38:46 -08002409 onfail=topoFailMsg )
Jon Halle9b1fa32015-12-08 15:32:21 -08002410 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002411
2412 # Compare json objects for hosts and dataplane clusters
2413
2414 # hosts
2415 main.step( "Hosts view is consistent across all ONOS nodes" )
2416 consistentHostsResult = main.TRUE
2417 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002418 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall13b446e2016-01-05 12:17:01 -08002419 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
Jon Hall5cf14d52015-07-16 12:15:19 -07002420 if hosts[ controller ] == hosts[ 0 ]:
2421 continue
2422 else: # hosts not consistent
2423 main.log.error( "hosts from ONOS" + controllerStr +
2424 " is inconsistent with ONOS1" )
2425 main.log.warn( repr( hosts[ controller ] ) )
2426 consistentHostsResult = main.FALSE
2427
2428 else:
2429 main.log.error( "Error in getting ONOS hosts from ONOS" +
2430 controllerStr )
2431 consistentHostsResult = main.FALSE
2432 main.log.warn( "ONOS" + controllerStr +
2433 " hosts response: " +
2434 repr( hosts[ controller ] ) )
2435 utilities.assert_equals(
2436 expect=main.TRUE,
2437 actual=consistentHostsResult,
2438 onpass="Hosts view is consistent across all ONOS nodes",
2439 onfail="ONOS nodes have different views of hosts" )
2440
2441 main.step( "Hosts information is correct" )
2442 hostsResults = hostsResults and ipResult
2443 utilities.assert_equals(
2444 expect=main.TRUE,
2445 actual=hostsResults,
2446 onpass="Host information is correct",
2447 onfail="Host information is incorrect" )
2448
2449 main.step( "Host attachment points to the network" )
2450 utilities.assert_equals(
2451 expect=True,
2452 actual=hostAttachmentResults,
2453 onpass="Hosts are correctly attached to the network",
2454 onfail="ONOS did not correctly attach hosts to the network" )
2455
2456 # Strongly connected clusters of devices
2457 main.step( "Clusters view is consistent across all ONOS nodes" )
2458 consistentClustersResult = main.TRUE
2459 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002460 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002461 if "Error" not in clusters[ controller ]:
2462 if clusters[ controller ] == clusters[ 0 ]:
2463 continue
2464 else: # clusters not consistent
2465 main.log.error( "clusters from ONOS" +
2466 controllerStr +
2467 " is inconsistent with ONOS1" )
2468 consistentClustersResult = main.FALSE
2469
2470 else:
2471 main.log.error( "Error in getting dataplane clusters " +
2472 "from ONOS" + controllerStr )
2473 consistentClustersResult = main.FALSE
2474 main.log.warn( "ONOS" + controllerStr +
2475 " clusters response: " +
2476 repr( clusters[ controller ] ) )
2477 utilities.assert_equals(
2478 expect=main.TRUE,
2479 actual=consistentClustersResult,
2480 onpass="Clusters view is consistent across all ONOS nodes",
2481 onfail="ONOS nodes have different views of clusters" )
2482
2483 main.step( "There is only one SCC" )
2484 # there should always only be one cluster
2485 try:
2486 numClusters = len( json.loads( clusters[ 0 ] ) )
2487 except ( ValueError, TypeError ):
2488 main.log.exception( "Error parsing clusters[0]: " +
2489 repr( clusters[0] ) )
2490 clusterResults = main.FALSE
2491 if numClusters == 1:
2492 clusterResults = main.TRUE
2493 utilities.assert_equals(
2494 expect=1,
2495 actual=numClusters,
2496 onpass="ONOS shows 1 SCC",
2497 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2498
2499 topoResult = ( devicesResults and linksResults
2500 and hostsResults and consistentHostsResult
2501 and consistentClustersResult and clusterResults
2502 and ipResult and hostAttachmentResults )
2503
2504 topoResult = topoResult and int( count <= 2 )
2505 note = "note it takes about " + str( int( cliTime ) ) + \
2506 " seconds for the test to make all the cli calls to fetch " +\
2507 "the topology from each ONOS instance"
2508 main.log.info(
2509 "Very crass estimate for topology discovery/convergence( " +
2510 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2511 str( count ) + " tries" )
2512
2513 main.step( "Device information is correct" )
2514 utilities.assert_equals(
2515 expect=main.TRUE,
2516 actual=devicesResults,
2517 onpass="Device information is correct",
2518 onfail="Device information is incorrect" )
2519
2520 main.step( "Links are correct" )
2521 utilities.assert_equals(
2522 expect=main.TRUE,
2523 actual=linksResults,
2524 onpass="Link are correct",
2525 onfail="Links are incorrect" )
2526
2527 # FIXME: move this to an ONOS state case
2528 main.step( "Checking ONOS nodes" )
2529 nodesOutput = []
2530 nodeResults = main.TRUE
2531 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002532 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002533 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002534 name="nodes-" + str( i ),
2535 args=[ ] )
2536 threads.append( t )
2537 t.start()
2538
2539 for t in threads:
2540 t.join()
2541 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002542 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002543 ips.sort()
Jon Hall5cf14d52015-07-16 12:15:19 -07002544 for i in nodesOutput:
2545 try:
2546 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002547 activeIps = []
2548 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002549 for node in current:
Jon Halle9b1fa32015-12-08 15:32:21 -08002550 if node['state'] == 'ACTIVE':
2551 activeIps.append( node['ip'] )
2552 activeIps.sort()
2553 if ips == activeIps:
2554 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002555 except ( ValueError, TypeError ):
2556 main.log.error( "Error parsing nodes output" )
2557 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002558 currentResult = main.FALSE
2559 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002560 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2561 onpass="Nodes check successful",
2562 onfail="Nodes check NOT successful" )
2563
2564 def CASE9( self, main ):
2565 """
2566 Link s3-s28 down
2567 """
2568 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002569 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002570 assert main, "main not defined"
2571 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002572 assert main.CLIs, "main.CLIs not defined"
2573 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002574 # NOTE: You should probably run a topology check after this
2575
2576 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2577
2578 description = "Turn off a link to ensure that Link Discovery " +\
2579 "is working properly"
2580 main.case( description )
2581
2582 main.step( "Kill Link between s3 and s28" )
2583 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2584 main.log.info( "Waiting " + str( linkSleep ) +
2585 " seconds for link down to be discovered" )
2586 time.sleep( linkSleep )
2587 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2588 onpass="Link down successful",
2589 onfail="Failed to bring link down" )
2590 # TODO do some sort of check here
2591
2592 def CASE10( self, main ):
2593 """
2594 Link s3-s28 up
2595 """
2596 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002597 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002598 assert main, "main not defined"
2599 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002600 assert main.CLIs, "main.CLIs not defined"
2601 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002602 # NOTE: You should probably run a topology check after this
2603
2604 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2605
2606 description = "Restore a link to ensure that Link Discovery is " + \
2607 "working properly"
2608 main.case( description )
2609
2610 main.step( "Bring link between s3 and s28 back up" )
2611 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2612 main.log.info( "Waiting " + str( linkSleep ) +
2613 " seconds for link up to be discovered" )
2614 time.sleep( linkSleep )
2615 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2616 onpass="Link up successful",
2617 onfail="Failed to bring link up" )
2618 # TODO do some sort of check here
2619
2620 def CASE11( self, main ):
2621 """
2622 Switch Down
2623 """
2624 # NOTE: You should probably run a topology check after this
2625 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002626 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002627 assert main, "main not defined"
2628 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002629 assert main.CLIs, "main.CLIs not defined"
2630 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002631
2632 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2633
2634 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002635 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002636 main.case( description )
2637 switch = main.params[ 'kill' ][ 'switch' ]
2638 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2639
2640 # TODO: Make this switch parameterizable
2641 main.step( "Kill " + switch )
2642 main.log.info( "Deleting " + switch )
2643 main.Mininet1.delSwitch( switch )
2644 main.log.info( "Waiting " + str( switchSleep ) +
2645 " seconds for switch down to be discovered" )
2646 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002647 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002648 # Peek at the deleted switch
2649 main.log.warn( str( device ) )
2650 result = main.FALSE
2651 if device and device[ 'available' ] is False:
2652 result = main.TRUE
2653 utilities.assert_equals( expect=main.TRUE, actual=result,
2654 onpass="Kill switch successful",
2655 onfail="Failed to kill switch?" )
2656
2657 def CASE12( self, main ):
2658 """
2659 Switch Up
2660 """
2661 # NOTE: You should probably run a topology check after this
2662 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002663 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002664 assert main, "main not defined"
2665 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002666 assert main.CLIs, "main.CLIs not defined"
2667 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002668 assert ONOS1Port, "ONOS1Port not defined"
2669 assert ONOS2Port, "ONOS2Port not defined"
2670 assert ONOS3Port, "ONOS3Port not defined"
2671 assert ONOS4Port, "ONOS4Port not defined"
2672 assert ONOS5Port, "ONOS5Port not defined"
2673 assert ONOS6Port, "ONOS6Port not defined"
2674 assert ONOS7Port, "ONOS7Port not defined"
2675
2676 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2677 switch = main.params[ 'kill' ][ 'switch' ]
2678 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2679 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002680 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002681 description = "Adding a switch to ensure it is discovered correctly"
2682 main.case( description )
2683
2684 main.step( "Add back " + switch )
2685 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2686 for peer in links:
2687 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002688 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002689 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2690 main.log.info( "Waiting " + str( switchSleep ) +
2691 " seconds for switch up to be discovered" )
2692 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002693 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002694 # Peek at the deleted switch
2695 main.log.warn( str( device ) )
2696 result = main.FALSE
2697 if device and device[ 'available' ]:
2698 result = main.TRUE
2699 utilities.assert_equals( expect=main.TRUE, actual=result,
2700 onpass="add switch successful",
2701 onfail="Failed to add switch?" )
2702
2703 def CASE13( self, main ):
2704 """
2705 Clean up
2706 """
2707 import os
2708 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002709 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002710 assert main, "main not defined"
2711 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002712 assert main.CLIs, "main.CLIs not defined"
2713 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002714
2715 # printing colors to terminal
2716 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2717 'blue': '\033[94m', 'green': '\033[92m',
2718 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2719 main.case( "Test Cleanup" )
2720 main.step( "Killing tcpdumps" )
2721 main.Mininet2.stopTcpdump()
2722
2723 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002724 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002725 main.step( "Copying MN pcap and ONOS log files to test station" )
2726 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2727 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002728 # NOTE: MN Pcap file is being saved to logdir.
2729 # We scp this file as MN and TestON aren't necessarily the same vm
2730
2731 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002732 # TODO: Load these from params
2733 # NOTE: must end in /
2734 logFolder = "/opt/onos/log/"
2735 logFiles = [ "karaf.log", "karaf.log.1" ]
2736 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002737 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002738 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002739 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002740 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2741 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002742 # std*.log's
2743 # NOTE: must end in /
2744 logFolder = "/opt/onos/var/"
2745 logFiles = [ "stderr.log", "stdout.log" ]
2746 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002747 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002748 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002749 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002750 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2751 logFolder + f, dstName )
2752 else:
2753 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002754
2755 main.step( "Stopping Mininet" )
2756 mnResult = main.Mininet1.stopNet()
2757 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2758 onpass="Mininet stopped",
2759 onfail="MN cleanup NOT successful" )
2760
2761 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002762 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002763 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2764 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002765
2766 try:
2767 timerLog = open( main.logdir + "/Timers.csv", 'w')
2768 # Overwrite with empty line and close
2769 labels = "Gossip Intents, Restart"
2770 data = str( gossipTime ) + ", " + str( main.restartTime )
2771 timerLog.write( labels + "\n" + data )
2772 timerLog.close()
2773 except NameError, e:
2774 main.log.exception(e)
2775
2776 def CASE14( self, main ):
2777 """
2778 start election app on all onos nodes
2779 """
Jon Halle1a3b752015-07-22 13:02:46 -07002780 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002781 assert main, "main not defined"
2782 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002783 assert main.CLIs, "main.CLIs not defined"
2784 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002785
2786 main.case("Start Leadership Election app")
2787 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002788 onosCli = main.CLIs[ main.activeNodes[0] ]
2789 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002790 utilities.assert_equals(
2791 expect=main.TRUE,
2792 actual=appResult,
2793 onpass="Election app installed",
2794 onfail="Something went wrong with installing Leadership election" )
2795
2796 main.step( "Run for election on each node" )
2797 leaderResult = main.TRUE
2798 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002799 for i in main.activeNodes:
2800 main.CLIs[i].electionTestRun()
2801 for i in main.activeNodes:
2802 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002803 leader = cli.electionTestLeader()
2804 if leader is None or leader == main.FALSE:
2805 main.log.error( cli.name + ": Leader for the election app " +
2806 "should be an ONOS node, instead got '" +
2807 str( leader ) + "'" )
2808 leaderResult = main.FALSE
2809 leaders.append( leader )
2810 utilities.assert_equals(
2811 expect=main.TRUE,
2812 actual=leaderResult,
2813 onpass="Successfully ran for leadership",
2814 onfail="Failed to run for leadership" )
2815
2816 main.step( "Check that each node shows the same leader" )
2817 sameLeader = main.TRUE
2818 if len( set( leaders ) ) != 1:
2819 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002820 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002821 str( leaders ) )
2822 utilities.assert_equals(
2823 expect=main.TRUE,
2824 actual=sameLeader,
2825 onpass="Leadership is consistent for the election topic",
2826 onfail="Nodes have different leaders" )
2827
2828 def CASE15( self, main ):
2829 """
2830 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002831 15.1 Run election on each node
2832 15.2 Check that each node has the same leaders and candidates
2833 15.3 Find current leader and withdraw
2834 15.4 Check that a new node was elected leader
2835 15.5 Check that that new leader was the candidate of old leader
2836 15.6 Run for election on old leader
2837 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2838 15.8 Make sure that the old leader was added to the candidate list
2839
2840 old and new variable prefixes refer to data from before vs after
2841 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002842 """
2843 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002844 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002845 assert main, "main not defined"
2846 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002847 assert main.CLIs, "main.CLIs not defined"
2848 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002849
Jon Hall5cf14d52015-07-16 12:15:19 -07002850 description = "Check that Leadership Election is still functional"
2851 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002852 # NOTE: Need to re-run since being a canidate is not persistant
2853 # TODO: add check for "Command not found:" in the driver, this
2854 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002855
acsmars71adceb2015-08-31 15:09:26 -07002856 oldLeaders = [] # leaders by node before withdrawl from candidates
2857 newLeaders = [] # leaders by node after withdrawl from candidates
2858 oldAllCandidates = [] # list of lists of each nodes' candidates before
2859 newAllCandidates = [] # list of lists of each nodes' candidates after
2860 oldCandidates = [] # list of candidates from node 0 before withdrawl
2861 newCandidates = [] # list of candidates from node 0 after withdrawl
2862 oldLeader = '' # the old leader from oldLeaders, None if not same
2863 newLeader = '' # the new leaders fron newLoeaders, None if not same
2864 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2865 expectNoLeader = False # True when there is only one leader
2866 if main.numCtrls == 1:
2867 expectNoLeader = True
2868
2869 main.step( "Run for election on each node" )
2870 electionResult = main.TRUE
2871
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002872 for i in main.activeNodes: # run test election on each node
2873 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002874 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002875 utilities.assert_equals(
2876 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002877 actual=electionResult,
2878 onpass="All nodes successfully ran for leadership",
2879 onfail="At least one node failed to run for leadership" )
2880
acsmars3a72bde2015-09-02 14:16:22 -07002881 if electionResult == main.FALSE:
2882 main.log.error(
2883 "Skipping Test Case because Election Test App isn't loaded" )
2884 main.skipCase()
2885
acsmars71adceb2015-08-31 15:09:26 -07002886 main.step( "Check that each node shows the same leader and candidates" )
2887 sameResult = main.TRUE
2888 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002889 for i in main.activeNodes:
2890 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002891 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2892 oldAllCandidates.append( node )
Jon Hall6e709752016-02-01 13:38:46 -08002893 if node:
2894 oldLeaders.append( node[ 0 ] )
2895 else:
2896 oldLeaders.append( None )
acsmars71adceb2015-08-31 15:09:26 -07002897 oldCandidates = oldAllCandidates[ 0 ]
Jon Hall6e709752016-02-01 13:38:46 -08002898 if oldCandidates is None:
2899 oldCandidates = [ None ]
acsmars71adceb2015-08-31 15:09:26 -07002900
2901 # Check that each node has the same leader. Defines oldLeader
2902 if len( set( oldLeaders ) ) != 1:
2903 sameResult = main.FALSE
2904 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2905 oldLeader = None
2906 else:
2907 oldLeader = oldLeaders[ 0 ]
2908
2909 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002910 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002911 for candidates in oldAllCandidates:
Jon Hall6e709752016-02-01 13:38:46 -08002912 if candidates is None:
2913 main.log.warn( "Error getting candidates" )
2914 candidates = [ None ]
acsmars71adceb2015-08-31 15:09:26 -07002915 if set( candidates ) != set( oldCandidates ):
2916 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002917 candidateDiscrepancy = True
acsmars29233db2015-11-04 11:15:00 -08002918 if candidateDiscrepancy:
2919 failMessage += " and candidates"
acsmars71adceb2015-08-31 15:09:26 -07002920 utilities.assert_equals(
2921 expect=main.TRUE,
2922 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002923 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002924 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002925
2926 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002927 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002928 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002929 if oldLeader is None:
2930 main.log.error( "Leadership isn't consistent." )
2931 withdrawResult = main.FALSE
2932 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002933 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002934 if oldLeader == main.nodes[ i ].ip_address:
2935 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002936 break
2937 else: # FOR/ELSE statement
2938 main.log.error( "Leader election, could not find current leader" )
2939 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002940 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002941 utilities.assert_equals(
2942 expect=main.TRUE,
2943 actual=withdrawResult,
2944 onpass="Node was withdrawn from election",
2945 onfail="Node was not withdrawn from election" )
2946
acsmars71adceb2015-08-31 15:09:26 -07002947 main.step( "Check that a new node was elected leader" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002948 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002949 newLeaderResult = main.TRUE
2950 failMessage = "Nodes have different leaders"
2951
2952 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002953 for i in main.activeNodes:
2954 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002955 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2956 # elections might no have finished yet
2957 if node[ 0 ] == 'none' and not expectNoLeader:
2958 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2959 "sure elections are complete." )
2960 time.sleep(5)
2961 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2962 # election still isn't done or there is a problem
2963 if node[ 0 ] == 'none':
2964 main.log.error( "No leader was elected on at least 1 node" )
2965 newLeaderResult = main.FALSE
2966 newAllCandidates.append( node )
2967 newLeaders.append( node[ 0 ] )
2968 newCandidates = newAllCandidates[ 0 ]
2969
2970 # Check that each node has the same leader. Defines newLeader
2971 if len( set( newLeaders ) ) != 1:
2972 newLeaderResult = main.FALSE
2973 main.log.error( "Nodes have different leaders: " +
2974 str( newLeaders ) )
2975 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002976 else:
acsmars71adceb2015-08-31 15:09:26 -07002977 newLeader = newLeaders[ 0 ]
2978
2979 # Check that each node's candidate list is the same
2980 for candidates in newAllCandidates:
2981 if set( candidates ) != set( newCandidates ):
2982 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002983 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002984
2985 # Check that the new leader is not the older leader, which was withdrawn
2986 if newLeader == oldLeader:
2987 newLeaderResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002988 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
acsmars71adceb2015-08-31 15:09:26 -07002989 " as the current leader" )
2990
Jon Hall5cf14d52015-07-16 12:15:19 -07002991 utilities.assert_equals(
2992 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002993 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002994 onpass="Leadership election passed",
2995 onfail="Something went wrong with Leadership election" )
2996
acsmars71adceb2015-08-31 15:09:26 -07002997 main.step( "Check that that new leader was the candidate of old leader")
Jon Hall6e709752016-02-01 13:38:46 -08002998 # candidates[ 2 ] should become the top candidate after withdrawl
acsmars71adceb2015-08-31 15:09:26 -07002999 correctCandidateResult = main.TRUE
3000 if expectNoLeader:
3001 if newLeader == 'none':
3002 main.log.info( "No leader expected. None found. Pass" )
3003 correctCandidateResult = main.TRUE
3004 else:
3005 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3006 correctCandidateResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08003007 elif len( oldCandidates ) >= 3 and newLeader != oldCandidates[ 2 ]:
acsmars71adceb2015-08-31 15:09:26 -07003008 correctCandidateResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08003009 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3010 newLeader, oldCandidates[ 2 ] ) )
3011 else:
3012 main.log.warn( "Could not determine who should be the correct leader" )
3013 correctCandidateResult = main.FALSE
acsmars71adceb2015-08-31 15:09:26 -07003014 utilities.assert_equals(
3015 expect=main.TRUE,
3016 actual=correctCandidateResult,
3017 onpass="Correct Candidate Elected",
3018 onfail="Incorrect Candidate Elected" )
3019
Jon Hall5cf14d52015-07-16 12:15:19 -07003020 main.step( "Run for election on old leader( just so everyone " +
3021 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07003022 if oldLeaderCLI is not None:
3023 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07003024 else:
acsmars71adceb2015-08-31 15:09:26 -07003025 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003026 runResult = main.FALSE
3027 utilities.assert_equals(
3028 expect=main.TRUE,
3029 actual=runResult,
3030 onpass="App re-ran for election",
3031 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07003032 main.step(
3033 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003034 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07003035 positionResult = main.TRUE
3036 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
3037
3038 # Reset and reuse the new candidate and leaders lists
3039 newAllCandidates = []
3040 newCandidates = []
3041 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003042 for i in main.activeNodes:
3043 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003044 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3045 if oldLeader not in node: # election might no have finished yet
3046 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3047 "be sure elections are complete" )
3048 time.sleep(5)
3049 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3050 if oldLeader not in node: # election still isn't done, errors
3051 main.log.error(
3052 "Old leader was not elected on at least one node" )
3053 positionResult = main.FALSE
3054 newAllCandidates.append( node )
3055 newLeaders.append( node[ 0 ] )
3056 newCandidates = newAllCandidates[ 0 ]
3057
3058 # Check that each node has the same leader. Defines newLeader
3059 if len( set( newLeaders ) ) != 1:
3060 positionResult = main.FALSE
3061 main.log.error( "Nodes have different leaders: " +
3062 str( newLeaders ) )
3063 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003064 else:
acsmars71adceb2015-08-31 15:09:26 -07003065 newLeader = newLeaders[ 0 ]
3066
3067 # Check that each node's candidate list is the same
3068 for candidates in newAllCandidates:
3069 if set( candidates ) != set( newCandidates ):
3070 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003071 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003072
3073 # Check that the re-elected node is last on the candidate List
3074 if oldLeader != newCandidates[ -1 ]:
Jon Hall6e709752016-02-01 13:38:46 -08003075 main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003076 str( newCandidates ) )
3077 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003078
3079 utilities.assert_equals(
3080 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003081 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003082 onpass="Old leader successfully re-ran for election",
3083 onfail="Something went wrong with Leadership election after " +
3084 "the old leader re-ran for election" )
3085
3086 def CASE16( self, main ):
3087 """
3088 Install Distributed Primitives app
3089 """
3090 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003091 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003092 assert main, "main not defined"
3093 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003094 assert main.CLIs, "main.CLIs not defined"
3095 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003096
3097 # Variables for the distributed primitives tests
3098 global pCounterName
3099 global iCounterName
3100 global pCounterValue
3101 global iCounterValue
3102 global onosSet
3103 global onosSetName
3104 pCounterName = "TestON-Partitions"
3105 iCounterName = "TestON-inMemory"
3106 pCounterValue = 0
3107 iCounterValue = 0
3108 onosSet = set([])
3109 onosSetName = "TestON-set"
3110
3111 description = "Install Primitives app"
3112 main.case( description )
3113 main.step( "Install Primitives app" )
3114 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003115 node = main.activeNodes[0]
3116 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003117 utilities.assert_equals( expect=main.TRUE,
3118 actual=appResults,
3119 onpass="Primitives app activated",
3120 onfail="Primitives app not activated" )
3121 time.sleep( 5 ) # To allow all nodes to activate
3122
3123 def CASE17( self, main ):
3124 """
3125 Check for basic functionality with distributed primitives
3126 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003127 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003128 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003129 assert main, "main not defined"
3130 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003131 assert main.CLIs, "main.CLIs not defined"
3132 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003133 assert pCounterName, "pCounterName not defined"
3134 assert iCounterName, "iCounterName not defined"
3135 assert onosSetName, "onosSetName not defined"
3136 # NOTE: assert fails if value is 0/None/Empty/False
3137 try:
3138 pCounterValue
3139 except NameError:
3140 main.log.error( "pCounterValue not defined, setting to 0" )
3141 pCounterValue = 0
3142 try:
3143 iCounterValue
3144 except NameError:
3145 main.log.error( "iCounterValue not defined, setting to 0" )
3146 iCounterValue = 0
3147 try:
3148 onosSet
3149 except NameError:
3150 main.log.error( "onosSet not defined, setting to empty Set" )
3151 onosSet = set([])
3152 # Variables for the distributed primitives tests. These are local only
3153 addValue = "a"
3154 addAllValue = "a b c d e f"
3155 retainValue = "c d e f"
3156
3157 description = "Check for basic functionality with distributed " +\
3158 "primitives"
3159 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003160 main.caseExplanation = "Test the methods of the distributed " +\
3161 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003162 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003163 # Partitioned counters
3164 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003165 pCounters = []
3166 threads = []
3167 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003168 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003169 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3170 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003171 args=[ pCounterName ] )
3172 pCounterValue += 1
3173 addedPValues.append( pCounterValue )
3174 threads.append( t )
3175 t.start()
3176
3177 for t in threads:
3178 t.join()
3179 pCounters.append( t.result )
3180 # Check that counter incremented numController times
3181 pCounterResults = True
3182 for i in addedPValues:
3183 tmpResult = i in pCounters
3184 pCounterResults = pCounterResults and tmpResult
3185 if not tmpResult:
3186 main.log.error( str( i ) + " is not in partitioned "
3187 "counter incremented results" )
3188 utilities.assert_equals( expect=True,
3189 actual=pCounterResults,
3190 onpass="Default counter incremented",
3191 onfail="Error incrementing default" +
3192 " counter" )
3193
Jon Halle1a3b752015-07-22 13:02:46 -07003194 main.step( "Get then Increment a default counter on each node" )
3195 pCounters = []
3196 threads = []
3197 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003198 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003199 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3200 name="counterGetAndAdd-" + str( i ),
3201 args=[ pCounterName ] )
3202 addedPValues.append( pCounterValue )
3203 pCounterValue += 1
3204 threads.append( t )
3205 t.start()
3206
3207 for t in threads:
3208 t.join()
3209 pCounters.append( t.result )
3210 # Check that counter incremented numController times
3211 pCounterResults = True
3212 for i in addedPValues:
3213 tmpResult = i in pCounters
3214 pCounterResults = pCounterResults and tmpResult
3215 if not tmpResult:
3216 main.log.error( str( i ) + " is not in partitioned "
3217 "counter incremented results" )
3218 utilities.assert_equals( expect=True,
3219 actual=pCounterResults,
3220 onpass="Default counter incremented",
3221 onfail="Error incrementing default" +
3222 " counter" )
3223
3224 main.step( "Counters we added have the correct values" )
3225 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3226 utilities.assert_equals( expect=main.TRUE,
3227 actual=incrementCheck,
3228 onpass="Added counters are correct",
3229 onfail="Added counters are incorrect" )
3230
3231 main.step( "Add -8 to then get a default counter on each node" )
3232 pCounters = []
3233 threads = []
3234 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003235 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003236 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3237 name="counterIncrement-" + str( i ),
3238 args=[ pCounterName ],
3239 kwargs={ "delta": -8 } )
3240 pCounterValue += -8
3241 addedPValues.append( pCounterValue )
3242 threads.append( t )
3243 t.start()
3244
3245 for t in threads:
3246 t.join()
3247 pCounters.append( t.result )
3248 # Check that counter incremented numController times
3249 pCounterResults = True
3250 for i in addedPValues:
3251 tmpResult = i in pCounters
3252 pCounterResults = pCounterResults and tmpResult
3253 if not tmpResult:
3254 main.log.error( str( i ) + " is not in partitioned "
3255 "counter incremented results" )
3256 utilities.assert_equals( expect=True,
3257 actual=pCounterResults,
3258 onpass="Default counter incremented",
3259 onfail="Error incrementing default" +
3260 " counter" )
3261
3262 main.step( "Add 5 to then get a default counter on each node" )
3263 pCounters = []
3264 threads = []
3265 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003266 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003267 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3268 name="counterIncrement-" + str( i ),
3269 args=[ pCounterName ],
3270 kwargs={ "delta": 5 } )
3271 pCounterValue += 5
3272 addedPValues.append( pCounterValue )
3273 threads.append( t )
3274 t.start()
3275
3276 for t in threads:
3277 t.join()
3278 pCounters.append( t.result )
3279 # Check that counter incremented numController times
3280 pCounterResults = True
3281 for i in addedPValues:
3282 tmpResult = i in pCounters
3283 pCounterResults = pCounterResults and tmpResult
3284 if not tmpResult:
3285 main.log.error( str( i ) + " is not in partitioned "
3286 "counter incremented results" )
3287 utilities.assert_equals( expect=True,
3288 actual=pCounterResults,
3289 onpass="Default counter incremented",
3290 onfail="Error incrementing default" +
3291 " counter" )
3292
3293 main.step( "Get then add 5 to a default counter on each node" )
3294 pCounters = []
3295 threads = []
3296 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003297 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003298 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3299 name="counterIncrement-" + str( i ),
3300 args=[ pCounterName ],
3301 kwargs={ "delta": 5 } )
3302 addedPValues.append( pCounterValue )
3303 pCounterValue += 5
3304 threads.append( t )
3305 t.start()
3306
3307 for t in threads:
3308 t.join()
3309 pCounters.append( t.result )
3310 # Check that counter incremented numController times
3311 pCounterResults = True
3312 for i in addedPValues:
3313 tmpResult = i in pCounters
3314 pCounterResults = pCounterResults and tmpResult
3315 if not tmpResult:
3316 main.log.error( str( i ) + " is not in partitioned "
3317 "counter incremented results" )
3318 utilities.assert_equals( expect=True,
3319 actual=pCounterResults,
3320 onpass="Default counter incremented",
3321 onfail="Error incrementing default" +
3322 " counter" )
3323
3324 main.step( "Counters we added have the correct values" )
3325 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3326 utilities.assert_equals( expect=main.TRUE,
3327 actual=incrementCheck,
3328 onpass="Added counters are correct",
3329 onfail="Added counters are incorrect" )
3330
3331 # In-Memory counters
3332 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003333 iCounters = []
3334 addedIValues = []
3335 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003336 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003337 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003338 name="icounterIncrement-" + str( i ),
3339 args=[ iCounterName ],
3340 kwargs={ "inMemory": True } )
3341 iCounterValue += 1
3342 addedIValues.append( iCounterValue )
3343 threads.append( t )
3344 t.start()
3345
3346 for t in threads:
3347 t.join()
3348 iCounters.append( t.result )
3349 # Check that counter incremented numController times
3350 iCounterResults = True
3351 for i in addedIValues:
3352 tmpResult = i in iCounters
3353 iCounterResults = iCounterResults and tmpResult
3354 if not tmpResult:
3355 main.log.error( str( i ) + " is not in the in-memory "
3356 "counter incremented results" )
3357 utilities.assert_equals( expect=True,
3358 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003359 onpass="In-memory counter incremented",
3360 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003361 " counter" )
3362
Jon Halle1a3b752015-07-22 13:02:46 -07003363 main.step( "Get then Increment a in-memory counter on each node" )
3364 iCounters = []
3365 threads = []
3366 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003367 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003368 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3369 name="counterGetAndAdd-" + str( i ),
3370 args=[ iCounterName ],
3371 kwargs={ "inMemory": True } )
3372 addedIValues.append( iCounterValue )
3373 iCounterValue += 1
3374 threads.append( t )
3375 t.start()
3376
3377 for t in threads:
3378 t.join()
3379 iCounters.append( t.result )
3380 # Check that counter incremented numController times
3381 iCounterResults = True
3382 for i in addedIValues:
3383 tmpResult = i in iCounters
3384 iCounterResults = iCounterResults and tmpResult
3385 if not tmpResult:
3386 main.log.error( str( i ) + " is not in in-memory "
3387 "counter incremented results" )
3388 utilities.assert_equals( expect=True,
3389 actual=iCounterResults,
3390 onpass="In-memory counter incremented",
3391 onfail="Error incrementing in-memory" +
3392 " counter" )
3393
3394 main.step( "Counters we added have the correct values" )
3395 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3396 utilities.assert_equals( expect=main.TRUE,
3397 actual=incrementCheck,
3398 onpass="Added counters are correct",
3399 onfail="Added counters are incorrect" )
3400
3401 main.step( "Add -8 to then get a in-memory counter on each node" )
3402 iCounters = []
3403 threads = []
3404 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003405 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003406 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3407 name="counterIncrement-" + str( i ),
3408 args=[ iCounterName ],
3409 kwargs={ "delta": -8, "inMemory": True } )
3410 iCounterValue += -8
3411 addedIValues.append( iCounterValue )
3412 threads.append( t )
3413 t.start()
3414
3415 for t in threads:
3416 t.join()
3417 iCounters.append( t.result )
3418 # Check that counter incremented numController times
3419 iCounterResults = True
3420 for i in addedIValues:
3421 tmpResult = i in iCounters
3422 iCounterResults = iCounterResults and tmpResult
3423 if not tmpResult:
3424 main.log.error( str( i ) + " is not in in-memory "
3425 "counter incremented results" )
3426 utilities.assert_equals( expect=True,
3427 actual=pCounterResults,
3428 onpass="In-memory counter incremented",
3429 onfail="Error incrementing in-memory" +
3430 " counter" )
3431
3432 main.step( "Add 5 to then get a in-memory counter on each node" )
3433 iCounters = []
3434 threads = []
3435 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003436 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003437 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3438 name="counterIncrement-" + str( i ),
3439 args=[ iCounterName ],
3440 kwargs={ "delta": 5, "inMemory": True } )
3441 iCounterValue += 5
3442 addedIValues.append( iCounterValue )
3443 threads.append( t )
3444 t.start()
3445
3446 for t in threads:
3447 t.join()
3448 iCounters.append( t.result )
3449 # Check that counter incremented numController times
3450 iCounterResults = True
3451 for i in addedIValues:
3452 tmpResult = i in iCounters
3453 iCounterResults = iCounterResults and tmpResult
3454 if not tmpResult:
3455 main.log.error( str( i ) + " is not in in-memory "
3456 "counter incremented results" )
3457 utilities.assert_equals( expect=True,
3458 actual=pCounterResults,
3459 onpass="In-memory counter incremented",
3460 onfail="Error incrementing in-memory" +
3461 " counter" )
3462
3463 main.step( "Get then add 5 to a in-memory counter on each node" )
3464 iCounters = []
3465 threads = []
3466 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003467 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003468 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3469 name="counterIncrement-" + str( i ),
3470 args=[ iCounterName ],
3471 kwargs={ "delta": 5, "inMemory": True } )
3472 addedIValues.append( iCounterValue )
3473 iCounterValue += 5
3474 threads.append( t )
3475 t.start()
3476
3477 for t in threads:
3478 t.join()
3479 iCounters.append( t.result )
3480 # Check that counter incremented numController times
3481 iCounterResults = True
3482 for i in addedIValues:
3483 tmpResult = i in iCounters
3484 iCounterResults = iCounterResults and tmpResult
3485 if not tmpResult:
3486 main.log.error( str( i ) + " is not in in-memory "
3487 "counter incremented results" )
3488 utilities.assert_equals( expect=True,
3489 actual=iCounterResults,
3490 onpass="In-memory counter incremented",
3491 onfail="Error incrementing in-memory" +
3492 " counter" )
3493
3494 main.step( "Counters we added have the correct values" )
3495 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3496 utilities.assert_equals( expect=main.TRUE,
3497 actual=incrementCheck,
3498 onpass="Added counters are correct",
3499 onfail="Added counters are incorrect" )
3500
Jon Hall5cf14d52015-07-16 12:15:19 -07003501 main.step( "Check counters are consistant across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003502 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003503 utilities.assert_equals( expect=main.TRUE,
3504 actual=consistentCounterResults,
3505 onpass="ONOS counters are consistent " +
3506 "across nodes",
3507 onfail="ONOS Counters are inconsistent " +
3508 "across nodes" )
3509
3510 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003511 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3512 incrementCheck = incrementCheck and \
3513 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003514 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003515 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003516 onpass="Added counters are correct",
3517 onfail="Added counters are incorrect" )
3518 # DISTRIBUTED SETS
3519 main.step( "Distributed Set get" )
3520 size = len( onosSet )
3521 getResponses = []
3522 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003523 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003524 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003525 name="setTestGet-" + str( i ),
3526 args=[ onosSetName ] )
3527 threads.append( t )
3528 t.start()
3529 for t in threads:
3530 t.join()
3531 getResponses.append( t.result )
3532
3533 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003534 for i in range( len( main.activeNodes ) ):
3535 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003536 if isinstance( getResponses[ i ], list):
3537 current = set( getResponses[ i ] )
3538 if len( current ) == len( getResponses[ i ] ):
3539 # no repeats
3540 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003541 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003542 " has incorrect view" +
3543 " of set " + onosSetName + ":\n" +
3544 str( getResponses[ i ] ) )
3545 main.log.debug( "Expected: " + str( onosSet ) )
3546 main.log.debug( "Actual: " + str( current ) )
3547 getResults = main.FALSE
3548 else:
3549 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003550 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003551 " has repeat elements in" +
3552 " set " + onosSetName + ":\n" +
3553 str( getResponses[ i ] ) )
3554 getResults = main.FALSE
3555 elif getResponses[ i ] == main.ERROR:
3556 getResults = main.FALSE
3557 utilities.assert_equals( expect=main.TRUE,
3558 actual=getResults,
3559 onpass="Set elements are correct",
3560 onfail="Set elements are incorrect" )
3561
3562 main.step( "Distributed Set size" )
3563 sizeResponses = []
3564 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003565 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003566 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003567 name="setTestSize-" + str( i ),
3568 args=[ onosSetName ] )
3569 threads.append( t )
3570 t.start()
3571 for t in threads:
3572 t.join()
3573 sizeResponses.append( t.result )
3574
3575 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003576 for i in range( len( main.activeNodes ) ):
3577 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003578 if size != sizeResponses[ i ]:
3579 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003580 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003581 " expected a size of " + str( size ) +
3582 " for set " + onosSetName +
3583 " but got " + str( sizeResponses[ i ] ) )
3584 utilities.assert_equals( expect=main.TRUE,
3585 actual=sizeResults,
3586 onpass="Set sizes are correct",
3587 onfail="Set sizes are incorrect" )
3588
3589 main.step( "Distributed Set add()" )
3590 onosSet.add( addValue )
3591 addResponses = []
3592 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003593 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003594 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003595 name="setTestAdd-" + str( i ),
3596 args=[ onosSetName, addValue ] )
3597 threads.append( t )
3598 t.start()
3599 for t in threads:
3600 t.join()
3601 addResponses.append( t.result )
3602
3603 # main.TRUE = successfully changed the set
3604 # main.FALSE = action resulted in no change in set
3605 # main.ERROR - Some error in executing the function
3606 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003607 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003608 if addResponses[ i ] == main.TRUE:
3609 # All is well
3610 pass
3611 elif addResponses[ i ] == main.FALSE:
3612 # Already in set, probably fine
3613 pass
3614 elif addResponses[ i ] == main.ERROR:
3615 # Error in execution
3616 addResults = main.FALSE
3617 else:
3618 # unexpected result
3619 addResults = main.FALSE
3620 if addResults != main.TRUE:
3621 main.log.error( "Error executing set add" )
3622
3623 # Check if set is still correct
3624 size = len( onosSet )
3625 getResponses = []
3626 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003627 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003628 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003629 name="setTestGet-" + str( i ),
3630 args=[ onosSetName ] )
3631 threads.append( t )
3632 t.start()
3633 for t in threads:
3634 t.join()
3635 getResponses.append( t.result )
3636 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003637 for i in range( len( main.activeNodes ) ):
3638 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003639 if isinstance( getResponses[ i ], list):
3640 current = set( getResponses[ i ] )
3641 if len( current ) == len( getResponses[ i ] ):
3642 # no repeats
3643 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003644 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003645 " of set " + onosSetName + ":\n" +
3646 str( getResponses[ i ] ) )
3647 main.log.debug( "Expected: " + str( onosSet ) )
3648 main.log.debug( "Actual: " + str( current ) )
3649 getResults = main.FALSE
3650 else:
3651 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003652 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003653 " set " + onosSetName + ":\n" +
3654 str( getResponses[ i ] ) )
3655 getResults = main.FALSE
3656 elif getResponses[ i ] == main.ERROR:
3657 getResults = main.FALSE
3658 sizeResponses = []
3659 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003660 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003661 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003662 name="setTestSize-" + str( i ),
3663 args=[ onosSetName ] )
3664 threads.append( t )
3665 t.start()
3666 for t in threads:
3667 t.join()
3668 sizeResponses.append( t.result )
3669 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003670 for i in range( len( main.activeNodes ) ):
3671 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003672 if size != sizeResponses[ i ]:
3673 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003674 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003675 " expected a size of " + str( size ) +
3676 " for set " + onosSetName +
3677 " but got " + str( sizeResponses[ i ] ) )
3678 addResults = addResults and getResults and sizeResults
3679 utilities.assert_equals( expect=main.TRUE,
3680 actual=addResults,
3681 onpass="Set add correct",
3682 onfail="Set add was incorrect" )
3683
3684 main.step( "Distributed Set addAll()" )
3685 onosSet.update( addAllValue.split() )
3686 addResponses = []
3687 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003688 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003689 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003690 name="setTestAddAll-" + str( i ),
3691 args=[ onosSetName, addAllValue ] )
3692 threads.append( t )
3693 t.start()
3694 for t in threads:
3695 t.join()
3696 addResponses.append( t.result )
3697
3698 # main.TRUE = successfully changed the set
3699 # main.FALSE = action resulted in no change in set
3700 # main.ERROR - Some error in executing the function
3701 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003702 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003703 if addResponses[ i ] == main.TRUE:
3704 # All is well
3705 pass
3706 elif addResponses[ i ] == main.FALSE:
3707 # Already in set, probably fine
3708 pass
3709 elif addResponses[ i ] == main.ERROR:
3710 # Error in execution
3711 addAllResults = main.FALSE
3712 else:
3713 # unexpected result
3714 addAllResults = main.FALSE
3715 if addAllResults != main.TRUE:
3716 main.log.error( "Error executing set addAll" )
3717
3718 # Check if set is still correct
3719 size = len( onosSet )
3720 getResponses = []
3721 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003722 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003723 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003724 name="setTestGet-" + str( i ),
3725 args=[ onosSetName ] )
3726 threads.append( t )
3727 t.start()
3728 for t in threads:
3729 t.join()
3730 getResponses.append( t.result )
3731 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003732 for i in range( len( main.activeNodes ) ):
3733 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003734 if isinstance( getResponses[ i ], list):
3735 current = set( getResponses[ i ] )
3736 if len( current ) == len( getResponses[ i ] ):
3737 # no repeats
3738 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003739 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003740 " has incorrect view" +
3741 " of set " + onosSetName + ":\n" +
3742 str( getResponses[ i ] ) )
3743 main.log.debug( "Expected: " + str( onosSet ) )
3744 main.log.debug( "Actual: " + str( current ) )
3745 getResults = main.FALSE
3746 else:
3747 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003748 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003749 " has repeat elements in" +
3750 " set " + onosSetName + ":\n" +
3751 str( getResponses[ i ] ) )
3752 getResults = main.FALSE
3753 elif getResponses[ i ] == main.ERROR:
3754 getResults = main.FALSE
3755 sizeResponses = []
3756 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003757 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003758 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003759 name="setTestSize-" + str( i ),
3760 args=[ onosSetName ] )
3761 threads.append( t )
3762 t.start()
3763 for t in threads:
3764 t.join()
3765 sizeResponses.append( t.result )
3766 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003767 for i in range( len( main.activeNodes ) ):
3768 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003769 if size != sizeResponses[ i ]:
3770 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003771 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003772 " expected a size of " + str( size ) +
3773 " for set " + onosSetName +
3774 " but got " + str( sizeResponses[ i ] ) )
3775 addAllResults = addAllResults and getResults and sizeResults
3776 utilities.assert_equals( expect=main.TRUE,
3777 actual=addAllResults,
3778 onpass="Set addAll correct",
3779 onfail="Set addAll was incorrect" )
3780
3781 main.step( "Distributed Set contains()" )
3782 containsResponses = []
3783 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003784 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003785 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003786 name="setContains-" + str( i ),
3787 args=[ onosSetName ],
3788 kwargs={ "values": addValue } )
3789 threads.append( t )
3790 t.start()
3791 for t in threads:
3792 t.join()
3793 # NOTE: This is the tuple
3794 containsResponses.append( t.result )
3795
3796 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003797 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003798 if containsResponses[ i ] == main.ERROR:
3799 containsResults = main.FALSE
3800 else:
3801 containsResults = containsResults and\
3802 containsResponses[ i ][ 1 ]
3803 utilities.assert_equals( expect=main.TRUE,
3804 actual=containsResults,
3805 onpass="Set contains is functional",
3806 onfail="Set contains failed" )
3807
3808 main.step( "Distributed Set containsAll()" )
3809 containsAllResponses = []
3810 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003811 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003812 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003813 name="setContainsAll-" + str( i ),
3814 args=[ onosSetName ],
3815 kwargs={ "values": addAllValue } )
3816 threads.append( t )
3817 t.start()
3818 for t in threads:
3819 t.join()
3820 # NOTE: This is the tuple
3821 containsAllResponses.append( t.result )
3822
3823 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003824 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003825 if containsResponses[ i ] == main.ERROR:
3826 containsResults = main.FALSE
3827 else:
3828 containsResults = containsResults and\
3829 containsResponses[ i ][ 1 ]
3830 utilities.assert_equals( expect=main.TRUE,
3831 actual=containsAllResults,
3832 onpass="Set containsAll is functional",
3833 onfail="Set containsAll failed" )
3834
3835 main.step( "Distributed Set remove()" )
3836 onosSet.remove( addValue )
3837 removeResponses = []
3838 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003839 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003840 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003841 name="setTestRemove-" + str( i ),
3842 args=[ onosSetName, addValue ] )
3843 threads.append( t )
3844 t.start()
3845 for t in threads:
3846 t.join()
3847 removeResponses.append( t.result )
3848
3849 # main.TRUE = successfully changed the set
3850 # main.FALSE = action resulted in no change in set
3851 # main.ERROR - Some error in executing the function
3852 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003853 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003854 if removeResponses[ i ] == main.TRUE:
3855 # All is well
3856 pass
3857 elif removeResponses[ i ] == main.FALSE:
3858 # not in set, probably fine
3859 pass
3860 elif removeResponses[ i ] == main.ERROR:
3861 # Error in execution
3862 removeResults = main.FALSE
3863 else:
3864 # unexpected result
3865 removeResults = main.FALSE
3866 if removeResults != main.TRUE:
3867 main.log.error( "Error executing set remove" )
3868
3869 # Check if set is still correct
3870 size = len( onosSet )
3871 getResponses = []
3872 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003873 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003874 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003875 name="setTestGet-" + str( i ),
3876 args=[ onosSetName ] )
3877 threads.append( t )
3878 t.start()
3879 for t in threads:
3880 t.join()
3881 getResponses.append( t.result )
3882 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003883 for i in range( len( main.activeNodes ) ):
3884 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003885 if isinstance( getResponses[ i ], list):
3886 current = set( getResponses[ i ] )
3887 if len( current ) == len( getResponses[ i ] ):
3888 # no repeats
3889 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003890 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003891 " has incorrect view" +
3892 " of set " + onosSetName + ":\n" +
3893 str( getResponses[ i ] ) )
3894 main.log.debug( "Expected: " + str( onosSet ) )
3895 main.log.debug( "Actual: " + str( current ) )
3896 getResults = main.FALSE
3897 else:
3898 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003899 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003900 " has repeat elements in" +
3901 " set " + onosSetName + ":\n" +
3902 str( getResponses[ i ] ) )
3903 getResults = main.FALSE
3904 elif getResponses[ i ] == main.ERROR:
3905 getResults = main.FALSE
3906 sizeResponses = []
3907 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003908 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003909 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003910 name="setTestSize-" + str( i ),
3911 args=[ onosSetName ] )
3912 threads.append( t )
3913 t.start()
3914 for t in threads:
3915 t.join()
3916 sizeResponses.append( t.result )
3917 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003918 for i in range( len( main.activeNodes ) ):
3919 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003920 if size != sizeResponses[ i ]:
3921 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003922 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003923 " expected a size of " + str( size ) +
3924 " for set " + onosSetName +
3925 " but got " + str( sizeResponses[ i ] ) )
3926 removeResults = removeResults and getResults and sizeResults
3927 utilities.assert_equals( expect=main.TRUE,
3928 actual=removeResults,
3929 onpass="Set remove correct",
3930 onfail="Set remove was incorrect" )
3931
3932 main.step( "Distributed Set removeAll()" )
3933 onosSet.difference_update( addAllValue.split() )
3934 removeAllResponses = []
3935 threads = []
3936 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003937 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003938 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003939 name="setTestRemoveAll-" + str( i ),
3940 args=[ onosSetName, addAllValue ] )
3941 threads.append( t )
3942 t.start()
3943 for t in threads:
3944 t.join()
3945 removeAllResponses.append( t.result )
3946 except Exception, e:
3947 main.log.exception(e)
3948
3949 # main.TRUE = successfully changed the set
3950 # main.FALSE = action resulted in no change in set
3951 # main.ERROR - Some error in executing the function
3952 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003953 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003954 if removeAllResponses[ i ] == main.TRUE:
3955 # All is well
3956 pass
3957 elif removeAllResponses[ i ] == main.FALSE:
3958 # not in set, probably fine
3959 pass
3960 elif removeAllResponses[ i ] == main.ERROR:
3961 # Error in execution
3962 removeAllResults = main.FALSE
3963 else:
3964 # unexpected result
3965 removeAllResults = main.FALSE
3966 if removeAllResults != main.TRUE:
3967 main.log.error( "Error executing set removeAll" )
3968
3969 # Check if set is still correct
3970 size = len( onosSet )
3971 getResponses = []
3972 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003973 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003974 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003975 name="setTestGet-" + str( i ),
3976 args=[ onosSetName ] )
3977 threads.append( t )
3978 t.start()
3979 for t in threads:
3980 t.join()
3981 getResponses.append( t.result )
3982 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003983 for i in range( len( main.activeNodes ) ):
3984 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003985 if isinstance( getResponses[ i ], list):
3986 current = set( getResponses[ i ] )
3987 if len( current ) == len( getResponses[ i ] ):
3988 # no repeats
3989 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003990 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003991 " has incorrect view" +
3992 " of set " + onosSetName + ":\n" +
3993 str( getResponses[ i ] ) )
3994 main.log.debug( "Expected: " + str( onosSet ) )
3995 main.log.debug( "Actual: " + str( current ) )
3996 getResults = main.FALSE
3997 else:
3998 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003999 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004000 " has repeat elements in" +
4001 " set " + onosSetName + ":\n" +
4002 str( getResponses[ i ] ) )
4003 getResults = main.FALSE
4004 elif getResponses[ i ] == main.ERROR:
4005 getResults = main.FALSE
4006 sizeResponses = []
4007 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004008 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004009 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004010 name="setTestSize-" + str( i ),
4011 args=[ onosSetName ] )
4012 threads.append( t )
4013 t.start()
4014 for t in threads:
4015 t.join()
4016 sizeResponses.append( t.result )
4017 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004018 for i in range( len( main.activeNodes ) ):
4019 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004020 if size != sizeResponses[ i ]:
4021 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004022 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004023 " expected a size of " + str( size ) +
4024 " for set " + onosSetName +
4025 " but got " + str( sizeResponses[ i ] ) )
4026 removeAllResults = removeAllResults and getResults and sizeResults
4027 utilities.assert_equals( expect=main.TRUE,
4028 actual=removeAllResults,
4029 onpass="Set removeAll correct",
4030 onfail="Set removeAll was incorrect" )
4031
4032 main.step( "Distributed Set addAll()" )
4033 onosSet.update( addAllValue.split() )
4034 addResponses = []
4035 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004036 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004037 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004038 name="setTestAddAll-" + str( i ),
4039 args=[ onosSetName, addAllValue ] )
4040 threads.append( t )
4041 t.start()
4042 for t in threads:
4043 t.join()
4044 addResponses.append( t.result )
4045
4046 # main.TRUE = successfully changed the set
4047 # main.FALSE = action resulted in no change in set
4048 # main.ERROR - Some error in executing the function
4049 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004050 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004051 if addResponses[ i ] == main.TRUE:
4052 # All is well
4053 pass
4054 elif addResponses[ i ] == main.FALSE:
4055 # Already in set, probably fine
4056 pass
4057 elif addResponses[ i ] == main.ERROR:
4058 # Error in execution
4059 addAllResults = main.FALSE
4060 else:
4061 # unexpected result
4062 addAllResults = main.FALSE
4063 if addAllResults != main.TRUE:
4064 main.log.error( "Error executing set addAll" )
4065
4066 # Check if set is still correct
4067 size = len( onosSet )
4068 getResponses = []
4069 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004070 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004071 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004072 name="setTestGet-" + str( i ),
4073 args=[ onosSetName ] )
4074 threads.append( t )
4075 t.start()
4076 for t in threads:
4077 t.join()
4078 getResponses.append( t.result )
4079 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004080 for i in range( len( main.activeNodes ) ):
4081 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004082 if isinstance( getResponses[ i ], list):
4083 current = set( getResponses[ i ] )
4084 if len( current ) == len( getResponses[ i ] ):
4085 # no repeats
4086 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004087 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004088 " has incorrect view" +
4089 " of set " + onosSetName + ":\n" +
4090 str( getResponses[ i ] ) )
4091 main.log.debug( "Expected: " + str( onosSet ) )
4092 main.log.debug( "Actual: " + str( current ) )
4093 getResults = main.FALSE
4094 else:
4095 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004096 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004097 " has repeat elements in" +
4098 " set " + onosSetName + ":\n" +
4099 str( getResponses[ i ] ) )
4100 getResults = main.FALSE
4101 elif getResponses[ i ] == main.ERROR:
4102 getResults = main.FALSE
4103 sizeResponses = []
4104 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004105 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004106 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004107 name="setTestSize-" + str( i ),
4108 args=[ onosSetName ] )
4109 threads.append( t )
4110 t.start()
4111 for t in threads:
4112 t.join()
4113 sizeResponses.append( t.result )
4114 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004115 for i in range( len( main.activeNodes ) ):
4116 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004117 if size != sizeResponses[ i ]:
4118 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004119 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004120 " expected a size of " + str( size ) +
4121 " for set " + onosSetName +
4122 " but got " + str( sizeResponses[ i ] ) )
4123 addAllResults = addAllResults and getResults and sizeResults
4124 utilities.assert_equals( expect=main.TRUE,
4125 actual=addAllResults,
4126 onpass="Set addAll correct",
4127 onfail="Set addAll was incorrect" )
4128
4129 main.step( "Distributed Set clear()" )
4130 onosSet.clear()
4131 clearResponses = []
4132 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004133 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004134 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004135 name="setTestClear-" + str( i ),
4136 args=[ onosSetName, " "], # Values doesn't matter
4137 kwargs={ "clear": True } )
4138 threads.append( t )
4139 t.start()
4140 for t in threads:
4141 t.join()
4142 clearResponses.append( t.result )
4143
4144 # main.TRUE = successfully changed the set
4145 # main.FALSE = action resulted in no change in set
4146 # main.ERROR - Some error in executing the function
4147 clearResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004148 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004149 if clearResponses[ i ] == main.TRUE:
4150 # All is well
4151 pass
4152 elif clearResponses[ i ] == main.FALSE:
4153 # Nothing set, probably fine
4154 pass
4155 elif clearResponses[ i ] == main.ERROR:
4156 # Error in execution
4157 clearResults = main.FALSE
4158 else:
4159 # unexpected result
4160 clearResults = main.FALSE
4161 if clearResults != main.TRUE:
4162 main.log.error( "Error executing set clear" )
4163
4164 # Check if set is still correct
4165 size = len( onosSet )
4166 getResponses = []
4167 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004168 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004169 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004170 name="setTestGet-" + str( i ),
4171 args=[ onosSetName ] )
4172 threads.append( t )
4173 t.start()
4174 for t in threads:
4175 t.join()
4176 getResponses.append( t.result )
4177 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004178 for i in range( len( main.activeNodes ) ):
4179 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004180 if isinstance( getResponses[ i ], list):
4181 current = set( getResponses[ i ] )
4182 if len( current ) == len( getResponses[ i ] ):
4183 # no repeats
4184 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004185 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004186 " has incorrect view" +
4187 " of set " + onosSetName + ":\n" +
4188 str( getResponses[ i ] ) )
4189 main.log.debug( "Expected: " + str( onosSet ) )
4190 main.log.debug( "Actual: " + str( current ) )
4191 getResults = main.FALSE
4192 else:
4193 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004194 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004195 " has repeat elements in" +
4196 " set " + onosSetName + ":\n" +
4197 str( getResponses[ i ] ) )
4198 getResults = main.FALSE
4199 elif getResponses[ i ] == main.ERROR:
4200 getResults = main.FALSE
4201 sizeResponses = []
4202 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004203 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004204 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004205 name="setTestSize-" + str( i ),
4206 args=[ onosSetName ] )
4207 threads.append( t )
4208 t.start()
4209 for t in threads:
4210 t.join()
4211 sizeResponses.append( t.result )
4212 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004213 for i in range( len( main.activeNodes ) ):
4214 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004215 if size != sizeResponses[ i ]:
4216 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004217 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004218 " expected a size of " + str( size ) +
4219 " for set " + onosSetName +
4220 " but got " + str( sizeResponses[ i ] ) )
4221 clearResults = clearResults and getResults and sizeResults
4222 utilities.assert_equals( expect=main.TRUE,
4223 actual=clearResults,
4224 onpass="Set clear correct",
4225 onfail="Set clear was incorrect" )
4226
4227 main.step( "Distributed Set addAll()" )
4228 onosSet.update( addAllValue.split() )
4229 addResponses = []
4230 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004231 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004232 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004233 name="setTestAddAll-" + str( i ),
4234 args=[ onosSetName, addAllValue ] )
4235 threads.append( t )
4236 t.start()
4237 for t in threads:
4238 t.join()
4239 addResponses.append( t.result )
4240
4241 # main.TRUE = successfully changed the set
4242 # main.FALSE = action resulted in no change in set
4243 # main.ERROR - Some error in executing the function
4244 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004245 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004246 if addResponses[ i ] == main.TRUE:
4247 # All is well
4248 pass
4249 elif addResponses[ i ] == main.FALSE:
4250 # Already in set, probably fine
4251 pass
4252 elif addResponses[ i ] == main.ERROR:
4253 # Error in execution
4254 addAllResults = main.FALSE
4255 else:
4256 # unexpected result
4257 addAllResults = main.FALSE
4258 if addAllResults != main.TRUE:
4259 main.log.error( "Error executing set addAll" )
4260
4261 # Check if set is still correct
4262 size = len( onosSet )
4263 getResponses = []
4264 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004265 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004266 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004267 name="setTestGet-" + str( i ),
4268 args=[ onosSetName ] )
4269 threads.append( t )
4270 t.start()
4271 for t in threads:
4272 t.join()
4273 getResponses.append( t.result )
4274 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004275 for i in range( len( main.activeNodes ) ):
4276 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004277 if isinstance( getResponses[ i ], list):
4278 current = set( getResponses[ i ] )
4279 if len( current ) == len( getResponses[ i ] ):
4280 # no repeats
4281 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004282 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004283 " has incorrect view" +
4284 " of set " + onosSetName + ":\n" +
4285 str( getResponses[ i ] ) )
4286 main.log.debug( "Expected: " + str( onosSet ) )
4287 main.log.debug( "Actual: " + str( current ) )
4288 getResults = main.FALSE
4289 else:
4290 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004291 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004292 " has repeat elements in" +
4293 " set " + onosSetName + ":\n" +
4294 str( getResponses[ i ] ) )
4295 getResults = main.FALSE
4296 elif getResponses[ i ] == main.ERROR:
4297 getResults = main.FALSE
4298 sizeResponses = []
4299 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004300 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004301 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004302 name="setTestSize-" + str( i ),
4303 args=[ onosSetName ] )
4304 threads.append( t )
4305 t.start()
4306 for t in threads:
4307 t.join()
4308 sizeResponses.append( t.result )
4309 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004310 for i in range( len( main.activeNodes ) ):
4311 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004312 if size != sizeResponses[ i ]:
4313 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004314 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004315 " expected a size of " + str( size ) +
4316 " for set " + onosSetName +
4317 " but got " + str( sizeResponses[ i ] ) )
4318 addAllResults = addAllResults and getResults and sizeResults
4319 utilities.assert_equals( expect=main.TRUE,
4320 actual=addAllResults,
4321 onpass="Set addAll correct",
4322 onfail="Set addAll was incorrect" )
4323
4324 main.step( "Distributed Set retain()" )
4325 onosSet.intersection_update( retainValue.split() )
4326 retainResponses = []
4327 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004328 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004329 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004330 name="setTestRetain-" + str( i ),
4331 args=[ onosSetName, retainValue ],
4332 kwargs={ "retain": True } )
4333 threads.append( t )
4334 t.start()
4335 for t in threads:
4336 t.join()
4337 retainResponses.append( t.result )
4338
4339 # main.TRUE = successfully changed the set
4340 # main.FALSE = action resulted in no change in set
4341 # main.ERROR - Some error in executing the function
4342 retainResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004343 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004344 if retainResponses[ i ] == main.TRUE:
4345 # All is well
4346 pass
4347 elif retainResponses[ i ] == main.FALSE:
4348 # Already in set, probably fine
4349 pass
4350 elif retainResponses[ i ] == main.ERROR:
4351 # Error in execution
4352 retainResults = main.FALSE
4353 else:
4354 # unexpected result
4355 retainResults = main.FALSE
4356 if retainResults != main.TRUE:
4357 main.log.error( "Error executing set retain" )
4358
4359 # Check if set is still correct
4360 size = len( onosSet )
4361 getResponses = []
4362 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004363 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004364 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004365 name="setTestGet-" + str( i ),
4366 args=[ onosSetName ] )
4367 threads.append( t )
4368 t.start()
4369 for t in threads:
4370 t.join()
4371 getResponses.append( t.result )
4372 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004373 for i in range( len( main.activeNodes ) ):
4374 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004375 if isinstance( getResponses[ i ], list):
4376 current = set( getResponses[ i ] )
4377 if len( current ) == len( getResponses[ i ] ):
4378 # no repeats
4379 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004380 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004381 " has incorrect view" +
4382 " of set " + onosSetName + ":\n" +
4383 str( getResponses[ i ] ) )
4384 main.log.debug( "Expected: " + str( onosSet ) )
4385 main.log.debug( "Actual: " + str( current ) )
4386 getResults = main.FALSE
4387 else:
4388 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004389 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004390 " has repeat elements in" +
4391 " set " + onosSetName + ":\n" +
4392 str( getResponses[ i ] ) )
4393 getResults = main.FALSE
4394 elif getResponses[ i ] == main.ERROR:
4395 getResults = main.FALSE
4396 sizeResponses = []
4397 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004398 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004399 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004400 name="setTestSize-" + str( i ),
4401 args=[ onosSetName ] )
4402 threads.append( t )
4403 t.start()
4404 for t in threads:
4405 t.join()
4406 sizeResponses.append( t.result )
4407 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004408 for i in range( len( main.activeNodes ) ):
4409 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004410 if size != sizeResponses[ i ]:
4411 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004412 main.log.error( "ONOS" + node + " expected a size of " +
Jon Hall5cf14d52015-07-16 12:15:19 -07004413 str( size ) + " for set " + onosSetName +
4414 " but got " + str( sizeResponses[ i ] ) )
4415 retainResults = retainResults and getResults and sizeResults
4416 utilities.assert_equals( expect=main.TRUE,
4417 actual=retainResults,
4418 onpass="Set retain correct",
4419 onfail="Set retain was incorrect" )
4420
Jon Hall2a5002c2015-08-21 16:49:11 -07004421 # Transactional maps
4422 main.step( "Partitioned Transactional maps put" )
4423 tMapValue = "Testing"
4424 numKeys = 100
4425 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004426 node = main.activeNodes[0]
4427 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall6e709752016-02-01 13:38:46 -08004428 if putResponses and len( putResponses ) == 100:
Jon Hall2a5002c2015-08-21 16:49:11 -07004429 for i in putResponses:
4430 if putResponses[ i ][ 'value' ] != tMapValue:
4431 putResult = False
4432 else:
4433 putResult = False
4434 if not putResult:
4435 main.log.debug( "Put response values: " + str( putResponses ) )
4436 utilities.assert_equals( expect=True,
4437 actual=putResult,
4438 onpass="Partitioned Transactional Map put successful",
4439 onfail="Partitioned Transactional Map put values are incorrect" )
4440
4441 main.step( "Partitioned Transactional maps get" )
4442 getCheck = True
4443 for n in range( 1, numKeys + 1 ):
4444 getResponses = []
4445 threads = []
4446 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004447 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004448 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4449 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004450 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004451 threads.append( t )
4452 t.start()
4453 for t in threads:
4454 t.join()
4455 getResponses.append( t.result )
4456 for node in getResponses:
4457 if node != tMapValue:
4458 valueCheck = False
4459 if not valueCheck:
4460 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4461 main.log.warn( getResponses )
4462 getCheck = getCheck and valueCheck
4463 utilities.assert_equals( expect=True,
4464 actual=getCheck,
4465 onpass="Partitioned Transactional Map get values were correct",
4466 onfail="Partitioned Transactional Map values incorrect" )
4467
4468 main.step( "In-memory Transactional maps put" )
4469 tMapValue = "Testing"
4470 numKeys = 100
4471 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004472 node = main.activeNodes[0]
4473 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
Jon Hall2a5002c2015-08-21 16:49:11 -07004474 if len( putResponses ) == 100:
4475 for i in putResponses:
4476 if putResponses[ i ][ 'value' ] != tMapValue:
4477 putResult = False
4478 else:
4479 putResult = False
4480 if not putResult:
4481 main.log.debug( "Put response values: " + str( putResponses ) )
4482 utilities.assert_equals( expect=True,
4483 actual=putResult,
4484 onpass="In-Memory Transactional Map put successful",
4485 onfail="In-Memory Transactional Map put values are incorrect" )
4486
4487 main.step( "In-Memory Transactional maps get" )
4488 getCheck = True
4489 for n in range( 1, numKeys + 1 ):
4490 getResponses = []
4491 threads = []
4492 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004493 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004494 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4495 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004496 args=[ "Key" + str( n ) ],
Jon Hall2a5002c2015-08-21 16:49:11 -07004497 kwargs={ "inMemory": True } )
4498 threads.append( t )
4499 t.start()
4500 for t in threads:
4501 t.join()
4502 getResponses.append( t.result )
4503 for node in getResponses:
4504 if node != tMapValue:
4505 valueCheck = False
4506 if not valueCheck:
4507 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4508 main.log.warn( getResponses )
4509 getCheck = getCheck and valueCheck
4510 utilities.assert_equals( expect=True,
4511 actual=getCheck,
4512 onpass="In-Memory Transactional Map get values were correct",
4513 onfail="In-Memory Transactional Map values incorrect" )