blob: 1088374bb86d481fce8c29ad4dcd8c26742a7ff7 [file] [log] [blame]
Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if ONOS can handle
3 a minority of its nodes restarting
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
Jon Hallb3ed8ed2015-10-28 16:43:55 -070012CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
Jon Hall5cf14d52015-07-16 12:15:19 -070014CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
Jon Hallb3ed8ed2015-10-28 16:43:55 -070028class HAstopNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -070029
    def __init__( self ):
        # Placeholder attribute; TestON test classes conventionally define
        # a default value in their constructor.
        self.default = ''
32
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import pexpect
        main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."
        # TODO: save all the timers and output them for plotting

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        # Cap the requested controller count at what the test bench
        # actually has available.
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        # Load the Counters helper module from the path in the params file;
        # any failure here aborts the entire test run.
        try:
            fileName = "Counters"
            # TODO: Maybe make a library folder somewhere?
            path = main.params[ 'imports' ][ 'path' ]
            main.Counters = imp.load_source( fileName,
                                             path + fileName + ".py" )
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        # Collect the CLI and node components created by the TestON topology
        # file; stop at the first index that does not exist.
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        # NOTE(review): verifyResult is never checked afterwards — confirm
        # whether cell verification failures should fail this step.
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME:this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet( )
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAstopNodes"
        plotName = "Plot-HA"
        # Build the Confluence wiki markup that embeds the Jenkins plot.
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=0' +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki(graphs)

        main.step( "Creating ONOS package" )
        # copy gen-partions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
        packageResult = main.ONOSbench.onosPackage()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )
        # clean up gen-partitions file: restore the stock copy in the ONOS
        # tree via git so later builds are not polluted by the custom file.
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

        main.step( "Checking if ONOS is up yet" )
        # Give every node two chances to come up; restart any that failed
        # before re-checking.
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " didn't start!" )
                    main.ONOSbench.onosStop( node.ip_address )
                    main.ONOSbench.onosStart( node.ip_address )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        # NOTE(review): every other step in this case uses main.step();
        # confirm main.log.step is a valid TestON logger method.
        main.log.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        # Start one CLI session per controller in parallel.
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        # Verify app name<->id mappings on every active node in parallel.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            # Dump app state from one active node to help debugging.
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        if cliResults == main.FALSE:
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()
301
302 def CASE2( self, main ):
303 """
304 Assign devices to controllers
305 """
306 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700307 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700308 assert main, "main not defined"
309 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700310 assert main.CLIs, "main.CLIs not defined"
311 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700312 assert ONOS1Port, "ONOS1Port not defined"
313 assert ONOS2Port, "ONOS2Port not defined"
314 assert ONOS3Port, "ONOS3Port not defined"
315 assert ONOS4Port, "ONOS4Port not defined"
316 assert ONOS5Port, "ONOS5Port not defined"
317 assert ONOS6Port, "ONOS6Port not defined"
318 assert ONOS7Port, "ONOS7Port not defined"
319
320 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700321 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700322 "and check that an ONOS node becomes the " +\
323 "master of the device."
324 main.step( "Assign switches to controllers" )
325
326 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700327 for i in range( main.numCtrls ):
328 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700329 swList = []
330 for i in range( 1, 29 ):
331 swList.append( "s" + str( i ) )
332 main.Mininet1.assignSwController( sw=swList, ip=ipList )
333
334 mastershipCheck = main.TRUE
335 for i in range( 1, 29 ):
336 response = main.Mininet1.getSwController( "s" + str( i ) )
337 try:
338 main.log.info( str( response ) )
339 except Exception:
340 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700341 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700342 if re.search( "tcp:" + node.ip_address, response ):
343 mastershipCheck = mastershipCheck and main.TRUE
344 else:
345 main.log.error( "Error, node " + node.ip_address + " is " +
346 "not in the list of controllers s" +
347 str( i ) + " is connecting to." )
348 mastershipCheck = main.FALSE
349 utilities.assert_equals(
350 expect=main.TRUE,
351 actual=mastershipCheck,
352 onpass="Switch mastership assigned correctly",
353 onfail="Switches not assigned correctly to controllers" )
354
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually maps each of the 28 switches to a designated controller
        with 'device-role', then re-reads the roles to verify the mastership
        actually moved.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI calls in this case go through the first active node.
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster thanks to the '% main.numCtrls' wrapping below.
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    # Switches 8-17 use dpids 3008-3017.
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    # Switches 18-27 use dpids 6018-6027.
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        # Re-read every device role and compare against the requested master.
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
476
477 def CASE3( self, main ):
478 """
479 Assign intents
480 """
481 import time
482 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700483 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700484 assert main, "main not defined"
485 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700486 assert main.CLIs, "main.CLIs not defined"
487 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700488 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700489 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700490 "assign predetermined host-to-host intents." +\
491 " After installation, check that the intent" +\
492 " is distributed to all nodes and the state" +\
493 " is INSTALLED"
494
495 # install onos-app-fwd
496 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700497 onosCli = main.CLIs[ main.activeNodes[0] ]
498 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700499 utilities.assert_equals( expect=main.TRUE, actual=installResults,
500 onpass="Install fwd successful",
501 onfail="Install fwd failed" )
502
503 main.step( "Check app ids" )
504 appCheck = main.TRUE
505 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700506 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700507 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700508 name="appToIDCheck-" + str( i ),
509 args=[] )
510 threads.append( t )
511 t.start()
512
513 for t in threads:
514 t.join()
515 appCheck = appCheck and t.result
516 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700517 main.log.warn( onosCli.apps() )
518 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700519 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
520 onpass="App Ids seem to be correct",
521 onfail="Something is wrong with app Ids" )
522
523 main.step( "Discovering Hosts( Via pingall for now )" )
524 # FIXME: Once we have a host discovery mechanism, use that instead
525 # REACTIVE FWD test
526 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700527 passMsg = "Reactive Pingall test passed"
528 time1 = time.time()
529 pingResult = main.Mininet1.pingall()
530 time2 = time.time()
531 if not pingResult:
532 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700533 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700534 passMsg += " on the second try"
535 utilities.assert_equals(
536 expect=main.TRUE,
537 actual=pingResult,
538 onpass= passMsg,
539 onfail="Reactive Pingall failed, " +
540 "one or more ping pairs failed" )
541 main.log.info( "Time for pingall: %2f seconds" %
542 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700543 # timeout for fwd flows
544 time.sleep( 11 )
545 # uninstall onos-app-fwd
546 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700547 node = main.activeNodes[0]
548 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700549 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
550 onpass="Uninstall fwd successful",
551 onfail="Uninstall fwd failed" )
552
553 main.step( "Check app ids" )
554 threads = []
555 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700556 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700557 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700558 name="appToIDCheck-" + str( i ),
559 args=[] )
560 threads.append( t )
561 t.start()
562
563 for t in threads:
564 t.join()
565 appCheck2 = appCheck2 and t.result
566 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700567 node = main.activeNodes[0]
568 main.log.warn( main.CLIs[node].apps() )
569 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700570 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
571 onpass="App Ids seem to be correct",
572 onfail="Something is wrong with app Ids" )
573
574 main.step( "Add host intents via cli" )
575 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700576 # TODO: move the host numbers to params
577 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700578 intentAddResult = True
579 hostResult = main.TRUE
580 for i in range( 8, 18 ):
581 main.log.info( "Adding host intent between h" + str( i ) +
582 " and h" + str( i + 10 ) )
583 host1 = "00:00:00:00:00:" + \
584 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
585 host2 = "00:00:00:00:00:" + \
586 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
587 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700588 host1Dict = onosCli.getHost( host1 )
589 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700590 host1Id = None
591 host2Id = None
592 if host1Dict and host2Dict:
593 host1Id = host1Dict.get( 'id', None )
594 host2Id = host2Dict.get( 'id', None )
595 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700596 nodeNum = ( i % len( main.activeNodes ) )
597 node = main.activeNodes[nodeNum]
598 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700599 if tmpId:
600 main.log.info( "Added intent with id: " + tmpId )
601 intentIds.append( tmpId )
602 else:
603 main.log.error( "addHostIntent returned: " +
604 repr( tmpId ) )
605 else:
606 main.log.error( "Error, getHost() failed for h" + str( i ) +
607 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700608 node = main.activeNodes[0]
609 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700610 main.log.warn( "Hosts output: " )
611 try:
612 main.log.warn( json.dumps( json.loads( hosts ),
613 sort_keys=True,
614 indent=4,
615 separators=( ',', ': ' ) ) )
616 except ( ValueError, TypeError ):
617 main.log.warn( repr( hosts ) )
618 hostResult = main.FALSE
619 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
620 onpass="Found a host id for each host",
621 onfail="Error looking up host ids" )
622
623 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700624 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700625 main.log.info( "Submitted intents: " + str( intentIds ) )
626 main.log.info( "Intents in ONOS: " + str( onosIds ) )
627 for intent in intentIds:
628 if intent in onosIds:
629 pass # intent submitted is in onos
630 else:
631 intentAddResult = False
632 if intentAddResult:
633 intentStop = time.time()
634 else:
635 intentStop = None
636 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700637 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700638 intentStates = []
639 installedCheck = True
640 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
641 count = 0
642 try:
643 for intent in json.loads( intents ):
644 state = intent.get( 'state', None )
645 if "INSTALLED" not in state:
646 installedCheck = False
647 intentId = intent.get( 'id', None )
648 intentStates.append( ( intentId, state ) )
649 except ( ValueError, TypeError ):
650 main.log.exception( "Error parsing intents" )
651 # add submitted intents not in the store
652 tmplist = [ i for i, s in intentStates ]
653 missingIntents = False
654 for i in intentIds:
655 if i not in tmplist:
656 intentStates.append( ( i, " - " ) )
657 missingIntents = True
658 intentStates.sort()
659 for i, s in intentStates:
660 count += 1
661 main.log.info( "%-6s%-15s%-15s" %
662 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700663 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700664 try:
665 missing = False
666 if leaders:
667 parsedLeaders = json.loads( leaders )
668 main.log.warn( json.dumps( parsedLeaders,
669 sort_keys=True,
670 indent=4,
671 separators=( ',', ': ' ) ) )
672 # check for all intent partitions
673 topics = []
674 for i in range( 14 ):
675 topics.append( "intent-partition-" + str( i ) )
676 main.log.debug( topics )
677 ONOStopics = [ j['topic'] for j in parsedLeaders ]
678 for topic in topics:
679 if topic not in ONOStopics:
680 main.log.error( "Error: " + topic +
681 " not in leaders" )
682 missing = True
683 else:
684 main.log.error( "leaders() returned None" )
685 except ( ValueError, TypeError ):
686 main.log.exception( "Error parsing leaders" )
687 main.log.error( repr( leaders ) )
688 # Check all nodes
689 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700690 for i in main.activeNodes:
691 response = main.CLIs[i].leaders( jsonFormat=False)
692 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700693 str( response ) )
694
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700695 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700696 try:
697 if partitions :
698 parsedPartitions = json.loads( partitions )
699 main.log.warn( json.dumps( parsedPartitions,
700 sort_keys=True,
701 indent=4,
702 separators=( ',', ': ' ) ) )
703 # TODO check for a leader in all paritions
704 # TODO check for consistency among nodes
705 else:
706 main.log.error( "partitions() returned None" )
707 except ( ValueError, TypeError ):
708 main.log.exception( "Error parsing partitions" )
709 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700710 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700711 try:
712 if pendingMap :
713 parsedPending = json.loads( pendingMap )
714 main.log.warn( json.dumps( parsedPending,
715 sort_keys=True,
716 indent=4,
717 separators=( ',', ': ' ) ) )
718 # TODO check something here?
719 else:
720 main.log.error( "pendingMap() returned None" )
721 except ( ValueError, TypeError ):
722 main.log.exception( "Error parsing pending map" )
723 main.log.error( repr( pendingMap ) )
724
725 intentAddResult = bool( intentAddResult and not missingIntents and
726 installedCheck )
727 if not intentAddResult:
728 main.log.error( "Error in pushing host intents to ONOS" )
729
730 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700731 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700732 correct = True
733 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700734 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700735 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700736 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700737 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700738 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700739 str( sorted( onosIds ) ) )
740 if sorted( ids ) != sorted( intentIds ):
741 main.log.warn( "Set of intent IDs doesn't match" )
742 correct = False
743 break
744 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700745 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700746 for intent in intents:
747 if intent[ 'state' ] != "INSTALLED":
748 main.log.warn( "Intent " + intent[ 'id' ] +
749 " is " + intent[ 'state' ] )
750 correct = False
751 break
752 if correct:
753 break
754 else:
755 time.sleep(1)
756 if not intentStop:
757 intentStop = time.time()
758 global gossipTime
759 gossipTime = intentStop - intentStart
760 main.log.info( "It took about " + str( gossipTime ) +
761 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700762 gossipPeriod = int( main.params['timers']['gossip'] )
763 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700764 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700765 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700766 onpass="ECM anti-entropy for intents worked within " +
767 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700768 onfail="Intent ECM anti-entropy took too long. " +
769 "Expected time:{}, Actual time:{}".format( maxGossipTime,
770 gossipTime ) )
771 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700772 intentAddResult = True
773
774 if not intentAddResult or "key" in pendingMap:
775 import time
776 installedCheck = True
777 main.log.info( "Sleeping 60 seconds to see if intents are found" )
778 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700779 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700780 main.log.info( "Submitted intents: " + str( intentIds ) )
781 main.log.info( "Intents in ONOS: " + str( onosIds ) )
782 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700783 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700784 intentStates = []
785 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
786 count = 0
787 try:
788 for intent in json.loads( intents ):
789 # Iter through intents of a node
790 state = intent.get( 'state', None )
791 if "INSTALLED" not in state:
792 installedCheck = False
793 intentId = intent.get( 'id', None )
794 intentStates.append( ( intentId, state ) )
795 except ( ValueError, TypeError ):
796 main.log.exception( "Error parsing intents" )
797 # add submitted intents not in the store
798 tmplist = [ i for i, s in intentStates ]
799 for i in intentIds:
800 if i not in tmplist:
801 intentStates.append( ( i, " - " ) )
802 intentStates.sort()
803 for i, s in intentStates:
804 count += 1
805 main.log.info( "%-6s%-15s%-15s" %
806 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700807 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700808 try:
809 missing = False
810 if leaders:
811 parsedLeaders = json.loads( leaders )
812 main.log.warn( json.dumps( parsedLeaders,
813 sort_keys=True,
814 indent=4,
815 separators=( ',', ': ' ) ) )
816 # check for all intent partitions
817 # check for election
818 topics = []
819 for i in range( 14 ):
820 topics.append( "intent-partition-" + str( i ) )
821 # FIXME: this should only be after we start the app
822 topics.append( "org.onosproject.election" )
823 main.log.debug( topics )
824 ONOStopics = [ j['topic'] for j in parsedLeaders ]
825 for topic in topics:
826 if topic not in ONOStopics:
827 main.log.error( "Error: " + topic +
828 " not in leaders" )
829 missing = True
830 else:
831 main.log.error( "leaders() returned None" )
832 except ( ValueError, TypeError ):
833 main.log.exception( "Error parsing leaders" )
834 main.log.error( repr( leaders ) )
835 # Check all nodes
836 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700837 for i in main.activeNodes:
838 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700839 response = node.leaders( jsonFormat=False)
840 main.log.warn( str( node.name ) + " leaders output: \n" +
841 str( response ) )
842
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700843 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700844 try:
845 if partitions :
846 parsedPartitions = json.loads( partitions )
847 main.log.warn( json.dumps( parsedPartitions,
848 sort_keys=True,
849 indent=4,
850 separators=( ',', ': ' ) ) )
851 # TODO check for a leader in all paritions
852 # TODO check for consistency among nodes
853 else:
854 main.log.error( "partitions() returned None" )
855 except ( ValueError, TypeError ):
856 main.log.exception( "Error parsing partitions" )
857 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700858 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700859 try:
860 if pendingMap :
861 parsedPending = json.loads( pendingMap )
862 main.log.warn( json.dumps( parsedPending,
863 sort_keys=True,
864 indent=4,
865 separators=( ',', ': ' ) ) )
866 # TODO check something here?
867 else:
868 main.log.error( "pendingMap() returned None" )
869 except ( ValueError, TypeError ):
870 main.log.exception( "Error parsing pending map" )
871 main.log.error( repr( pendingMap ) )
872
    def CASE4( self, main ):
        """
        Ping across added host intents

        Verifies dataplane connectivity for the host intents added in the
        previous case: each intent connects h<i> to h<i+10> for i in 8..17.
        After the ping sweep, inspects the intent subsystem on the active
        nodes: per-intent state (expecting INSTALLED), topic leadership,
        intent partitions, and the pending map. If any intent is not yet
        INSTALLED, waits 60 seconds, dumps diagnostic state, and pings again.
        """
        import json
        import time
        # Sanity-check that the framework state this case relies on exists
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"
        main.step( "Ping across added host intents" )
        # Use the first active node's CLI for all single-node queries below
        onosCli = main.CLIs[ main.activeNodes[0] ]
        PingResult = main.TRUE
        # Ping each intent's host pair once: h<i> -> h<i+10>
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # At least one pair failed; dump ONOS1's view of the intents
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                # NOTE(review): if intents() itself raised TypeError,
                # tmpIntents would be unbound here and repr() would raise
                # NameError -- confirm intents() only returns str/None
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check Intent state" )
        # Poll up to 40 times ( ~40 s ) for every intent to reach INSTALLED
        installedCheck = False
        loopCount = 0
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Check leadership of topics" )
        # Every intent partition topic ( intent-partition-0..13 ) should have
        # an entry in the leaders output
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            # All topics present on the queried node; also dump each active
            # node's raw leaders output for cross-checking in the logs
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            # Some intents never reached INSTALLED: wait, dump diagnostic
            # state ( intents, leaders, partitions, pending map, flows ),
            # then retry the ping sweep
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                # A topic had no leader entry; dump raw leaders output from
                # every active node for debugging
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            # Repeat the ping sweep over the same host pairs
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    # NOTE(review): same unbound-tmpIntents hazard as the
                    # first ping check above -- confirm intents() cannot raise
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1154 def CASE5( self, main ):
1155 """
1156 Reading state of ONOS
1157 """
1158 import json
1159 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001160 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001161 assert main, "main not defined"
1162 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001163 assert main.CLIs, "main.CLIs not defined"
1164 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001165
1166 main.case( "Setting up and gathering data for current state" )
1167 # The general idea for this test case is to pull the state of
1168 # ( intents,flows, topology,... ) from each ONOS node
1169 # We can then compare them with each other and also with past states
1170
1171 main.step( "Check that each switch has a master" )
1172 global mastershipState
1173 mastershipState = '[]'
1174
1175 # Assert that each device has a master
1176 rolesNotNull = main.TRUE
1177 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001178 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001179 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001180 name="rolesNotNull-" + str( i ),
1181 args=[] )
1182 threads.append( t )
1183 t.start()
1184
1185 for t in threads:
1186 t.join()
1187 rolesNotNull = rolesNotNull and t.result
1188 utilities.assert_equals(
1189 expect=main.TRUE,
1190 actual=rolesNotNull,
1191 onpass="Each device has a master",
1192 onfail="Some devices don't have a master assigned" )
1193
1194 main.step( "Get the Mastership of each switch from each controller" )
1195 ONOSMastership = []
1196 mastershipCheck = main.FALSE
1197 consistentMastership = True
1198 rolesResults = True
1199 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001200 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001201 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001202 name="roles-" + str( i ),
1203 args=[] )
1204 threads.append( t )
1205 t.start()
1206
1207 for t in threads:
1208 t.join()
1209 ONOSMastership.append( t.result )
1210
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001211 for i in range( len( ONOSMastership ) ):
1212 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001213 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001214 main.log.error( "Error in getting ONOS" + node + " roles" )
1215 main.log.warn( "ONOS" + node + " mastership response: " +
1216 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001217 rolesResults = False
1218 utilities.assert_equals(
1219 expect=True,
1220 actual=rolesResults,
1221 onpass="No error in reading roles output",
1222 onfail="Error in reading roles from ONOS" )
1223
1224 main.step( "Check for consistency in roles from each controller" )
1225 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1226 main.log.info(
1227 "Switch roles are consistent across all ONOS nodes" )
1228 else:
1229 consistentMastership = False
1230 utilities.assert_equals(
1231 expect=True,
1232 actual=consistentMastership,
1233 onpass="Switch roles are consistent across all ONOS nodes",
1234 onfail="ONOS nodes have different views of switch roles" )
1235
1236 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001237 for i in range( len( main.activeNodes ) ):
1238 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001239 try:
1240 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001241 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001242 json.dumps(
1243 json.loads( ONOSMastership[ i ] ),
1244 sort_keys=True,
1245 indent=4,
1246 separators=( ',', ': ' ) ) )
1247 except ( ValueError, TypeError ):
1248 main.log.warn( repr( ONOSMastership[ i ] ) )
1249 elif rolesResults and consistentMastership:
1250 mastershipCheck = main.TRUE
1251 mastershipState = ONOSMastership[ 0 ]
1252
1253 main.step( "Get the intents from each controller" )
1254 global intentState
1255 intentState = []
1256 ONOSIntents = []
1257 intentCheck = main.FALSE
1258 consistentIntents = True
1259 intentsResults = True
1260 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001261 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001262 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001263 name="intents-" + str( i ),
1264 args=[],
1265 kwargs={ 'jsonFormat': True } )
1266 threads.append( t )
1267 t.start()
1268
1269 for t in threads:
1270 t.join()
1271 ONOSIntents.append( t.result )
1272
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001273 for i in range( len( ONOSIntents ) ):
1274 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001275 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001276 main.log.error( "Error in getting ONOS" + node + " intents" )
1277 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001278 repr( ONOSIntents[ i ] ) )
1279 intentsResults = False
1280 utilities.assert_equals(
1281 expect=True,
1282 actual=intentsResults,
1283 onpass="No error in reading intents output",
1284 onfail="Error in reading intents from ONOS" )
1285
1286 main.step( "Check for consistency in Intents from each controller" )
1287 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1288 main.log.info( "Intents are consistent across all ONOS " +
1289 "nodes" )
1290 else:
1291 consistentIntents = False
1292 main.log.error( "Intents not consistent" )
1293 utilities.assert_equals(
1294 expect=True,
1295 actual=consistentIntents,
1296 onpass="Intents are consistent across all ONOS nodes",
1297 onfail="ONOS nodes have different views of intents" )
1298
1299 if intentsResults:
1300 # Try to make it easy to figure out what is happening
1301 #
1302 # Intent ONOS1 ONOS2 ...
1303 # 0x01 INSTALLED INSTALLING
1304 # ... ... ...
1305 # ... ... ...
1306 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001307 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001308 title += " " * 10 + "ONOS" + str( n + 1 )
1309 main.log.warn( title )
1310 # get all intent keys in the cluster
1311 keys = []
1312 for nodeStr in ONOSIntents:
1313 node = json.loads( nodeStr )
1314 for intent in node:
1315 keys.append( intent.get( 'id' ) )
1316 keys = set( keys )
1317 for key in keys:
1318 row = "%-13s" % key
1319 for nodeStr in ONOSIntents:
1320 node = json.loads( nodeStr )
1321 for intent in node:
1322 if intent.get( 'id', "Error" ) == key:
1323 row += "%-15s" % intent.get( 'state' )
1324 main.log.warn( row )
1325 # End table view
1326
1327 if intentsResults and not consistentIntents:
1328 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001329 n = str( main.activeNodes[-1] + 1 )
1330 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001331 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1332 sort_keys=True,
1333 indent=4,
1334 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001335 for i in range( len( ONOSIntents ) ):
1336 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001337 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001338 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001339 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1340 sort_keys=True,
1341 indent=4,
1342 separators=( ',', ': ' ) ) )
1343 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001344 main.log.debug( "ONOS" + node + " intents match ONOS" +
1345 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001346 elif intentsResults and consistentIntents:
1347 intentCheck = main.TRUE
1348 intentState = ONOSIntents[ 0 ]
1349
1350 main.step( "Get the flows from each controller" )
1351 global flowState
1352 flowState = []
1353 ONOSFlows = []
1354 ONOSFlowsJson = []
1355 flowCheck = main.FALSE
1356 consistentFlows = True
1357 flowsResults = True
1358 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001359 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001360 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001361 name="flows-" + str( i ),
1362 args=[],
1363 kwargs={ 'jsonFormat': True } )
1364 threads.append( t )
1365 t.start()
1366
1367 # NOTE: Flows command can take some time to run
1368 time.sleep(30)
1369 for t in threads:
1370 t.join()
1371 result = t.result
1372 ONOSFlows.append( result )
1373
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001374 for i in range( len( ONOSFlows ) ):
1375 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001376 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1377 main.log.error( "Error in getting ONOS" + num + " flows" )
1378 main.log.warn( "ONOS" + num + " flows response: " +
1379 repr( ONOSFlows[ i ] ) )
1380 flowsResults = False
1381 ONOSFlowsJson.append( None )
1382 else:
1383 try:
1384 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1385 except ( ValueError, TypeError ):
1386 # FIXME: change this to log.error?
1387 main.log.exception( "Error in parsing ONOS" + num +
1388 " response as json." )
1389 main.log.error( repr( ONOSFlows[ i ] ) )
1390 ONOSFlowsJson.append( None )
1391 flowsResults = False
1392 utilities.assert_equals(
1393 expect=True,
1394 actual=flowsResults,
1395 onpass="No error in reading flows output",
1396 onfail="Error in reading flows from ONOS" )
1397
1398 main.step( "Check for consistency in Flows from each controller" )
1399 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1400 if all( tmp ):
1401 main.log.info( "Flow count is consistent across all ONOS nodes" )
1402 else:
1403 consistentFlows = False
1404 utilities.assert_equals(
1405 expect=True,
1406 actual=consistentFlows,
1407 onpass="The flow count is consistent across all ONOS nodes",
1408 onfail="ONOS nodes have different flow counts" )
1409
1410 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001411 for i in range( len( ONOSFlows ) ):
1412 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001413 try:
1414 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001415 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001416 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1417 indent=4, separators=( ',', ': ' ) ) )
1418 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001419 main.log.warn( "ONOS" + node + " flows: " +
1420 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001421 elif flowsResults and consistentFlows:
1422 flowCheck = main.TRUE
1423 flowState = ONOSFlows[ 0 ]
1424
1425 main.step( "Get the OF Table entries" )
1426 global flows
1427 flows = []
1428 for i in range( 1, 29 ):
Jon Hall9043c902015-07-30 14:23:44 -07001429 flows.append( main.Mininet1.getFlowTable( 1.3, "s" + str( i ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001430 if flowCheck == main.FALSE:
1431 for table in flows:
1432 main.log.warn( table )
1433 # TODO: Compare switch flow tables with ONOS flow tables
1434
1435 main.step( "Start continuous pings" )
1436 main.Mininet2.pingLong(
1437 src=main.params[ 'PING' ][ 'source1' ],
1438 target=main.params[ 'PING' ][ 'target1' ],
1439 pingTime=500 )
1440 main.Mininet2.pingLong(
1441 src=main.params[ 'PING' ][ 'source2' ],
1442 target=main.params[ 'PING' ][ 'target2' ],
1443 pingTime=500 )
1444 main.Mininet2.pingLong(
1445 src=main.params[ 'PING' ][ 'source3' ],
1446 target=main.params[ 'PING' ][ 'target3' ],
1447 pingTime=500 )
1448 main.Mininet2.pingLong(
1449 src=main.params[ 'PING' ][ 'source4' ],
1450 target=main.params[ 'PING' ][ 'target4' ],
1451 pingTime=500 )
1452 main.Mininet2.pingLong(
1453 src=main.params[ 'PING' ][ 'source5' ],
1454 target=main.params[ 'PING' ][ 'target5' ],
1455 pingTime=500 )
1456 main.Mininet2.pingLong(
1457 src=main.params[ 'PING' ][ 'source6' ],
1458 target=main.params[ 'PING' ][ 'target6' ],
1459 pingTime=500 )
1460 main.Mininet2.pingLong(
1461 src=main.params[ 'PING' ][ 'source7' ],
1462 target=main.params[ 'PING' ][ 'target7' ],
1463 pingTime=500 )
1464 main.Mininet2.pingLong(
1465 src=main.params[ 'PING' ][ 'source8' ],
1466 target=main.params[ 'PING' ][ 'target8' ],
1467 pingTime=500 )
1468 main.Mininet2.pingLong(
1469 src=main.params[ 'PING' ][ 'source9' ],
1470 target=main.params[ 'PING' ][ 'target9' ],
1471 pingTime=500 )
1472 main.Mininet2.pingLong(
1473 src=main.params[ 'PING' ][ 'source10' ],
1474 target=main.params[ 'PING' ][ 'target10' ],
1475 pingTime=500 )
1476
1477 main.step( "Collecting topology information from ONOS" )
1478 devices = []
1479 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001480 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001481 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001482 name="devices-" + str( i ),
1483 args=[ ] )
1484 threads.append( t )
1485 t.start()
1486
1487 for t in threads:
1488 t.join()
1489 devices.append( t.result )
1490 hosts = []
1491 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001492 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001493 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001494 name="hosts-" + str( i ),
1495 args=[ ] )
1496 threads.append( t )
1497 t.start()
1498
1499 for t in threads:
1500 t.join()
1501 try:
1502 hosts.append( json.loads( t.result ) )
1503 except ( ValueError, TypeError ):
1504 # FIXME: better handling of this, print which node
1505 # Maybe use thread name?
1506 main.log.exception( "Error parsing json output of hosts" )
1507 # FIXME: should this be an empty json object instead?
1508 hosts.append( None )
1509
1510 ports = []
1511 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001512 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001513 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001514 name="ports-" + str( i ),
1515 args=[ ] )
1516 threads.append( t )
1517 t.start()
1518
1519 for t in threads:
1520 t.join()
1521 ports.append( t.result )
1522 links = []
1523 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001524 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001525 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001526 name="links-" + str( i ),
1527 args=[ ] )
1528 threads.append( t )
1529 t.start()
1530
1531 for t in threads:
1532 t.join()
1533 links.append( t.result )
1534 clusters = []
1535 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001536 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001537 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001538 name="clusters-" + str( i ),
1539 args=[ ] )
1540 threads.append( t )
1541 t.start()
1542
1543 for t in threads:
1544 t.join()
1545 clusters.append( t.result )
1546 # Compare json objects for hosts and dataplane clusters
1547
1548 # hosts
1549 main.step( "Host view is consistent across ONOS nodes" )
1550 consistentHostsResult = main.TRUE
1551 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001552 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001553 if "Error" not in hosts[ controller ]:
1554 if hosts[ controller ] == hosts[ 0 ]:
1555 continue
1556 else: # hosts not consistent
1557 main.log.error( "hosts from ONOS" +
1558 controllerStr +
1559 " is inconsistent with ONOS1" )
1560 main.log.warn( repr( hosts[ controller ] ) )
1561 consistentHostsResult = main.FALSE
1562
1563 else:
1564 main.log.error( "Error in getting ONOS hosts from ONOS" +
1565 controllerStr )
1566 consistentHostsResult = main.FALSE
1567 main.log.warn( "ONOS" + controllerStr +
1568 " hosts response: " +
1569 repr( hosts[ controller ] ) )
1570 utilities.assert_equals(
1571 expect=main.TRUE,
1572 actual=consistentHostsResult,
1573 onpass="Hosts view is consistent across all ONOS nodes",
1574 onfail="ONOS nodes have different views of hosts" )
1575
1576 main.step( "Each host has an IP address" )
1577 ipResult = main.TRUE
1578 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001579 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001580 for host in hosts[ controller ]:
1581 if not host.get( 'ipAddresses', [ ] ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001582 main.log.error( "Error with host ips on controller" +
Jon Hall5cf14d52015-07-16 12:15:19 -07001583 controllerStr + ": " + str( host ) )
1584 ipResult = main.FALSE
1585 utilities.assert_equals(
1586 expect=main.TRUE,
1587 actual=ipResult,
1588 onpass="The ips of the hosts aren't empty",
1589 onfail="The ip of at least one host is missing" )
1590
1591 # Strongly connected clusters of devices
1592 main.step( "Cluster view is consistent across ONOS nodes" )
1593 consistentClustersResult = main.TRUE
1594 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001595 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001596 if "Error" not in clusters[ controller ]:
1597 if clusters[ controller ] == clusters[ 0 ]:
1598 continue
1599 else: # clusters not consistent
1600 main.log.error( "clusters from ONOS" + controllerStr +
1601 " is inconsistent with ONOS1" )
1602 consistentClustersResult = main.FALSE
1603
1604 else:
1605 main.log.error( "Error in getting dataplane clusters " +
1606 "from ONOS" + controllerStr )
1607 consistentClustersResult = main.FALSE
1608 main.log.warn( "ONOS" + controllerStr +
1609 " clusters response: " +
1610 repr( clusters[ controller ] ) )
1611 utilities.assert_equals(
1612 expect=main.TRUE,
1613 actual=consistentClustersResult,
1614 onpass="Clusters view is consistent across all ONOS nodes",
1615 onfail="ONOS nodes have different views of clusters" )
1616 # there should always only be one cluster
1617 main.step( "Cluster view correct across ONOS nodes" )
1618 try:
1619 numClusters = len( json.loads( clusters[ 0 ] ) )
1620 except ( ValueError, TypeError ):
1621 main.log.exception( "Error parsing clusters[0]: " +
1622 repr( clusters[ 0 ] ) )
1623 clusterResults = main.FALSE
1624 if numClusters == 1:
1625 clusterResults = main.TRUE
1626 utilities.assert_equals(
1627 expect=1,
1628 actual=numClusters,
1629 onpass="ONOS shows 1 SCC",
1630 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1631
1632 main.step( "Comparing ONOS topology to MN" )
1633 devicesResults = main.TRUE
1634 linksResults = main.TRUE
1635 hostsResults = main.TRUE
1636 mnSwitches = main.Mininet1.getSwitches()
1637 mnLinks = main.Mininet1.getLinks()
1638 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001639 for controller in main.activeNodes:
1640 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001641 if devices[ controller ] and ports[ controller ] and\
1642 "Error" not in devices[ controller ] and\
1643 "Error" not in ports[ controller ]:
1644
1645 currentDevicesResult = main.Mininet1.compareSwitches(
1646 mnSwitches,
1647 json.loads( devices[ controller ] ),
1648 json.loads( ports[ controller ] ) )
1649 else:
1650 currentDevicesResult = main.FALSE
1651 utilities.assert_equals( expect=main.TRUE,
1652 actual=currentDevicesResult,
1653 onpass="ONOS" + controllerStr +
1654 " Switches view is correct",
1655 onfail="ONOS" + controllerStr +
1656 " Switches view is incorrect" )
1657 if links[ controller ] and "Error" not in links[ controller ]:
1658 currentLinksResult = main.Mininet1.compareLinks(
1659 mnSwitches, mnLinks,
1660 json.loads( links[ controller ] ) )
1661 else:
1662 currentLinksResult = main.FALSE
1663 utilities.assert_equals( expect=main.TRUE,
1664 actual=currentLinksResult,
1665 onpass="ONOS" + controllerStr +
1666 " links view is correct",
1667 onfail="ONOS" + controllerStr +
1668 " links view is incorrect" )
1669
1670 if hosts[ controller ] or "Error" not in hosts[ controller ]:
1671 currentHostsResult = main.Mininet1.compareHosts(
1672 mnHosts,
1673 hosts[ controller ] )
1674 else:
1675 currentHostsResult = main.FALSE
1676 utilities.assert_equals( expect=main.TRUE,
1677 actual=currentHostsResult,
1678 onpass="ONOS" + controllerStr +
1679 " hosts exist in Mininet",
1680 onfail="ONOS" + controllerStr +
1681 " hosts don't match Mininet" )
1682
1683 devicesResults = devicesResults and currentDevicesResult
1684 linksResults = linksResults and currentLinksResult
1685 hostsResults = hostsResults and currentHostsResult
1686
1687 main.step( "Device information is correct" )
1688 utilities.assert_equals(
1689 expect=main.TRUE,
1690 actual=devicesResults,
1691 onpass="Device information is correct",
1692 onfail="Device information is incorrect" )
1693
1694 main.step( "Links are correct" )
1695 utilities.assert_equals(
1696 expect=main.TRUE,
1697 actual=linksResults,
1698 onpass="Link are correct",
1699 onfail="Links are incorrect" )
1700
1701 main.step( "Hosts are correct" )
1702 utilities.assert_equals(
1703 expect=main.TRUE,
1704 actual=hostsResults,
1705 onpass="Hosts are correct",
1706 onfail="Hosts are incorrect" )
1707
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001708 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001709 """
1710 The Failure case.
1711 """
Jon Halle1a3b752015-07-22 13:02:46 -07001712 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001713 assert main, "main not defined"
1714 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001715 assert main.CLIs, "main.CLIs not defined"
1716 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001717 main.case( "Stop minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001718
1719 main.step( "Checking ONOS Logs for errors" )
1720 for node in main.nodes:
1721 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1722 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1723
Jon Hall3b489db2015-10-05 14:38:37 -07001724 n = len( main.nodes ) # Number of nodes
1725 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1726 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1727 if n > 3:
1728 main.kill.append( p - 1 )
1729 # NOTE: This only works for cluster sizes of 3,5, or 7.
1730
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001731 main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001732 killResults = main.TRUE
1733 for i in main.kill:
1734 killResults = killResults and\
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001735 main.ONOSbench.onosStop( main.nodes[i].ip_address )
1736 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001737 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001738 onpass="ONOS nodes stopped successfully",
1739 onfail="ONOS nodes NOT successfully stopped" )
1740
1741 def CASE62( self, main ):
1742 """
1743 The bring up stopped nodes
1744 """
1745 import time
1746 assert main.numCtrls, "main.numCtrls not defined"
1747 assert main, "main not defined"
1748 assert utilities.assert_equals, "utilities.assert_equals not defined"
1749 assert main.CLIs, "main.CLIs not defined"
1750 assert main.nodes, "main.nodes not defined"
1751 assert main.kill, "main.kill not defined"
1752 main.case( "Restart minority of ONOS nodes" )
1753
1754 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1755 startResults = main.TRUE
1756 restartTime = time.time()
1757 for i in main.kill:
1758 startResults = startResults and\
1759 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1760 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1761 onpass="ONOS nodes started successfully",
1762 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001763
1764 main.step( "Checking if ONOS is up yet" )
1765 count = 0
1766 onosIsupResult = main.FALSE
1767 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001768 onosIsupResult = main.TRUE
1769 for i in main.kill:
1770 onosIsupResult = onosIsupResult and\
1771 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001772 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001773 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1774 onpass="ONOS restarted successfully",
1775 onfail="ONOS restart NOT successful" )
1776
Jon Halle1a3b752015-07-22 13:02:46 -07001777 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001778 cliResults = main.TRUE
1779 for i in main.kill:
1780 cliResults = cliResults and\
1781 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001782 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001783 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1784 onpass="ONOS cli restarted",
1785 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001786 main.activeNodes.sort()
1787 try:
1788 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1789 "List of active nodes has duplicates, this likely indicates something was run out of order"
1790 except AssertionError:
1791 main.log.exception( "" )
1792 main.cleanup()
1793 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001794
1795 # Grab the time of restart so we chan check how long the gossip
1796 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001797 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001798 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001799 # TODO: MAke this configurable. Also, we are breaking the above timer
1800 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001801 node = main.activeNodes[0]
1802 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1803 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1804 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001805
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        Verifies, across the currently active nodes only
        (main.activeNodes), that:
          - every device still has a master,
          - device roles and intents are readable and consistent between
            nodes,
          - intents match the pre-failure snapshot,
          - OpenFlow tables on s1-s28 are unchanged,
          - leadership election still reports a single, live leader.

        NOTE(review): relies on module globals presumably set by CASE5 —
        'intentState' (pre-failure intents JSON) and 'flows' (pre-failure
        flow tables) — confirm CASE5 ran first; only main.kill is guarded
        against being unset.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            main.kill
        except AttributeError:
            # CASE61 did not run; treat no nodes as killed so the
            # leadership check below has an empty 'restarted' list.
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # One thread per active node; each t.result is the node's verdict.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # ONOSMastership[i] is the raw roles output of the i-th ACTIVE
        # node; map back to node number via main.activeNodes[i] + 1.
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes must report byte-identical role output.
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's roles only when they were readable but
            # disagreed, to aid debugging.
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Sorted comparison: intent ordering may differ per node.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One table row per intent id: its state as seen by each node.
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states (state -> count).
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        if intentState and intentState == ONOSIntents[ 0 ]:
            sameIntents = main.TRUE
            main.log.info( "Intents are consistent with before failure" )
        # TODO: possibly the states have changed? we may need to figure out
        #       what the acceptable states are
        elif len( intentState ) == len( ONOSIntents[ 0 ] ):
            # Same length but not byte-identical: fall back to checking
            # that every pre-failure intent object still exists.
            sameIntents = main.TRUE
            try:
                before = json.loads( intentState )
                after = json.loads( ONOSIntents[ 0 ] )
                for intent in before:
                    if intent not in after:
                        sameIntents = main.FALSE
                        main.log.debug( "Intent is not currently in ONOS " +
                                        "(at least in the same form):" )
                        main.log.debug( json.dumps( intent ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        if sameIntents == main.FALSE:
            try:
                main.log.debug( "ONOS intents before: " )
                main.log.debug( json.dumps( json.loads( intentState ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
                main.log.debug( "Current ONOS intents: " )
                main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        # 'flows' is the pre-failure snapshot — presumably captured by an
        # earlier case; verify it is set before this case runs.
        FlowTables = main.TRUE
        flows2 = []
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            # 1.3 selects the OpenFlow protocol version for the dump.
            tmpFlows = main.Mininet1.getFlowTable( 1.3, "s" + str( i + 1 ) )
            flows2.append( tmpFlows )
            tempResult = main.Mininet1.flowComp(
                flow1=flows[ i ],
                flow2=tmpFlows )
            FlowTables = FlowTables and tempResult
            if FlowTables == main.FALSE:
                main.log.info( "Differences in flow table for switch: s" +
                               str( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes stopped in CASE61 — none of them may still be
        # reported as the election leader.
        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        # Every active node must agree on exactly one leader.
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2127
2128 def CASE8( self, main ):
2129 """
2130 Compare topo
2131 """
2132 import json
2133 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002134 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002135 assert main, "main not defined"
2136 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002137 assert main.CLIs, "main.CLIs not defined"
2138 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002139
2140 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002141 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002142 " and ONOS"
2143
2144 main.step( "Comparing ONOS topology to MN" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002145 topoResult = main.FALSE
2146 elapsed = 0
2147 count = 0
2148 main.step( "Collecting topology information from ONOS" )
2149 startTime = time.time()
2150 # Give time for Gossip to work
2151 while topoResult == main.FALSE and elapsed < 60:
Jon Hall96091e62015-09-21 17:34:17 -07002152 devicesResults = main.TRUE
2153 linksResults = main.TRUE
2154 hostsResults = main.TRUE
2155 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002156 count += 1
2157 cliStart = time.time()
2158 devices = []
2159 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002160 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002161 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002162 name="devices-" + str( i ),
2163 args=[ ] )
2164 threads.append( t )
2165 t.start()
2166
2167 for t in threads:
2168 t.join()
2169 devices.append( t.result )
2170 hosts = []
2171 ipResult = main.TRUE
2172 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002173 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002174 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07002175 name="hosts-" + str( i ),
2176 args=[ ] )
2177 threads.append( t )
2178 t.start()
2179
2180 for t in threads:
2181 t.join()
2182 try:
2183 hosts.append( json.loads( t.result ) )
2184 except ( ValueError, TypeError ):
2185 main.log.exception( "Error parsing hosts results" )
2186 main.log.error( repr( t.result ) )
2187 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002188 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002189 for host in hosts[ controller ]:
2190 if host is None or host.get( 'ipAddresses', [] ) == []:
2191 main.log.error(
2192 "DEBUG:Error with host ipAddresses on controller" +
2193 controllerStr + ": " + str( host ) )
2194 ipResult = main.FALSE
2195 ports = []
2196 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002197 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002198 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002199 name="ports-" + str( i ),
2200 args=[ ] )
2201 threads.append( t )
2202 t.start()
2203
2204 for t in threads:
2205 t.join()
2206 ports.append( t.result )
2207 links = []
2208 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002209 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002210 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002211 name="links-" + str( i ),
2212 args=[ ] )
2213 threads.append( t )
2214 t.start()
2215
2216 for t in threads:
2217 t.join()
2218 links.append( t.result )
2219 clusters = []
2220 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002221 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002222 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002223 name="clusters-" + str( i ),
2224 args=[ ] )
2225 threads.append( t )
2226 t.start()
2227
2228 for t in threads:
2229 t.join()
2230 clusters.append( t.result )
2231
2232 elapsed = time.time() - startTime
2233 cliTime = time.time() - cliStart
2234 print "Elapsed time: " + str( elapsed )
2235 print "CLI time: " + str( cliTime )
2236
2237 mnSwitches = main.Mininet1.getSwitches()
2238 mnLinks = main.Mininet1.getLinks()
2239 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002240 for controller in range( len( main.activeNodes ) ):
2241 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002242 if devices[ controller ] and ports[ controller ] and\
2243 "Error" not in devices[ controller ] and\
2244 "Error" not in ports[ controller ]:
2245
2246 currentDevicesResult = main.Mininet1.compareSwitches(
2247 mnSwitches,
2248 json.loads( devices[ controller ] ),
2249 json.loads( ports[ controller ] ) )
2250 else:
2251 currentDevicesResult = main.FALSE
2252 utilities.assert_equals( expect=main.TRUE,
2253 actual=currentDevicesResult,
2254 onpass="ONOS" + controllerStr +
2255 " Switches view is correct",
2256 onfail="ONOS" + controllerStr +
2257 " Switches view is incorrect" )
2258
2259 if links[ controller ] and "Error" not in links[ controller ]:
2260 currentLinksResult = main.Mininet1.compareLinks(
2261 mnSwitches, mnLinks,
2262 json.loads( links[ controller ] ) )
2263 else:
2264 currentLinksResult = main.FALSE
2265 utilities.assert_equals( expect=main.TRUE,
2266 actual=currentLinksResult,
2267 onpass="ONOS" + controllerStr +
2268 " links view is correct",
2269 onfail="ONOS" + controllerStr +
2270 " links view is incorrect" )
2271
2272 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2273 currentHostsResult = main.Mininet1.compareHosts(
2274 mnHosts,
2275 hosts[ controller ] )
2276 else:
2277 currentHostsResult = main.FALSE
2278 utilities.assert_equals( expect=main.TRUE,
2279 actual=currentHostsResult,
2280 onpass="ONOS" + controllerStr +
2281 " hosts exist in Mininet",
2282 onfail="ONOS" + controllerStr +
2283 " hosts don't match Mininet" )
2284 # CHECKING HOST ATTACHMENT POINTS
2285 hostAttachment = True
2286 zeroHosts = False
2287 # FIXME: topo-HA/obelisk specific mappings:
2288 # key is mac and value is dpid
2289 mappings = {}
2290 for i in range( 1, 29 ): # hosts 1 through 28
2291 # set up correct variables:
2292 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2293 if i == 1:
2294 deviceId = "1000".zfill(16)
2295 elif i == 2:
2296 deviceId = "2000".zfill(16)
2297 elif i == 3:
2298 deviceId = "3000".zfill(16)
2299 elif i == 4:
2300 deviceId = "3004".zfill(16)
2301 elif i == 5:
2302 deviceId = "5000".zfill(16)
2303 elif i == 6:
2304 deviceId = "6000".zfill(16)
2305 elif i == 7:
2306 deviceId = "6007".zfill(16)
2307 elif i >= 8 and i <= 17:
2308 dpid = '3' + str( i ).zfill( 3 )
2309 deviceId = dpid.zfill(16)
2310 elif i >= 18 and i <= 27:
2311 dpid = '6' + str( i ).zfill( 3 )
2312 deviceId = dpid.zfill(16)
2313 elif i == 28:
2314 deviceId = "2800".zfill(16)
2315 mappings[ macId ] = deviceId
2316 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2317 if hosts[ controller ] == []:
2318 main.log.warn( "There are no hosts discovered" )
2319 zeroHosts = True
2320 else:
2321 for host in hosts[ controller ]:
2322 mac = None
2323 location = None
2324 device = None
2325 port = None
2326 try:
2327 mac = host.get( 'mac' )
2328 assert mac, "mac field could not be found for this host object"
2329
2330 location = host.get( 'location' )
2331 assert location, "location field could not be found for this host object"
2332
2333 # Trim the protocol identifier off deviceId
2334 device = str( location.get( 'elementId' ) ).split(':')[1]
2335 assert device, "elementId field could not be found for this host location object"
2336
2337 port = location.get( 'port' )
2338 assert port, "port field could not be found for this host location object"
2339
2340 # Now check if this matches where they should be
2341 if mac and device and port:
2342 if str( port ) != "1":
2343 main.log.error( "The attachment port is incorrect for " +
2344 "host " + str( mac ) +
2345 ". Expected: 1 Actual: " + str( port) )
2346 hostAttachment = False
2347 if device != mappings[ str( mac ) ]:
2348 main.log.error( "The attachment device is incorrect for " +
2349 "host " + str( mac ) +
2350 ". Expected: " + mappings[ str( mac ) ] +
2351 " Actual: " + device )
2352 hostAttachment = False
2353 else:
2354 hostAttachment = False
2355 except AssertionError:
2356 main.log.exception( "Json object not as expected" )
2357 main.log.error( repr( host ) )
2358 hostAttachment = False
2359 else:
2360 main.log.error( "No hosts json output or \"Error\"" +
2361 " in output. hosts = " +
2362 repr( hosts[ controller ] ) )
2363 if zeroHosts is False:
2364 hostAttachment = True
2365
2366 # END CHECKING HOST ATTACHMENT POINTS
2367 devicesResults = devicesResults and currentDevicesResult
2368 linksResults = linksResults and currentLinksResult
2369 hostsResults = hostsResults and currentHostsResult
2370 hostAttachmentResults = hostAttachmentResults and\
2371 hostAttachment
2372
2373 # Compare json objects for hosts and dataplane clusters
2374
2375 # hosts
2376 main.step( "Hosts view is consistent across all ONOS nodes" )
2377 consistentHostsResult = main.TRUE
2378 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002379 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002380 if "Error" not in hosts[ controller ]:
2381 if hosts[ controller ] == hosts[ 0 ]:
2382 continue
2383 else: # hosts not consistent
2384 main.log.error( "hosts from ONOS" + controllerStr +
2385 " is inconsistent with ONOS1" )
2386 main.log.warn( repr( hosts[ controller ] ) )
2387 consistentHostsResult = main.FALSE
2388
2389 else:
2390 main.log.error( "Error in getting ONOS hosts from ONOS" +
2391 controllerStr )
2392 consistentHostsResult = main.FALSE
2393 main.log.warn( "ONOS" + controllerStr +
2394 " hosts response: " +
2395 repr( hosts[ controller ] ) )
2396 utilities.assert_equals(
2397 expect=main.TRUE,
2398 actual=consistentHostsResult,
2399 onpass="Hosts view is consistent across all ONOS nodes",
2400 onfail="ONOS nodes have different views of hosts" )
2401
2402 main.step( "Hosts information is correct" )
2403 hostsResults = hostsResults and ipResult
2404 utilities.assert_equals(
2405 expect=main.TRUE,
2406 actual=hostsResults,
2407 onpass="Host information is correct",
2408 onfail="Host information is incorrect" )
2409
2410 main.step( "Host attachment points to the network" )
2411 utilities.assert_equals(
2412 expect=True,
2413 actual=hostAttachmentResults,
2414 onpass="Hosts are correctly attached to the network",
2415 onfail="ONOS did not correctly attach hosts to the network" )
2416
2417 # Strongly connected clusters of devices
2418 main.step( "Clusters view is consistent across all ONOS nodes" )
2419 consistentClustersResult = main.TRUE
2420 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002421 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002422 if "Error" not in clusters[ controller ]:
2423 if clusters[ controller ] == clusters[ 0 ]:
2424 continue
2425 else: # clusters not consistent
2426 main.log.error( "clusters from ONOS" +
2427 controllerStr +
2428 " is inconsistent with ONOS1" )
2429 consistentClustersResult = main.FALSE
2430
2431 else:
2432 main.log.error( "Error in getting dataplane clusters " +
2433 "from ONOS" + controllerStr )
2434 consistentClustersResult = main.FALSE
2435 main.log.warn( "ONOS" + controllerStr +
2436 " clusters response: " +
2437 repr( clusters[ controller ] ) )
2438 utilities.assert_equals(
2439 expect=main.TRUE,
2440 actual=consistentClustersResult,
2441 onpass="Clusters view is consistent across all ONOS nodes",
2442 onfail="ONOS nodes have different views of clusters" )
2443
2444 main.step( "There is only one SCC" )
2445 # there should always only be one cluster
2446 try:
2447 numClusters = len( json.loads( clusters[ 0 ] ) )
2448 except ( ValueError, TypeError ):
2449 main.log.exception( "Error parsing clusters[0]: " +
2450 repr( clusters[0] ) )
2451 clusterResults = main.FALSE
2452 if numClusters == 1:
2453 clusterResults = main.TRUE
2454 utilities.assert_equals(
2455 expect=1,
2456 actual=numClusters,
2457 onpass="ONOS shows 1 SCC",
2458 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2459
2460 topoResult = ( devicesResults and linksResults
2461 and hostsResults and consistentHostsResult
2462 and consistentClustersResult and clusterResults
2463 and ipResult and hostAttachmentResults )
2464
2465 topoResult = topoResult and int( count <= 2 )
2466 note = "note it takes about " + str( int( cliTime ) ) + \
2467 " seconds for the test to make all the cli calls to fetch " +\
2468 "the topology from each ONOS instance"
2469 main.log.info(
2470 "Very crass estimate for topology discovery/convergence( " +
2471 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2472 str( count ) + " tries" )
2473
2474 main.step( "Device information is correct" )
2475 utilities.assert_equals(
2476 expect=main.TRUE,
2477 actual=devicesResults,
2478 onpass="Device information is correct",
2479 onfail="Device information is incorrect" )
2480
2481 main.step( "Links are correct" )
2482 utilities.assert_equals(
2483 expect=main.TRUE,
2484 actual=linksResults,
2485 onpass="Link are correct",
2486 onfail="Links are incorrect" )
2487
2488 # FIXME: move this to an ONOS state case
2489 main.step( "Checking ONOS nodes" )
2490 nodesOutput = []
2491 nodeResults = main.TRUE
2492 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002493 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002494 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002495 name="nodes-" + str( i ),
2496 args=[ ] )
2497 threads.append( t )
2498 t.start()
2499
2500 for t in threads:
2501 t.join()
2502 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002503 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002504 for i in nodesOutput:
2505 try:
2506 current = json.loads( i )
2507 for node in current:
2508 currentResult = main.FALSE
2509 if node['ip'] in ips: # node in nodes() output is in cell
2510 if node['state'] == 'ACTIVE':
2511 currentResult = main.TRUE
2512 else:
2513 main.log.error( "Error in ONOS node availability" )
2514 main.log.error(
2515 json.dumps( current,
2516 sort_keys=True,
2517 indent=4,
2518 separators=( ',', ': ' ) ) )
2519 break
2520 nodeResults = nodeResults and currentResult
2521 except ( ValueError, TypeError ):
2522 main.log.error( "Error parsing nodes output" )
2523 main.log.warn( repr( i ) )
2524 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2525 onpass="Nodes check successful",
2526 onfail="Nodes check NOT successful" )
2527
2528 def CASE9( self, main ):
2529 """
2530 Link s3-s28 down
2531 """
2532 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002533 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002534 assert main, "main not defined"
2535 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002536 assert main.CLIs, "main.CLIs not defined"
2537 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002538 # NOTE: You should probably run a topology check after this
2539
2540 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2541
2542 description = "Turn off a link to ensure that Link Discovery " +\
2543 "is working properly"
2544 main.case( description )
2545
2546 main.step( "Kill Link between s3 and s28" )
2547 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2548 main.log.info( "Waiting " + str( linkSleep ) +
2549 " seconds for link down to be discovered" )
2550 time.sleep( linkSleep )
2551 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2552 onpass="Link down successful",
2553 onfail="Failed to bring link down" )
2554 # TODO do some sort of check here
2555
2556 def CASE10( self, main ):
2557 """
2558 Link s3-s28 up
2559 """
2560 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002561 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002562 assert main, "main not defined"
2563 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002564 assert main.CLIs, "main.CLIs not defined"
2565 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002566 # NOTE: You should probably run a topology check after this
2567
2568 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2569
2570 description = "Restore a link to ensure that Link Discovery is " + \
2571 "working properly"
2572 main.case( description )
2573
2574 main.step( "Bring link between s3 and s28 back up" )
2575 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2576 main.log.info( "Waiting " + str( linkSleep ) +
2577 " seconds for link up to be discovered" )
2578 time.sleep( linkSleep )
2579 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2580 onpass="Link up successful",
2581 onfail="Failed to bring link up" )
2582 # TODO do some sort of check here
2583
2584 def CASE11( self, main ):
2585 """
2586 Switch Down
2587 """
2588 # NOTE: You should probably run a topology check after this
2589 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002590 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002591 assert main, "main not defined"
2592 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002593 assert main.CLIs, "main.CLIs not defined"
2594 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002595
2596 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2597
2598 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002599 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002600 main.case( description )
2601 switch = main.params[ 'kill' ][ 'switch' ]
2602 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2603
2604 # TODO: Make this switch parameterizable
2605 main.step( "Kill " + switch )
2606 main.log.info( "Deleting " + switch )
2607 main.Mininet1.delSwitch( switch )
2608 main.log.info( "Waiting " + str( switchSleep ) +
2609 " seconds for switch down to be discovered" )
2610 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002611 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002612 # Peek at the deleted switch
2613 main.log.warn( str( device ) )
2614 result = main.FALSE
2615 if device and device[ 'available' ] is False:
2616 result = main.TRUE
2617 utilities.assert_equals( expect=main.TRUE, actual=result,
2618 onpass="Kill switch successful",
2619 onfail="Failed to kill switch?" )
2620
2621 def CASE12( self, main ):
2622 """
2623 Switch Up
2624 """
2625 # NOTE: You should probably run a topology check after this
2626 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002627 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002628 assert main, "main not defined"
2629 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002630 assert main.CLIs, "main.CLIs not defined"
2631 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002632 assert ONOS1Port, "ONOS1Port not defined"
2633 assert ONOS2Port, "ONOS2Port not defined"
2634 assert ONOS3Port, "ONOS3Port not defined"
2635 assert ONOS4Port, "ONOS4Port not defined"
2636 assert ONOS5Port, "ONOS5Port not defined"
2637 assert ONOS6Port, "ONOS6Port not defined"
2638 assert ONOS7Port, "ONOS7Port not defined"
2639
2640 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2641 switch = main.params[ 'kill' ][ 'switch' ]
2642 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2643 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002644 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002645 description = "Adding a switch to ensure it is discovered correctly"
2646 main.case( description )
2647
2648 main.step( "Add back " + switch )
2649 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2650 for peer in links:
2651 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002652 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002653 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2654 main.log.info( "Waiting " + str( switchSleep ) +
2655 " seconds for switch up to be discovered" )
2656 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002657 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002658 # Peek at the deleted switch
2659 main.log.warn( str( device ) )
2660 result = main.FALSE
2661 if device and device[ 'available' ]:
2662 result = main.TRUE
2663 utilities.assert_equals( expect=main.TRUE, actual=result,
2664 onpass="add switch successful",
2665 onfail="Failed to add switch?" )
2666
2667 def CASE13( self, main ):
2668 """
2669 Clean up
2670 """
2671 import os
2672 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002673 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002674 assert main, "main not defined"
2675 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002676 assert main.CLIs, "main.CLIs not defined"
2677 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002678
2679 # printing colors to terminal
2680 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2681 'blue': '\033[94m', 'green': '\033[92m',
2682 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2683 main.case( "Test Cleanup" )
2684 main.step( "Killing tcpdumps" )
2685 main.Mininet2.stopTcpdump()
2686
2687 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002688 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002689 main.step( "Copying MN pcap and ONOS log files to test station" )
2690 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2691 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002692 # NOTE: MN Pcap file is being saved to logdir.
2693 # We scp this file as MN and TestON aren't necessarily the same vm
2694
2695 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002696 # TODO: Load these from params
2697 # NOTE: must end in /
2698 logFolder = "/opt/onos/log/"
2699 logFiles = [ "karaf.log", "karaf.log.1" ]
2700 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002701 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002702 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002703 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002704 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2705 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002706 # std*.log's
2707 # NOTE: must end in /
2708 logFolder = "/opt/onos/var/"
2709 logFiles = [ "stderr.log", "stdout.log" ]
2710 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002711 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002712 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002713 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002714 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2715 logFolder + f, dstName )
2716 else:
2717 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002718
2719 main.step( "Stopping Mininet" )
2720 mnResult = main.Mininet1.stopNet()
2721 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2722 onpass="Mininet stopped",
2723 onfail="MN cleanup NOT successful" )
2724
2725 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002726 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002727 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2728 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002729
2730 try:
2731 timerLog = open( main.logdir + "/Timers.csv", 'w')
2732 # Overwrite with empty line and close
2733 labels = "Gossip Intents, Restart"
2734 data = str( gossipTime ) + ", " + str( main.restartTime )
2735 timerLog.write( labels + "\n" + data )
2736 timerLog.close()
2737 except NameError, e:
2738 main.log.exception(e)
2739
2740 def CASE14( self, main ):
2741 """
2742 start election app on all onos nodes
2743 """
Jon Halle1a3b752015-07-22 13:02:46 -07002744 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002745 assert main, "main not defined"
2746 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002747 assert main.CLIs, "main.CLIs not defined"
2748 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002749
2750 main.case("Start Leadership Election app")
2751 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002752 onosCli = main.CLIs[ main.activeNodes[0] ]
2753 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002754 utilities.assert_equals(
2755 expect=main.TRUE,
2756 actual=appResult,
2757 onpass="Election app installed",
2758 onfail="Something went wrong with installing Leadership election" )
2759
2760 main.step( "Run for election on each node" )
2761 leaderResult = main.TRUE
2762 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002763 for i in main.activeNodes:
2764 main.CLIs[i].electionTestRun()
2765 for i in main.activeNodes:
2766 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002767 leader = cli.electionTestLeader()
2768 if leader is None or leader == main.FALSE:
2769 main.log.error( cli.name + ": Leader for the election app " +
2770 "should be an ONOS node, instead got '" +
2771 str( leader ) + "'" )
2772 leaderResult = main.FALSE
2773 leaders.append( leader )
2774 utilities.assert_equals(
2775 expect=main.TRUE,
2776 actual=leaderResult,
2777 onpass="Successfully ran for leadership",
2778 onfail="Failed to run for leadership" )
2779
2780 main.step( "Check that each node shows the same leader" )
2781 sameLeader = main.TRUE
2782 if len( set( leaders ) ) != 1:
2783 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002784 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002785 str( leaders ) )
2786 utilities.assert_equals(
2787 expect=main.TRUE,
2788 actual=sameLeader,
2789 onpass="Leadership is consistent for the election topic",
2790 onfail="Nodes have different leaders" )
2791
2792 def CASE15( self, main ):
2793 """
2794 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002795 15.1 Run election on each node
2796 15.2 Check that each node has the same leaders and candidates
2797 15.3 Find current leader and withdraw
2798 15.4 Check that a new node was elected leader
2799 15.5 Check that that new leader was the candidate of old leader
2800 15.6 Run for election on old leader
2801 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2802 15.8 Make sure that the old leader was added to the candidate list
2803
2804 old and new variable prefixes refer to data from before vs after
2805 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002806 """
2807 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002808 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002809 assert main, "main not defined"
2810 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002811 assert main.CLIs, "main.CLIs not defined"
2812 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002813
Jon Hall5cf14d52015-07-16 12:15:19 -07002814 description = "Check that Leadership Election is still functional"
2815 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002816 # NOTE: Need to re-run since being a canidate is not persistant
2817 # TODO: add check for "Command not found:" in the driver, this
2818 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002819
acsmars71adceb2015-08-31 15:09:26 -07002820 oldLeaders = [] # leaders by node before withdrawl from candidates
2821 newLeaders = [] # leaders by node after withdrawl from candidates
2822 oldAllCandidates = [] # list of lists of each nodes' candidates before
2823 newAllCandidates = [] # list of lists of each nodes' candidates after
2824 oldCandidates = [] # list of candidates from node 0 before withdrawl
2825 newCandidates = [] # list of candidates from node 0 after withdrawl
2826 oldLeader = '' # the old leader from oldLeaders, None if not same
2827 newLeader = '' # the new leaders fron newLoeaders, None if not same
2828 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2829 expectNoLeader = False # True when there is only one leader
2830 if main.numCtrls == 1:
2831 expectNoLeader = True
2832
2833 main.step( "Run for election on each node" )
2834 electionResult = main.TRUE
2835
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002836 for i in main.activeNodes: # run test election on each node
2837 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002838 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002839 utilities.assert_equals(
2840 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002841 actual=electionResult,
2842 onpass="All nodes successfully ran for leadership",
2843 onfail="At least one node failed to run for leadership" )
2844
acsmars3a72bde2015-09-02 14:16:22 -07002845 if electionResult == main.FALSE:
2846 main.log.error(
2847 "Skipping Test Case because Election Test App isn't loaded" )
2848 main.skipCase()
2849
acsmars71adceb2015-08-31 15:09:26 -07002850 main.step( "Check that each node shows the same leader and candidates" )
2851 sameResult = main.TRUE
2852 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002853 for i in main.activeNodes:
2854 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002855 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2856 oldAllCandidates.append( node )
2857 oldLeaders.append( node[ 0 ] )
2858 oldCandidates = oldAllCandidates[ 0 ]
2859
2860 # Check that each node has the same leader. Defines oldLeader
2861 if len( set( oldLeaders ) ) != 1:
2862 sameResult = main.FALSE
2863 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2864 oldLeader = None
2865 else:
2866 oldLeader = oldLeaders[ 0 ]
2867
2868 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002869 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002870 for candidates in oldAllCandidates:
2871 if set( candidates ) != set( oldCandidates ):
2872 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002873 candidateDiscrepancy = True
2874
2875 if candidateDiscrepancy:
2876 failMessage += " and candidates"
2877
acsmars71adceb2015-08-31 15:09:26 -07002878 utilities.assert_equals(
2879 expect=main.TRUE,
2880 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002881 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002882 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002883
2884 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002885 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002886 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002887 if oldLeader is None:
2888 main.log.error( "Leadership isn't consistent." )
2889 withdrawResult = main.FALSE
2890 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002891 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002892 if oldLeader == main.nodes[ i ].ip_address:
2893 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002894 break
2895 else: # FOR/ELSE statement
2896 main.log.error( "Leader election, could not find current leader" )
2897 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002898 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002899 utilities.assert_equals(
2900 expect=main.TRUE,
2901 actual=withdrawResult,
2902 onpass="Node was withdrawn from election",
2903 onfail="Node was not withdrawn from election" )
2904
acsmars71adceb2015-08-31 15:09:26 -07002905 main.step( "Check that a new node was elected leader" )
2906
Jon Hall5cf14d52015-07-16 12:15:19 -07002907 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002908 newLeaderResult = main.TRUE
2909 failMessage = "Nodes have different leaders"
2910
2911 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002912 for i in main.activeNodes:
2913 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002914 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2915 # elections might no have finished yet
2916 if node[ 0 ] == 'none' and not expectNoLeader:
2917 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2918 "sure elections are complete." )
2919 time.sleep(5)
2920 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2921 # election still isn't done or there is a problem
2922 if node[ 0 ] == 'none':
2923 main.log.error( "No leader was elected on at least 1 node" )
2924 newLeaderResult = main.FALSE
2925 newAllCandidates.append( node )
2926 newLeaders.append( node[ 0 ] )
2927 newCandidates = newAllCandidates[ 0 ]
2928
2929 # Check that each node has the same leader. Defines newLeader
2930 if len( set( newLeaders ) ) != 1:
2931 newLeaderResult = main.FALSE
2932 main.log.error( "Nodes have different leaders: " +
2933 str( newLeaders ) )
2934 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002935 else:
acsmars71adceb2015-08-31 15:09:26 -07002936 newLeader = newLeaders[ 0 ]
2937
2938 # Check that each node's candidate list is the same
2939 for candidates in newAllCandidates:
2940 if set( candidates ) != set( newCandidates ):
2941 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002942 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002943
2944 # Check that the new leader is not the older leader, which was withdrawn
2945 if newLeader == oldLeader:
2946 newLeaderResult = main.FALSE
2947 main.log.error( "All nodes still see old leader: " + oldLeader +
2948 " as the current leader" )
2949
Jon Hall5cf14d52015-07-16 12:15:19 -07002950 utilities.assert_equals(
2951 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002952 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002953 onpass="Leadership election passed",
2954 onfail="Something went wrong with Leadership election" )
2955
acsmars71adceb2015-08-31 15:09:26 -07002956 main.step( "Check that that new leader was the candidate of old leader")
2957 # candidates[ 2 ] should be come the top candidate after withdrawl
2958 correctCandidateResult = main.TRUE
2959 if expectNoLeader:
2960 if newLeader == 'none':
2961 main.log.info( "No leader expected. None found. Pass" )
2962 correctCandidateResult = main.TRUE
2963 else:
2964 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2965 correctCandidateResult = main.FALSE
2966 elif newLeader != oldCandidates[ 2 ]:
2967 correctCandidateResult = main.FALSE
2968 main.log.error( "Candidate " + newLeader + " was elected. " +
2969 oldCandidates[ 2 ] + " should have had priority." )
2970
2971 utilities.assert_equals(
2972 expect=main.TRUE,
2973 actual=correctCandidateResult,
2974 onpass="Correct Candidate Elected",
2975 onfail="Incorrect Candidate Elected" )
2976
Jon Hall5cf14d52015-07-16 12:15:19 -07002977 main.step( "Run for election on old leader( just so everyone " +
2978 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002979 if oldLeaderCLI is not None:
2980 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002981 else:
acsmars71adceb2015-08-31 15:09:26 -07002982 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002983 runResult = main.FALSE
2984 utilities.assert_equals(
2985 expect=main.TRUE,
2986 actual=runResult,
2987 onpass="App re-ran for election",
2988 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07002989 main.step(
2990 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002991 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07002992 positionResult = main.TRUE
2993 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
2994
2995 # Reset and reuse the new candidate and leaders lists
2996 newAllCandidates = []
2997 newCandidates = []
2998 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002999 for i in main.activeNodes:
3000 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07003001 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3002 if oldLeader not in node: # election might no have finished yet
3003 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3004 "be sure elections are complete" )
3005 time.sleep(5)
3006 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3007 if oldLeader not in node: # election still isn't done, errors
3008 main.log.error(
3009 "Old leader was not elected on at least one node" )
3010 positionResult = main.FALSE
3011 newAllCandidates.append( node )
3012 newLeaders.append( node[ 0 ] )
3013 newCandidates = newAllCandidates[ 0 ]
3014
3015 # Check that each node has the same leader. Defines newLeader
3016 if len( set( newLeaders ) ) != 1:
3017 positionResult = main.FALSE
3018 main.log.error( "Nodes have different leaders: " +
3019 str( newLeaders ) )
3020 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003021 else:
acsmars71adceb2015-08-31 15:09:26 -07003022 newLeader = newLeaders[ 0 ]
3023
3024 # Check that each node's candidate list is the same
3025 for candidates in newAllCandidates:
3026 if set( candidates ) != set( newCandidates ):
3027 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003028 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003029
3030 # Check that the re-elected node is last on the candidate List
3031 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003032 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003033 str( newCandidates ) )
3034 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003035
3036 utilities.assert_equals(
3037 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003038 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003039 onpass="Old leader successfully re-ran for election",
3040 onfail="Something went wrong with Leadership election after " +
3041 "the old leader re-ran for election" )
3042
3043 def CASE16( self, main ):
3044 """
3045 Install Distributed Primitives app
3046 """
3047 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003048 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003049 assert main, "main not defined"
3050 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003051 assert main.CLIs, "main.CLIs not defined"
3052 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003053
3054 # Variables for the distributed primitives tests
3055 global pCounterName
3056 global iCounterName
3057 global pCounterValue
3058 global iCounterValue
3059 global onosSet
3060 global onosSetName
3061 pCounterName = "TestON-Partitions"
3062 iCounterName = "TestON-inMemory"
3063 pCounterValue = 0
3064 iCounterValue = 0
3065 onosSet = set([])
3066 onosSetName = "TestON-set"
3067
3068 description = "Install Primitives app"
3069 main.case( description )
3070 main.step( "Install Primitives app" )
3071 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003072 node = main.activeNodes[0]
3073 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003074 utilities.assert_equals( expect=main.TRUE,
3075 actual=appResults,
3076 onpass="Primitives app activated",
3077 onfail="Primitives app not activated" )
3078 time.sleep( 5 ) # To allow all nodes to activate
3079
3080 def CASE17( self, main ):
3081 """
3082 Check for basic functionality with distributed primitives
3083 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003084 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003085 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003086 assert main, "main not defined"
3087 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003088 assert main.CLIs, "main.CLIs not defined"
3089 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003090 assert pCounterName, "pCounterName not defined"
3091 assert iCounterName, "iCounterName not defined"
3092 assert onosSetName, "onosSetName not defined"
3093 # NOTE: assert fails if value is 0/None/Empty/False
3094 try:
3095 pCounterValue
3096 except NameError:
3097 main.log.error( "pCounterValue not defined, setting to 0" )
3098 pCounterValue = 0
3099 try:
3100 iCounterValue
3101 except NameError:
3102 main.log.error( "iCounterValue not defined, setting to 0" )
3103 iCounterValue = 0
3104 try:
3105 onosSet
3106 except NameError:
3107 main.log.error( "onosSet not defined, setting to empty Set" )
3108 onosSet = set([])
3109 # Variables for the distributed primitives tests. These are local only
3110 addValue = "a"
3111 addAllValue = "a b c d e f"
3112 retainValue = "c d e f"
3113
3114 description = "Check for basic functionality with distributed " +\
3115 "primitives"
3116 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003117 main.caseExplanation = "Test the methods of the distributed " +\
3118 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003119 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003120 # Partitioned counters
3121 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003122 pCounters = []
3123 threads = []
3124 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003125 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003126 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3127 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003128 args=[ pCounterName ] )
3129 pCounterValue += 1
3130 addedPValues.append( pCounterValue )
3131 threads.append( t )
3132 t.start()
3133
3134 for t in threads:
3135 t.join()
3136 pCounters.append( t.result )
3137 # Check that counter incremented numController times
3138 pCounterResults = True
3139 for i in addedPValues:
3140 tmpResult = i in pCounters
3141 pCounterResults = pCounterResults and tmpResult
3142 if not tmpResult:
3143 main.log.error( str( i ) + " is not in partitioned "
3144 "counter incremented results" )
3145 utilities.assert_equals( expect=True,
3146 actual=pCounterResults,
3147 onpass="Default counter incremented",
3148 onfail="Error incrementing default" +
3149 " counter" )
3150
Jon Halle1a3b752015-07-22 13:02:46 -07003151 main.step( "Get then Increment a default counter on each node" )
3152 pCounters = []
3153 threads = []
3154 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003155 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003156 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3157 name="counterGetAndAdd-" + str( i ),
3158 args=[ pCounterName ] )
3159 addedPValues.append( pCounterValue )
3160 pCounterValue += 1
3161 threads.append( t )
3162 t.start()
3163
3164 for t in threads:
3165 t.join()
3166 pCounters.append( t.result )
3167 # Check that counter incremented numController times
3168 pCounterResults = True
3169 for i in addedPValues:
3170 tmpResult = i in pCounters
3171 pCounterResults = pCounterResults and tmpResult
3172 if not tmpResult:
3173 main.log.error( str( i ) + " is not in partitioned "
3174 "counter incremented results" )
3175 utilities.assert_equals( expect=True,
3176 actual=pCounterResults,
3177 onpass="Default counter incremented",
3178 onfail="Error incrementing default" +
3179 " counter" )
3180
3181 main.step( "Counters we added have the correct values" )
3182 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3183 utilities.assert_equals( expect=main.TRUE,
3184 actual=incrementCheck,
3185 onpass="Added counters are correct",
3186 onfail="Added counters are incorrect" )
3187
3188 main.step( "Add -8 to then get a default counter on each node" )
3189 pCounters = []
3190 threads = []
3191 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003192 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003193 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3194 name="counterIncrement-" + str( i ),
3195 args=[ pCounterName ],
3196 kwargs={ "delta": -8 } )
3197 pCounterValue += -8
3198 addedPValues.append( pCounterValue )
3199 threads.append( t )
3200 t.start()
3201
3202 for t in threads:
3203 t.join()
3204 pCounters.append( t.result )
3205 # Check that counter incremented numController times
3206 pCounterResults = True
3207 for i in addedPValues:
3208 tmpResult = i in pCounters
3209 pCounterResults = pCounterResults and tmpResult
3210 if not tmpResult:
3211 main.log.error( str( i ) + " is not in partitioned "
3212 "counter incremented results" )
3213 utilities.assert_equals( expect=True,
3214 actual=pCounterResults,
3215 onpass="Default counter incremented",
3216 onfail="Error incrementing default" +
3217 " counter" )
3218
3219 main.step( "Add 5 to then get a default counter on each node" )
3220 pCounters = []
3221 threads = []
3222 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003223 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003224 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3225 name="counterIncrement-" + str( i ),
3226 args=[ pCounterName ],
3227 kwargs={ "delta": 5 } )
3228 pCounterValue += 5
3229 addedPValues.append( pCounterValue )
3230 threads.append( t )
3231 t.start()
3232
3233 for t in threads:
3234 t.join()
3235 pCounters.append( t.result )
3236 # Check that counter incremented numController times
3237 pCounterResults = True
3238 for i in addedPValues:
3239 tmpResult = i in pCounters
3240 pCounterResults = pCounterResults and tmpResult
3241 if not tmpResult:
3242 main.log.error( str( i ) + " is not in partitioned "
3243 "counter incremented results" )
3244 utilities.assert_equals( expect=True,
3245 actual=pCounterResults,
3246 onpass="Default counter incremented",
3247 onfail="Error incrementing default" +
3248 " counter" )
3249
3250 main.step( "Get then add 5 to a default counter on each node" )
3251 pCounters = []
3252 threads = []
3253 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003254 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003255 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3256 name="counterIncrement-" + str( i ),
3257 args=[ pCounterName ],
3258 kwargs={ "delta": 5 } )
3259 addedPValues.append( pCounterValue )
3260 pCounterValue += 5
3261 threads.append( t )
3262 t.start()
3263
3264 for t in threads:
3265 t.join()
3266 pCounters.append( t.result )
3267 # Check that counter incremented numController times
3268 pCounterResults = True
3269 for i in addedPValues:
3270 tmpResult = i in pCounters
3271 pCounterResults = pCounterResults and tmpResult
3272 if not tmpResult:
3273 main.log.error( str( i ) + " is not in partitioned "
3274 "counter incremented results" )
3275 utilities.assert_equals( expect=True,
3276 actual=pCounterResults,
3277 onpass="Default counter incremented",
3278 onfail="Error incrementing default" +
3279 " counter" )
3280
3281 main.step( "Counters we added have the correct values" )
3282 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3283 utilities.assert_equals( expect=main.TRUE,
3284 actual=incrementCheck,
3285 onpass="Added counters are correct",
3286 onfail="Added counters are incorrect" )
3287
3288 # In-Memory counters
3289 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003290 iCounters = []
3291 addedIValues = []
3292 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003293 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003294 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003295 name="icounterIncrement-" + str( i ),
3296 args=[ iCounterName ],
3297 kwargs={ "inMemory": True } )
3298 iCounterValue += 1
3299 addedIValues.append( iCounterValue )
3300 threads.append( t )
3301 t.start()
3302
3303 for t in threads:
3304 t.join()
3305 iCounters.append( t.result )
3306 # Check that counter incremented numController times
3307 iCounterResults = True
3308 for i in addedIValues:
3309 tmpResult = i in iCounters
3310 iCounterResults = iCounterResults and tmpResult
3311 if not tmpResult:
3312 main.log.error( str( i ) + " is not in the in-memory "
3313 "counter incremented results" )
3314 utilities.assert_equals( expect=True,
3315 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003316 onpass="In-memory counter incremented",
3317 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003318 " counter" )
3319
Jon Halle1a3b752015-07-22 13:02:46 -07003320 main.step( "Get then Increment a in-memory counter on each node" )
3321 iCounters = []
3322 threads = []
3323 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003324 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003325 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3326 name="counterGetAndAdd-" + str( i ),
3327 args=[ iCounterName ],
3328 kwargs={ "inMemory": True } )
3329 addedIValues.append( iCounterValue )
3330 iCounterValue += 1
3331 threads.append( t )
3332 t.start()
3333
3334 for t in threads:
3335 t.join()
3336 iCounters.append( t.result )
3337 # Check that counter incremented numController times
3338 iCounterResults = True
3339 for i in addedIValues:
3340 tmpResult = i in iCounters
3341 iCounterResults = iCounterResults and tmpResult
3342 if not tmpResult:
3343 main.log.error( str( i ) + " is not in in-memory "
3344 "counter incremented results" )
3345 utilities.assert_equals( expect=True,
3346 actual=iCounterResults,
3347 onpass="In-memory counter incremented",
3348 onfail="Error incrementing in-memory" +
3349 " counter" )
3350
3351 main.step( "Counters we added have the correct values" )
3352 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3353 utilities.assert_equals( expect=main.TRUE,
3354 actual=incrementCheck,
3355 onpass="Added counters are correct",
3356 onfail="Added counters are incorrect" )
3357
3358 main.step( "Add -8 to then get a in-memory counter on each node" )
3359 iCounters = []
3360 threads = []
3361 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003362 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003363 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3364 name="counterIncrement-" + str( i ),
3365 args=[ iCounterName ],
3366 kwargs={ "delta": -8, "inMemory": True } )
3367 iCounterValue += -8
3368 addedIValues.append( iCounterValue )
3369 threads.append( t )
3370 t.start()
3371
3372 for t in threads:
3373 t.join()
3374 iCounters.append( t.result )
3375 # Check that counter incremented numController times
3376 iCounterResults = True
3377 for i in addedIValues:
3378 tmpResult = i in iCounters
3379 iCounterResults = iCounterResults and tmpResult
3380 if not tmpResult:
3381 main.log.error( str( i ) + " is not in in-memory "
3382 "counter incremented results" )
3383 utilities.assert_equals( expect=True,
3384 actual=pCounterResults,
3385 onpass="In-memory counter incremented",
3386 onfail="Error incrementing in-memory" +
3387 " counter" )
3388
3389 main.step( "Add 5 to then get a in-memory counter on each node" )
3390 iCounters = []
3391 threads = []
3392 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003393 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003394 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3395 name="counterIncrement-" + str( i ),
3396 args=[ iCounterName ],
3397 kwargs={ "delta": 5, "inMemory": True } )
3398 iCounterValue += 5
3399 addedIValues.append( iCounterValue )
3400 threads.append( t )
3401 t.start()
3402
3403 for t in threads:
3404 t.join()
3405 iCounters.append( t.result )
3406 # Check that counter incremented numController times
3407 iCounterResults = True
3408 for i in addedIValues:
3409 tmpResult = i in iCounters
3410 iCounterResults = iCounterResults and tmpResult
3411 if not tmpResult:
3412 main.log.error( str( i ) + " is not in in-memory "
3413 "counter incremented results" )
3414 utilities.assert_equals( expect=True,
3415 actual=pCounterResults,
3416 onpass="In-memory counter incremented",
3417 onfail="Error incrementing in-memory" +
3418 " counter" )
3419
3420 main.step( "Get then add 5 to a in-memory counter on each node" )
3421 iCounters = []
3422 threads = []
3423 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003424 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003425 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3426 name="counterIncrement-" + str( i ),
3427 args=[ iCounterName ],
3428 kwargs={ "delta": 5, "inMemory": True } )
3429 addedIValues.append( iCounterValue )
3430 iCounterValue += 5
3431 threads.append( t )
3432 t.start()
3433
3434 for t in threads:
3435 t.join()
3436 iCounters.append( t.result )
3437 # Check that counter incremented numController times
3438 iCounterResults = True
3439 for i in addedIValues:
3440 tmpResult = i in iCounters
3441 iCounterResults = iCounterResults and tmpResult
3442 if not tmpResult:
3443 main.log.error( str( i ) + " is not in in-memory "
3444 "counter incremented results" )
3445 utilities.assert_equals( expect=True,
3446 actual=iCounterResults,
3447 onpass="In-memory counter incremented",
3448 onfail="Error incrementing in-memory" +
3449 " counter" )
3450
3451 main.step( "Counters we added have the correct values" )
3452 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3453 utilities.assert_equals( expect=main.TRUE,
3454 actual=incrementCheck,
3455 onpass="Added counters are correct",
3456 onfail="Added counters are incorrect" )
3457
Jon Hall5cf14d52015-07-16 12:15:19 -07003458 main.step( "Check counters are consistant across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003459 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003460 utilities.assert_equals( expect=main.TRUE,
3461 actual=consistentCounterResults,
3462 onpass="ONOS counters are consistent " +
3463 "across nodes",
3464 onfail="ONOS Counters are inconsistent " +
3465 "across nodes" )
3466
3467 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003468 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3469 incrementCheck = incrementCheck and \
3470 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003471 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003472 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003473 onpass="Added counters are correct",
3474 onfail="Added counters are incorrect" )
3475 # DISTRIBUTED SETS
3476 main.step( "Distributed Set get" )
3477 size = len( onosSet )
3478 getResponses = []
3479 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003480 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003481 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003482 name="setTestGet-" + str( i ),
3483 args=[ onosSetName ] )
3484 threads.append( t )
3485 t.start()
3486 for t in threads:
3487 t.join()
3488 getResponses.append( t.result )
3489
3490 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003491 for i in range( len( main.activeNodes ) ):
3492 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003493 if isinstance( getResponses[ i ], list):
3494 current = set( getResponses[ i ] )
3495 if len( current ) == len( getResponses[ i ] ):
3496 # no repeats
3497 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003498 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003499 " has incorrect view" +
3500 " of set " + onosSetName + ":\n" +
3501 str( getResponses[ i ] ) )
3502 main.log.debug( "Expected: " + str( onosSet ) )
3503 main.log.debug( "Actual: " + str( current ) )
3504 getResults = main.FALSE
3505 else:
3506 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003507 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003508 " has repeat elements in" +
3509 " set " + onosSetName + ":\n" +
3510 str( getResponses[ i ] ) )
3511 getResults = main.FALSE
3512 elif getResponses[ i ] == main.ERROR:
3513 getResults = main.FALSE
3514 utilities.assert_equals( expect=main.TRUE,
3515 actual=getResults,
3516 onpass="Set elements are correct",
3517 onfail="Set elements are incorrect" )
3518
3519 main.step( "Distributed Set size" )
3520 sizeResponses = []
3521 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003522 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003523 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003524 name="setTestSize-" + str( i ),
3525 args=[ onosSetName ] )
3526 threads.append( t )
3527 t.start()
3528 for t in threads:
3529 t.join()
3530 sizeResponses.append( t.result )
3531
3532 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003533 for i in range( len( main.activeNodes ) ):
3534 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003535 if size != sizeResponses[ i ]:
3536 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003537 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003538 " expected a size of " + str( size ) +
3539 " for set " + onosSetName +
3540 " but got " + str( sizeResponses[ i ] ) )
3541 utilities.assert_equals( expect=main.TRUE,
3542 actual=sizeResults,
3543 onpass="Set sizes are correct",
3544 onfail="Set sizes are incorrect" )
3545
3546 main.step( "Distributed Set add()" )
3547 onosSet.add( addValue )
3548 addResponses = []
3549 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003550 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003551 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003552 name="setTestAdd-" + str( i ),
3553 args=[ onosSetName, addValue ] )
3554 threads.append( t )
3555 t.start()
3556 for t in threads:
3557 t.join()
3558 addResponses.append( t.result )
3559
3560 # main.TRUE = successfully changed the set
3561 # main.FALSE = action resulted in no change in set
3562 # main.ERROR - Some error in executing the function
3563 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003564 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003565 if addResponses[ i ] == main.TRUE:
3566 # All is well
3567 pass
3568 elif addResponses[ i ] == main.FALSE:
3569 # Already in set, probably fine
3570 pass
3571 elif addResponses[ i ] == main.ERROR:
3572 # Error in execution
3573 addResults = main.FALSE
3574 else:
3575 # unexpected result
3576 addResults = main.FALSE
3577 if addResults != main.TRUE:
3578 main.log.error( "Error executing set add" )
3579
3580 # Check if set is still correct
3581 size = len( onosSet )
3582 getResponses = []
3583 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003584 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003585 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003586 name="setTestGet-" + str( i ),
3587 args=[ onosSetName ] )
3588 threads.append( t )
3589 t.start()
3590 for t in threads:
3591 t.join()
3592 getResponses.append( t.result )
3593 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003594 for i in range( len( main.activeNodes ) ):
3595 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003596 if isinstance( getResponses[ i ], list):
3597 current = set( getResponses[ i ] )
3598 if len( current ) == len( getResponses[ i ] ):
3599 # no repeats
3600 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003601 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003602 " of set " + onosSetName + ":\n" +
3603 str( getResponses[ i ] ) )
3604 main.log.debug( "Expected: " + str( onosSet ) )
3605 main.log.debug( "Actual: " + str( current ) )
3606 getResults = main.FALSE
3607 else:
3608 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003609 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003610 " set " + onosSetName + ":\n" +
3611 str( getResponses[ i ] ) )
3612 getResults = main.FALSE
3613 elif getResponses[ i ] == main.ERROR:
3614 getResults = main.FALSE
3615 sizeResponses = []
3616 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003617 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003618 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003619 name="setTestSize-" + str( i ),
3620 args=[ onosSetName ] )
3621 threads.append( t )
3622 t.start()
3623 for t in threads:
3624 t.join()
3625 sizeResponses.append( t.result )
3626 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003627 for i in range( len( main.activeNodes ) ):
3628 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003629 if size != sizeResponses[ i ]:
3630 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003631 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003632 " expected a size of " + str( size ) +
3633 " for set " + onosSetName +
3634 " but got " + str( sizeResponses[ i ] ) )
3635 addResults = addResults and getResults and sizeResults
3636 utilities.assert_equals( expect=main.TRUE,
3637 actual=addResults,
3638 onpass="Set add correct",
3639 onfail="Set add was incorrect" )
3640
3641 main.step( "Distributed Set addAll()" )
3642 onosSet.update( addAllValue.split() )
3643 addResponses = []
3644 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003645 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003646 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003647 name="setTestAddAll-" + str( i ),
3648 args=[ onosSetName, addAllValue ] )
3649 threads.append( t )
3650 t.start()
3651 for t in threads:
3652 t.join()
3653 addResponses.append( t.result )
3654
3655 # main.TRUE = successfully changed the set
3656 # main.FALSE = action resulted in no change in set
3657 # main.ERROR - Some error in executing the function
3658 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003659 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003660 if addResponses[ i ] == main.TRUE:
3661 # All is well
3662 pass
3663 elif addResponses[ i ] == main.FALSE:
3664 # Already in set, probably fine
3665 pass
3666 elif addResponses[ i ] == main.ERROR:
3667 # Error in execution
3668 addAllResults = main.FALSE
3669 else:
3670 # unexpected result
3671 addAllResults = main.FALSE
3672 if addAllResults != main.TRUE:
3673 main.log.error( "Error executing set addAll" )
3674
3675 # Check if set is still correct
3676 size = len( onosSet )
3677 getResponses = []
3678 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003679 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003680 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003681 name="setTestGet-" + str( i ),
3682 args=[ onosSetName ] )
3683 threads.append( t )
3684 t.start()
3685 for t in threads:
3686 t.join()
3687 getResponses.append( t.result )
3688 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003689 for i in range( len( main.activeNodes ) ):
3690 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003691 if isinstance( getResponses[ i ], list):
3692 current = set( getResponses[ i ] )
3693 if len( current ) == len( getResponses[ i ] ):
3694 # no repeats
3695 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003696 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003697 " has incorrect view" +
3698 " of set " + onosSetName + ":\n" +
3699 str( getResponses[ i ] ) )
3700 main.log.debug( "Expected: " + str( onosSet ) )
3701 main.log.debug( "Actual: " + str( current ) )
3702 getResults = main.FALSE
3703 else:
3704 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003705 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003706 " has repeat elements in" +
3707 " set " + onosSetName + ":\n" +
3708 str( getResponses[ i ] ) )
3709 getResults = main.FALSE
3710 elif getResponses[ i ] == main.ERROR:
3711 getResults = main.FALSE
3712 sizeResponses = []
3713 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003714 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003715 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003716 name="setTestSize-" + str( i ),
3717 args=[ onosSetName ] )
3718 threads.append( t )
3719 t.start()
3720 for t in threads:
3721 t.join()
3722 sizeResponses.append( t.result )
3723 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003724 for i in range( len( main.activeNodes ) ):
3725 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003726 if size != sizeResponses[ i ]:
3727 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003728 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003729 " expected a size of " + str( size ) +
3730 " for set " + onosSetName +
3731 " but got " + str( sizeResponses[ i ] ) )
3732 addAllResults = addAllResults and getResults and sizeResults
3733 utilities.assert_equals( expect=main.TRUE,
3734 actual=addAllResults,
3735 onpass="Set addAll correct",
3736 onfail="Set addAll was incorrect" )
3737
3738 main.step( "Distributed Set contains()" )
3739 containsResponses = []
3740 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003741 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003742 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003743 name="setContains-" + str( i ),
3744 args=[ onosSetName ],
3745 kwargs={ "values": addValue } )
3746 threads.append( t )
3747 t.start()
3748 for t in threads:
3749 t.join()
3750 # NOTE: This is the tuple
3751 containsResponses.append( t.result )
3752
3753 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003754 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003755 if containsResponses[ i ] == main.ERROR:
3756 containsResults = main.FALSE
3757 else:
3758 containsResults = containsResults and\
3759 containsResponses[ i ][ 1 ]
3760 utilities.assert_equals( expect=main.TRUE,
3761 actual=containsResults,
3762 onpass="Set contains is functional",
3763 onfail="Set contains failed" )
3764
3765 main.step( "Distributed Set containsAll()" )
3766 containsAllResponses = []
3767 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003768 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003769 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003770 name="setContainsAll-" + str( i ),
3771 args=[ onosSetName ],
3772 kwargs={ "values": addAllValue } )
3773 threads.append( t )
3774 t.start()
3775 for t in threads:
3776 t.join()
3777 # NOTE: This is the tuple
3778 containsAllResponses.append( t.result )
3779
3780 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003781 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003782 if containsResponses[ i ] == main.ERROR:
3783 containsResults = main.FALSE
3784 else:
3785 containsResults = containsResults and\
3786 containsResponses[ i ][ 1 ]
3787 utilities.assert_equals( expect=main.TRUE,
3788 actual=containsAllResults,
3789 onpass="Set containsAll is functional",
3790 onfail="Set containsAll failed" )
3791
3792 main.step( "Distributed Set remove()" )
3793 onosSet.remove( addValue )
3794 removeResponses = []
3795 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003796 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003797 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003798 name="setTestRemove-" + str( i ),
3799 args=[ onosSetName, addValue ] )
3800 threads.append( t )
3801 t.start()
3802 for t in threads:
3803 t.join()
3804 removeResponses.append( t.result )
3805
3806 # main.TRUE = successfully changed the set
3807 # main.FALSE = action resulted in no change in set
3808 # main.ERROR - Some error in executing the function
3809 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003810 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003811 if removeResponses[ i ] == main.TRUE:
3812 # All is well
3813 pass
3814 elif removeResponses[ i ] == main.FALSE:
3815 # not in set, probably fine
3816 pass
3817 elif removeResponses[ i ] == main.ERROR:
3818 # Error in execution
3819 removeResults = main.FALSE
3820 else:
3821 # unexpected result
3822 removeResults = main.FALSE
3823 if removeResults != main.TRUE:
3824 main.log.error( "Error executing set remove" )
3825
3826 # Check if set is still correct
3827 size = len( onosSet )
3828 getResponses = []
3829 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003830 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003831 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003832 name="setTestGet-" + str( i ),
3833 args=[ onosSetName ] )
3834 threads.append( t )
3835 t.start()
3836 for t in threads:
3837 t.join()
3838 getResponses.append( t.result )
3839 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003840 for i in range( len( main.activeNodes ) ):
3841 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003842 if isinstance( getResponses[ i ], list):
3843 current = set( getResponses[ i ] )
3844 if len( current ) == len( getResponses[ i ] ):
3845 # no repeats
3846 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003847 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003848 " has incorrect view" +
3849 " of set " + onosSetName + ":\n" +
3850 str( getResponses[ i ] ) )
3851 main.log.debug( "Expected: " + str( onosSet ) )
3852 main.log.debug( "Actual: " + str( current ) )
3853 getResults = main.FALSE
3854 else:
3855 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003856 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003857 " has repeat elements in" +
3858 " set " + onosSetName + ":\n" +
3859 str( getResponses[ i ] ) )
3860 getResults = main.FALSE
3861 elif getResponses[ i ] == main.ERROR:
3862 getResults = main.FALSE
3863 sizeResponses = []
3864 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003865 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003866 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003867 name="setTestSize-" + str( i ),
3868 args=[ onosSetName ] )
3869 threads.append( t )
3870 t.start()
3871 for t in threads:
3872 t.join()
3873 sizeResponses.append( t.result )
3874 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003875 for i in range( len( main.activeNodes ) ):
3876 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003877 if size != sizeResponses[ i ]:
3878 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003879 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003880 " expected a size of " + str( size ) +
3881 " for set " + onosSetName +
3882 " but got " + str( sizeResponses[ i ] ) )
3883 removeResults = removeResults and getResults and sizeResults
3884 utilities.assert_equals( expect=main.TRUE,
3885 actual=removeResults,
3886 onpass="Set remove correct",
3887 onfail="Set remove was incorrect" )
3888
3889 main.step( "Distributed Set removeAll()" )
3890 onosSet.difference_update( addAllValue.split() )
3891 removeAllResponses = []
3892 threads = []
3893 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003894 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003895 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003896 name="setTestRemoveAll-" + str( i ),
3897 args=[ onosSetName, addAllValue ] )
3898 threads.append( t )
3899 t.start()
3900 for t in threads:
3901 t.join()
3902 removeAllResponses.append( t.result )
3903 except Exception, e:
3904 main.log.exception(e)
3905
3906 # main.TRUE = successfully changed the set
3907 # main.FALSE = action resulted in no change in set
3908 # main.ERROR - Some error in executing the function
3909 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003910 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003911 if removeAllResponses[ i ] == main.TRUE:
3912 # All is well
3913 pass
3914 elif removeAllResponses[ i ] == main.FALSE:
3915 # not in set, probably fine
3916 pass
3917 elif removeAllResponses[ i ] == main.ERROR:
3918 # Error in execution
3919 removeAllResults = main.FALSE
3920 else:
3921 # unexpected result
3922 removeAllResults = main.FALSE
3923 if removeAllResults != main.TRUE:
3924 main.log.error( "Error executing set removeAll" )
3925
3926 # Check if set is still correct
3927 size = len( onosSet )
3928 getResponses = []
3929 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003930 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003931 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003932 name="setTestGet-" + str( i ),
3933 args=[ onosSetName ] )
3934 threads.append( t )
3935 t.start()
3936 for t in threads:
3937 t.join()
3938 getResponses.append( t.result )
3939 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003940 for i in range( len( main.activeNodes ) ):
3941 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003942 if isinstance( getResponses[ i ], list):
3943 current = set( getResponses[ i ] )
3944 if len( current ) == len( getResponses[ i ] ):
3945 # no repeats
3946 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003947 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003948 " has incorrect view" +
3949 " of set " + onosSetName + ":\n" +
3950 str( getResponses[ i ] ) )
3951 main.log.debug( "Expected: " + str( onosSet ) )
3952 main.log.debug( "Actual: " + str( current ) )
3953 getResults = main.FALSE
3954 else:
3955 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003956 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003957 " has repeat elements in" +
3958 " set " + onosSetName + ":\n" +
3959 str( getResponses[ i ] ) )
3960 getResults = main.FALSE
3961 elif getResponses[ i ] == main.ERROR:
3962 getResults = main.FALSE
3963 sizeResponses = []
3964 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003965 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003966 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003967 name="setTestSize-" + str( i ),
3968 args=[ onosSetName ] )
3969 threads.append( t )
3970 t.start()
3971 for t in threads:
3972 t.join()
3973 sizeResponses.append( t.result )
3974 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003975 for i in range( len( main.activeNodes ) ):
3976 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003977 if size != sizeResponses[ i ]:
3978 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003979 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003980 " expected a size of " + str( size ) +
3981 " for set " + onosSetName +
3982 " but got " + str( sizeResponses[ i ] ) )
3983 removeAllResults = removeAllResults and getResults and sizeResults
3984 utilities.assert_equals( expect=main.TRUE,
3985 actual=removeAllResults,
3986 onpass="Set removeAll correct",
3987 onfail="Set removeAll was incorrect" )
3988
3989 main.step( "Distributed Set addAll()" )
3990 onosSet.update( addAllValue.split() )
3991 addResponses = []
3992 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003993 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003994 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003995 name="setTestAddAll-" + str( i ),
3996 args=[ onosSetName, addAllValue ] )
3997 threads.append( t )
3998 t.start()
3999 for t in threads:
4000 t.join()
4001 addResponses.append( t.result )
4002
4003 # main.TRUE = successfully changed the set
4004 # main.FALSE = action resulted in no change in set
4005 # main.ERROR - Some error in executing the function
4006 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004007 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004008 if addResponses[ i ] == main.TRUE:
4009 # All is well
4010 pass
4011 elif addResponses[ i ] == main.FALSE:
4012 # Already in set, probably fine
4013 pass
4014 elif addResponses[ i ] == main.ERROR:
4015 # Error in execution
4016 addAllResults = main.FALSE
4017 else:
4018 # unexpected result
4019 addAllResults = main.FALSE
4020 if addAllResults != main.TRUE:
4021 main.log.error( "Error executing set addAll" )
4022
4023 # Check if set is still correct
4024 size = len( onosSet )
4025 getResponses = []
4026 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004027 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004028 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004029 name="setTestGet-" + str( i ),
4030 args=[ onosSetName ] )
4031 threads.append( t )
4032 t.start()
4033 for t in threads:
4034 t.join()
4035 getResponses.append( t.result )
4036 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004037 for i in range( len( main.activeNodes ) ):
4038 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004039 if isinstance( getResponses[ i ], list):
4040 current = set( getResponses[ i ] )
4041 if len( current ) == len( getResponses[ i ] ):
4042 # no repeats
4043 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004044 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004045 " has incorrect view" +
4046 " of set " + onosSetName + ":\n" +
4047 str( getResponses[ i ] ) )
4048 main.log.debug( "Expected: " + str( onosSet ) )
4049 main.log.debug( "Actual: " + str( current ) )
4050 getResults = main.FALSE
4051 else:
4052 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004053 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004054 " has repeat elements in" +
4055 " set " + onosSetName + ":\n" +
4056 str( getResponses[ i ] ) )
4057 getResults = main.FALSE
4058 elif getResponses[ i ] == main.ERROR:
4059 getResults = main.FALSE
4060 sizeResponses = []
4061 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004062 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004063 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004064 name="setTestSize-" + str( i ),
4065 args=[ onosSetName ] )
4066 threads.append( t )
4067 t.start()
4068 for t in threads:
4069 t.join()
4070 sizeResponses.append( t.result )
4071 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004072 for i in range( len( main.activeNodes ) ):
4073 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004074 if size != sizeResponses[ i ]:
4075 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004076 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004077 " expected a size of " + str( size ) +
4078 " for set " + onosSetName +
4079 " but got " + str( sizeResponses[ i ] ) )
4080 addAllResults = addAllResults and getResults and sizeResults
4081 utilities.assert_equals( expect=main.TRUE,
4082 actual=addAllResults,
4083 onpass="Set addAll correct",
4084 onfail="Set addAll was incorrect" )
4085
4086 main.step( "Distributed Set clear()" )
4087 onosSet.clear()
4088 clearResponses = []
4089 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004090 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004091 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004092 name="setTestClear-" + str( i ),
4093 args=[ onosSetName, " "], # Values doesn't matter
4094 kwargs={ "clear": True } )
4095 threads.append( t )
4096 t.start()
4097 for t in threads:
4098 t.join()
4099 clearResponses.append( t.result )
4100
4101 # main.TRUE = successfully changed the set
4102 # main.FALSE = action resulted in no change in set
4103 # main.ERROR - Some error in executing the function
4104 clearResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004105 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004106 if clearResponses[ i ] == main.TRUE:
4107 # All is well
4108 pass
4109 elif clearResponses[ i ] == main.FALSE:
4110 # Nothing set, probably fine
4111 pass
4112 elif clearResponses[ i ] == main.ERROR:
4113 # Error in execution
4114 clearResults = main.FALSE
4115 else:
4116 # unexpected result
4117 clearResults = main.FALSE
4118 if clearResults != main.TRUE:
4119 main.log.error( "Error executing set clear" )
4120
4121 # Check if set is still correct
4122 size = len( onosSet )
4123 getResponses = []
4124 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004125 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004126 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004127 name="setTestGet-" + str( i ),
4128 args=[ onosSetName ] )
4129 threads.append( t )
4130 t.start()
4131 for t in threads:
4132 t.join()
4133 getResponses.append( t.result )
4134 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004135 for i in range( len( main.activeNodes ) ):
4136 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004137 if isinstance( getResponses[ i ], list):
4138 current = set( getResponses[ i ] )
4139 if len( current ) == len( getResponses[ i ] ):
4140 # no repeats
4141 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004142 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004143 " has incorrect view" +
4144 " of set " + onosSetName + ":\n" +
4145 str( getResponses[ i ] ) )
4146 main.log.debug( "Expected: " + str( onosSet ) )
4147 main.log.debug( "Actual: " + str( current ) )
4148 getResults = main.FALSE
4149 else:
4150 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004151 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004152 " has repeat elements in" +
4153 " set " + onosSetName + ":\n" +
4154 str( getResponses[ i ] ) )
4155 getResults = main.FALSE
4156 elif getResponses[ i ] == main.ERROR:
4157 getResults = main.FALSE
4158 sizeResponses = []
4159 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004160 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004161 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004162 name="setTestSize-" + str( i ),
4163 args=[ onosSetName ] )
4164 threads.append( t )
4165 t.start()
4166 for t in threads:
4167 t.join()
4168 sizeResponses.append( t.result )
4169 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004170 for i in range( len( main.activeNodes ) ):
4171 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004172 if size != sizeResponses[ i ]:
4173 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004174 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004175 " expected a size of " + str( size ) +
4176 " for set " + onosSetName +
4177 " but got " + str( sizeResponses[ i ] ) )
4178 clearResults = clearResults and getResults and sizeResults
4179 utilities.assert_equals( expect=main.TRUE,
4180 actual=clearResults,
4181 onpass="Set clear correct",
4182 onfail="Set clear was incorrect" )
4183
4184 main.step( "Distributed Set addAll()" )
4185 onosSet.update( addAllValue.split() )
4186 addResponses = []
4187 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004188 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004189 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004190 name="setTestAddAll-" + str( i ),
4191 args=[ onosSetName, addAllValue ] )
4192 threads.append( t )
4193 t.start()
4194 for t in threads:
4195 t.join()
4196 addResponses.append( t.result )
4197
4198 # main.TRUE = successfully changed the set
4199 # main.FALSE = action resulted in no change in set
4200 # main.ERROR - Some error in executing the function
4201 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004202 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004203 if addResponses[ i ] == main.TRUE:
4204 # All is well
4205 pass
4206 elif addResponses[ i ] == main.FALSE:
4207 # Already in set, probably fine
4208 pass
4209 elif addResponses[ i ] == main.ERROR:
4210 # Error in execution
4211 addAllResults = main.FALSE
4212 else:
4213 # unexpected result
4214 addAllResults = main.FALSE
4215 if addAllResults != main.TRUE:
4216 main.log.error( "Error executing set addAll" )
4217
4218 # Check if set is still correct
4219 size = len( onosSet )
4220 getResponses = []
4221 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004222 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004223 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004224 name="setTestGet-" + str( i ),
4225 args=[ onosSetName ] )
4226 threads.append( t )
4227 t.start()
4228 for t in threads:
4229 t.join()
4230 getResponses.append( t.result )
4231 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004232 for i in range( len( main.activeNodes ) ):
4233 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004234 if isinstance( getResponses[ i ], list):
4235 current = set( getResponses[ i ] )
4236 if len( current ) == len( getResponses[ i ] ):
4237 # no repeats
4238 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004239 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004240 " has incorrect view" +
4241 " of set " + onosSetName + ":\n" +
4242 str( getResponses[ i ] ) )
4243 main.log.debug( "Expected: " + str( onosSet ) )
4244 main.log.debug( "Actual: " + str( current ) )
4245 getResults = main.FALSE
4246 else:
4247 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004248 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004249 " has repeat elements in" +
4250 " set " + onosSetName + ":\n" +
4251 str( getResponses[ i ] ) )
4252 getResults = main.FALSE
4253 elif getResponses[ i ] == main.ERROR:
4254 getResults = main.FALSE
4255 sizeResponses = []
4256 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004257 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004258 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004259 name="setTestSize-" + str( i ),
4260 args=[ onosSetName ] )
4261 threads.append( t )
4262 t.start()
4263 for t in threads:
4264 t.join()
4265 sizeResponses.append( t.result )
4266 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004267 for i in range( len( main.activeNodes ) ):
4268 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004269 if size != sizeResponses[ i ]:
4270 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004271 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004272 " expected a size of " + str( size ) +
4273 " for set " + onosSetName +
4274 " but got " + str( sizeResponses[ i ] ) )
4275 addAllResults = addAllResults and getResults and sizeResults
4276 utilities.assert_equals( expect=main.TRUE,
4277 actual=addAllResults,
4278 onpass="Set addAll correct",
4279 onfail="Set addAll was incorrect" )
4280
4281 main.step( "Distributed Set retain()" )
4282 onosSet.intersection_update( retainValue.split() )
4283 retainResponses = []
4284 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004285 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004286 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004287 name="setTestRetain-" + str( i ),
4288 args=[ onosSetName, retainValue ],
4289 kwargs={ "retain": True } )
4290 threads.append( t )
4291 t.start()
4292 for t in threads:
4293 t.join()
4294 retainResponses.append( t.result )
4295
4296 # main.TRUE = successfully changed the set
4297 # main.FALSE = action resulted in no change in set
4298 # main.ERROR - Some error in executing the function
4299 retainResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004300 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004301 if retainResponses[ i ] == main.TRUE:
4302 # All is well
4303 pass
4304 elif retainResponses[ i ] == main.FALSE:
4305 # Already in set, probably fine
4306 pass
4307 elif retainResponses[ i ] == main.ERROR:
4308 # Error in execution
4309 retainResults = main.FALSE
4310 else:
4311 # unexpected result
4312 retainResults = main.FALSE
4313 if retainResults != main.TRUE:
4314 main.log.error( "Error executing set retain" )
4315
4316 # Check if set is still correct
4317 size = len( onosSet )
4318 getResponses = []
4319 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004320 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004321 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004322 name="setTestGet-" + str( i ),
4323 args=[ onosSetName ] )
4324 threads.append( t )
4325 t.start()
4326 for t in threads:
4327 t.join()
4328 getResponses.append( t.result )
4329 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004330 for i in range( len( main.activeNodes ) ):
4331 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004332 if isinstance( getResponses[ i ], list):
4333 current = set( getResponses[ i ] )
4334 if len( current ) == len( getResponses[ i ] ):
4335 # no repeats
4336 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004337 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004338 " has incorrect view" +
4339 " of set " + onosSetName + ":\n" +
4340 str( getResponses[ i ] ) )
4341 main.log.debug( "Expected: " + str( onosSet ) )
4342 main.log.debug( "Actual: " + str( current ) )
4343 getResults = main.FALSE
4344 else:
4345 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004346 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004347 " has repeat elements in" +
4348 " set " + onosSetName + ":\n" +
4349 str( getResponses[ i ] ) )
4350 getResults = main.FALSE
4351 elif getResponses[ i ] == main.ERROR:
4352 getResults = main.FALSE
4353 sizeResponses = []
4354 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004355 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004356 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004357 name="setTestSize-" + str( i ),
4358 args=[ onosSetName ] )
4359 threads.append( t )
4360 t.start()
4361 for t in threads:
4362 t.join()
4363 sizeResponses.append( t.result )
4364 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004365 for i in range( len( main.activeNodes ) ):
4366 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004367 if size != sizeResponses[ i ]:
4368 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004369 main.log.error( "ONOS" + node + " expected a size of " +
Jon Hall5cf14d52015-07-16 12:15:19 -07004370 str( size ) + " for set " + onosSetName +
4371 " but got " + str( sizeResponses[ i ] ) )
4372 retainResults = retainResults and getResults and sizeResults
4373 utilities.assert_equals( expect=main.TRUE,
4374 actual=retainResults,
4375 onpass="Set retain correct",
4376 onfail="Set retain was incorrect" )
4377
Jon Hall2a5002c2015-08-21 16:49:11 -07004378 # Transactional maps
4379 main.step( "Partitioned Transactional maps put" )
4380 tMapValue = "Testing"
4381 numKeys = 100
4382 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004383 node = main.activeNodes[0]
4384 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall2a5002c2015-08-21 16:49:11 -07004385 if len( putResponses ) == 100:
4386 for i in putResponses:
4387 if putResponses[ i ][ 'value' ] != tMapValue:
4388 putResult = False
4389 else:
4390 putResult = False
4391 if not putResult:
4392 main.log.debug( "Put response values: " + str( putResponses ) )
4393 utilities.assert_equals( expect=True,
4394 actual=putResult,
4395 onpass="Partitioned Transactional Map put successful",
4396 onfail="Partitioned Transactional Map put values are incorrect" )
4397
4398 main.step( "Partitioned Transactional maps get" )
4399 getCheck = True
4400 for n in range( 1, numKeys + 1 ):
4401 getResponses = []
4402 threads = []
4403 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004404 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004405 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4406 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004407 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004408 threads.append( t )
4409 t.start()
4410 for t in threads:
4411 t.join()
4412 getResponses.append( t.result )
4413 for node in getResponses:
4414 if node != tMapValue:
4415 valueCheck = False
4416 if not valueCheck:
4417 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4418 main.log.warn( getResponses )
4419 getCheck = getCheck and valueCheck
4420 utilities.assert_equals( expect=True,
4421 actual=getCheck,
4422 onpass="Partitioned Transactional Map get values were correct",
4423 onfail="Partitioned Transactional Map values incorrect" )
4424
4425 main.step( "In-memory Transactional maps put" )
4426 tMapValue = "Testing"
4427 numKeys = 100
4428 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004429 node = main.activeNodes[0]
4430 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
Jon Hall2a5002c2015-08-21 16:49:11 -07004431 if len( putResponses ) == 100:
4432 for i in putResponses:
4433 if putResponses[ i ][ 'value' ] != tMapValue:
4434 putResult = False
4435 else:
4436 putResult = False
4437 if not putResult:
4438 main.log.debug( "Put response values: " + str( putResponses ) )
4439 utilities.assert_equals( expect=True,
4440 actual=putResult,
4441 onpass="In-Memory Transactional Map put successful",
4442 onfail="In-Memory Transactional Map put values are incorrect" )
4443
4444 main.step( "In-Memory Transactional maps get" )
4445 getCheck = True
4446 for n in range( 1, numKeys + 1 ):
4447 getResponses = []
4448 threads = []
4449 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004450 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004451 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4452 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004453 args=[ "Key" + str( n ) ],
Jon Hall2a5002c2015-08-21 16:49:11 -07004454 kwargs={ "inMemory": True } )
4455 threads.append( t )
4456 t.start()
4457 for t in threads:
4458 t.join()
4459 getResponses.append( t.result )
4460 for node in getResponses:
4461 if node != tMapValue:
4462 valueCheck = False
4463 if not valueCheck:
4464 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4465 main.log.warn( getResponses )
4466 getCheck = getCheck and valueCheck
4467 utilities.assert_equals( expect=True,
4468 actual=getCheck,
4469 onpass="In-Memory Transactional Map get values were correct",
4470 onfail="In-Memory Transactional Map values incorrect" )