blob: c6e584ff1fee944437306a54d97c116068ed84e8 [file] [log] [blame]
"""
Description: This test determines whether ONOS can handle
    a minority of its nodes being stopped and restarted.

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The failure-inducing case.
CASE62: The failure-recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: Start election app on all ONOS nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
26
27
Jon Hallb3ed8ed2015-10-28 16:43:55 -070028class HAstopNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -070029
    def __init__( self ):
        """
        Constructor for the TestON test class.

        TestON instantiates this class and then drives it through the
        CASE* methods; no per-instance state is needed here.
        """
        # Placeholder attribute; appears unused within this file.
        self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
Jon Halle1a3b752015-07-22 13:02:46 -070051 import imp
Jon Hall3b489db2015-10-05 14:38:37 -070052 import pexpect
Jon Hallb3ed8ed2015-10-28 16:43:55 -070053 main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
Jon Hall5cf14d52015-07-16 12:15:19 -070054 "initialization" )
55 main.case( "Setting up test environment" )
Jon Hall783bbf92015-07-23 14:33:19 -070056 main.caseExplanation = "Setup the test environment including " +\
Jon Hall5cf14d52015-07-16 12:15:19 -070057 "installing ONOS, starting Mininet and ONOS" +\
58 "cli sessions."
59 # TODO: save all the timers and output them for plotting
60
61 # load some variables from the params file
62 PULLCODE = False
63 if main.params[ 'Git' ] == 'True':
64 PULLCODE = True
65 gitBranch = main.params[ 'branch' ]
66 cellName = main.params[ 'ENV' ][ 'cellName' ]
67
Jon Halle1a3b752015-07-22 13:02:46 -070068 main.numCtrls = int( main.params[ 'num_controllers' ] )
Jon Hall5cf14d52015-07-16 12:15:19 -070069 if main.ONOSbench.maxNodes:
Jon Halle1a3b752015-07-22 13:02:46 -070070 if main.ONOSbench.maxNodes < main.numCtrls:
71 main.numCtrls = int( main.ONOSbench.maxNodes )
72 # set global variables
Jon Hall5cf14d52015-07-16 12:15:19 -070073 global ONOS1Port
74 global ONOS2Port
75 global ONOS3Port
76 global ONOS4Port
77 global ONOS5Port
78 global ONOS6Port
79 global ONOS7Port
80
81 # FIXME: just get controller port from params?
82 # TODO: do we really need all these?
83 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
84 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
85 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
86 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
87 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
88 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
89 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
90
Jon Halle1a3b752015-07-22 13:02:46 -070091 try:
92 fileName = "Counters"
93 # TODO: Maybe make a library folder somewhere?
94 path = main.params[ 'imports' ][ 'path' ]
95 main.Counters = imp.load_source( fileName,
96 path + fileName + ".py" )
97 except Exception as e:
98 main.log.exception( e )
99 main.cleanup()
100 main.exit()
101
102 main.CLIs = []
103 main.nodes = []
Jon Hall5cf14d52015-07-16 12:15:19 -0700104 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700105 for i in range( 1, main.numCtrls + 1 ):
106 try:
107 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
108 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
109 ipList.append( main.nodes[ -1 ].ip_address )
110 except AttributeError:
111 break
Jon Hall5cf14d52015-07-16 12:15:19 -0700112
113 main.step( "Create cell file" )
114 cellAppString = main.params[ 'ENV' ][ 'appString' ]
115 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
116 main.Mininet1.ip_address,
117 cellAppString, ipList )
118 main.step( "Applying cell variable to environment" )
119 cellResult = main.ONOSbench.setCell( cellName )
120 verifyResult = main.ONOSbench.verifyCell()
121
122 # FIXME:this is short term fix
123 main.log.info( "Removing raft logs" )
124 main.ONOSbench.onosRemoveRaftLogs()
125
126 main.log.info( "Uninstalling ONOS" )
Jon Halle1a3b752015-07-22 13:02:46 -0700127 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700128 main.ONOSbench.onosUninstall( node.ip_address )
129
130 # Make sure ONOS is DEAD
131 main.log.info( "Killing any ONOS processes" )
132 killResults = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700133 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700134 killed = main.ONOSbench.onosKill( node.ip_address )
135 killResults = killResults and killed
136
137 cleanInstallResult = main.TRUE
138 gitPullResult = main.TRUE
139
140 main.step( "Starting Mininet" )
141 # scp topo file to mininet
142 # TODO: move to params?
143 topoName = "obelisk.py"
144 filePath = main.ONOSbench.home + "/tools/test/topos/"
kelvin-onlabd9e23de2015-08-06 10:34:44 -0700145 main.ONOSbench.scp( main.Mininet1,
146 filePath + topoName,
147 main.Mininet1.home,
148 direction="to" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700149 mnResult = main.Mininet1.startNet( )
150 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
151 onpass="Mininet Started",
152 onfail="Error starting Mininet" )
153
154 main.step( "Git checkout and pull " + gitBranch )
155 if PULLCODE:
156 main.ONOSbench.gitCheckout( gitBranch )
157 gitPullResult = main.ONOSbench.gitPull()
158 # values of 1 or 3 are good
159 utilities.assert_lesser( expect=0, actual=gitPullResult,
160 onpass="Git pull successful",
161 onfail="Git pull failed" )
162 main.ONOSbench.getVersion( report=True )
163
164 main.step( "Using mvn clean install" )
165 cleanInstallResult = main.TRUE
166 if PULLCODE and gitPullResult == main.TRUE:
167 cleanInstallResult = main.ONOSbench.cleanInstall()
168 else:
169 main.log.warn( "Did not pull new code so skipping mvn " +
170 "clean install" )
171 utilities.assert_equals( expect=main.TRUE,
172 actual=cleanInstallResult,
173 onpass="MCI successful",
174 onfail="MCI failed" )
175 # GRAPHS
176 # NOTE: important params here:
177 # job = name of Jenkins job
178 # Plot Name = Plot-HA, only can be used if multiple plots
179 # index = The number of the graph under plot name
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700180 job = "HAstopNodes"
Jon Hall5cf14d52015-07-16 12:15:19 -0700181 plotName = "Plot-HA"
182 graphs = '<ac:structured-macro ac:name="html">\n'
183 graphs += '<ac:plain-text-body><![CDATA[\n'
184 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
185 '/plot/' + plotName + '/getPlot?index=0' +\
186 '&width=500&height=300"' +\
187 'noborder="0" width="500" height="300" scrolling="yes" ' +\
188 'seamless="seamless"></iframe>\n'
189 graphs += ']]></ac:plain-text-body>\n'
190 graphs += '</ac:structured-macro>\n'
191 main.log.wiki(graphs)
192
193 main.step( "Creating ONOS package" )
Jon Hall3b489db2015-10-05 14:38:37 -0700194 # copy gen-partions file to ONOS
195 # NOTE: this assumes TestON and ONOS are on the same machine
196 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
197 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
198 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
199 main.ONOSbench.ip_address,
200 srcFile,
201 dstDir,
202 pwd=main.ONOSbench.pwd,
203 direction="from" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700204 packageResult = main.ONOSbench.onosPackage()
205 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
206 onpass="ONOS package successful",
207 onfail="ONOS package failed" )
208
209 main.step( "Installing ONOS package" )
210 onosInstallResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700211 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700212 tmpResult = main.ONOSbench.onosInstall( options="-f",
213 node=node.ip_address )
214 onosInstallResult = onosInstallResult and tmpResult
215 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
216 onpass="ONOS install successful",
217 onfail="ONOS install failed" )
Jon Hall3b489db2015-10-05 14:38:37 -0700218 # clean up gen-partitions file
219 try:
220 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
221 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
222 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
223 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
224 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
225 str( main.ONOSbench.handle.before ) )
226 except ( pexpect.TIMEOUT, pexpect.EOF ):
227 main.log.exception( "ONOSbench: pexpect exception found:" +
228 main.ONOSbench.handle.before )
229 main.cleanup()
230 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -0700231
232 main.step( "Checking if ONOS is up yet" )
233 for i in range( 2 ):
234 onosIsupResult = main.TRUE
Jon Halle1a3b752015-07-22 13:02:46 -0700235 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700236 started = main.ONOSbench.isup( node.ip_address )
237 if not started:
238 main.log.error( node.name + " didn't start!" )
239 main.ONOSbench.onosStop( node.ip_address )
240 main.ONOSbench.onosStart( node.ip_address )
241 onosIsupResult = onosIsupResult and started
242 if onosIsupResult == main.TRUE:
243 break
244 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
245 onpass="ONOS startup successful",
246 onfail="ONOS startup failed" )
247
248 main.log.step( "Starting ONOS CLI sessions" )
249 cliResults = main.TRUE
250 threads = []
Jon Halle1a3b752015-07-22 13:02:46 -0700251 for i in range( main.numCtrls ):
252 t = main.Thread( target=main.CLIs[i].startOnosCli,
Jon Hall5cf14d52015-07-16 12:15:19 -0700253 name="startOnosCli-" + str( i ),
Jon Halle1a3b752015-07-22 13:02:46 -0700254 args=[main.nodes[i].ip_address] )
Jon Hall5cf14d52015-07-16 12:15:19 -0700255 threads.append( t )
256 t.start()
257
258 for t in threads:
259 t.join()
260 cliResults = cliResults and t.result
261 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
262 onpass="ONOS cli startup successful",
263 onfail="ONOS cli startup failed" )
264
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700265 # Create a list of active nodes for use when some nodes are stopped
266 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
267
Jon Hall5cf14d52015-07-16 12:15:19 -0700268 if main.params[ 'tcpdump' ].lower() == "true":
269 main.step( "Start Packet Capture MN" )
270 main.Mininet2.startTcpdump(
271 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
272 + "-MN.pcap",
273 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
274 port=main.params[ 'MNtcpdump' ][ 'port' ] )
275
276 main.step( "App Ids check" )
277 appCheck = main.TRUE
278 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700279 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700280 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700281 name="appToIDCheck-" + str( i ),
282 args=[] )
283 threads.append( t )
284 t.start()
285
286 for t in threads:
287 t.join()
288 appCheck = appCheck and t.result
289 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700290 node = main.activeNodes[0]
291 main.log.warn( main.CLIs[node].apps() )
292 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700293 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
294 onpass="App Ids seem to be correct",
295 onfail="Something is wrong with app Ids" )
296
297 if cliResults == main.FALSE:
298 main.log.error( "Failed to start ONOS, stopping test" )
299 main.cleanup()
300 main.exit()
301
    def CASE2( self, main ):
        """
        Assign devices to controllers.

        Points every Mininet switch (s1-s28) at the full list of ONOS
        controller IPs, then reads each switch's controller list back and
        verifies that every ONOS node's IP appears in it.
        """
        import re
        # Sanity-check that CASE1 populated the shared test state
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                               "and check that an ONOS node becomes the " +\
                               "master of the device."
        main.step( "Assign switches to controllers" )

        # Build the controller-IP list and the switch-name list (s1..s28)
        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        # Verify each switch reports a connection target for every node
        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                # The controller list should contain "tcp:<node ip>"
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )
354
    def CASE21( self, main ):
        """
        Assign mastership to controllers.

        Manually assigns a designated master controller for each of the
        28 devices using the 'device-role' mechanism, then, after a short
        settle period, reads the role back and verifies ONOS reports the
        intended master for every device.
        """
        import time
        # Sanity-check that CASE1 populated the shared test state
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI calls go through the first active node's session
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster (the "% main.numCtrls" wraps indices around)
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = index of the intended master node,
                # deviceId = ONOS device id looked up by dpid suffix
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    # dpid suffix pattern for this range, e.g. i=8 -> "3008"
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice() returning None (AttributeError on .get) or a
            # missing device id both land here
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        # Re-read each device's master and compare to the intended one
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
476
477 def CASE3( self, main ):
478 """
479 Assign intents
480 """
481 import time
482 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700483 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700484 assert main, "main not defined"
485 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700486 assert main.CLIs, "main.CLIs not defined"
487 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700488 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700489 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700490 "assign predetermined host-to-host intents." +\
491 " After installation, check that the intent" +\
492 " is distributed to all nodes and the state" +\
493 " is INSTALLED"
494
495 # install onos-app-fwd
496 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700497 onosCli = main.CLIs[ main.activeNodes[0] ]
498 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700499 utilities.assert_equals( expect=main.TRUE, actual=installResults,
500 onpass="Install fwd successful",
501 onfail="Install fwd failed" )
502
503 main.step( "Check app ids" )
504 appCheck = main.TRUE
505 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700506 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700507 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700508 name="appToIDCheck-" + str( i ),
509 args=[] )
510 threads.append( t )
511 t.start()
512
513 for t in threads:
514 t.join()
515 appCheck = appCheck and t.result
516 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700517 main.log.warn( onosCli.apps() )
518 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700519 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
520 onpass="App Ids seem to be correct",
521 onfail="Something is wrong with app Ids" )
522
523 main.step( "Discovering Hosts( Via pingall for now )" )
524 # FIXME: Once we have a host discovery mechanism, use that instead
525 # REACTIVE FWD test
526 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700527 passMsg = "Reactive Pingall test passed"
528 time1 = time.time()
529 pingResult = main.Mininet1.pingall()
530 time2 = time.time()
531 if not pingResult:
532 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700533 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700534 passMsg += " on the second try"
535 utilities.assert_equals(
536 expect=main.TRUE,
537 actual=pingResult,
538 onpass= passMsg,
539 onfail="Reactive Pingall failed, " +
540 "one or more ping pairs failed" )
541 main.log.info( "Time for pingall: %2f seconds" %
542 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700543 # timeout for fwd flows
544 time.sleep( 11 )
545 # uninstall onos-app-fwd
546 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700547 node = main.activeNodes[0]
548 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700549 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
550 onpass="Uninstall fwd successful",
551 onfail="Uninstall fwd failed" )
552
553 main.step( "Check app ids" )
554 threads = []
555 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700556 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700557 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700558 name="appToIDCheck-" + str( i ),
559 args=[] )
560 threads.append( t )
561 t.start()
562
563 for t in threads:
564 t.join()
565 appCheck2 = appCheck2 and t.result
566 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700567 node = main.activeNodes[0]
568 main.log.warn( main.CLIs[node].apps() )
569 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700570 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
571 onpass="App Ids seem to be correct",
572 onfail="Something is wrong with app Ids" )
573
574 main.step( "Add host intents via cli" )
575 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700576 # TODO: move the host numbers to params
577 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700578 intentAddResult = True
579 hostResult = main.TRUE
580 for i in range( 8, 18 ):
581 main.log.info( "Adding host intent between h" + str( i ) +
582 " and h" + str( i + 10 ) )
583 host1 = "00:00:00:00:00:" + \
584 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
585 host2 = "00:00:00:00:00:" + \
586 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
587 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700588 host1Dict = onosCli.getHost( host1 )
589 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700590 host1Id = None
591 host2Id = None
592 if host1Dict and host2Dict:
593 host1Id = host1Dict.get( 'id', None )
594 host2Id = host2Dict.get( 'id', None )
595 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700596 nodeNum = ( i % len( main.activeNodes ) )
597 node = main.activeNodes[nodeNum]
598 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700599 if tmpId:
600 main.log.info( "Added intent with id: " + tmpId )
601 intentIds.append( tmpId )
602 else:
603 main.log.error( "addHostIntent returned: " +
604 repr( tmpId ) )
605 else:
606 main.log.error( "Error, getHost() failed for h" + str( i ) +
607 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700608 node = main.activeNodes[0]
609 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700610 main.log.warn( "Hosts output: " )
611 try:
612 main.log.warn( json.dumps( json.loads( hosts ),
613 sort_keys=True,
614 indent=4,
615 separators=( ',', ': ' ) ) )
616 except ( ValueError, TypeError ):
617 main.log.warn( repr( hosts ) )
618 hostResult = main.FALSE
619 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
620 onpass="Found a host id for each host",
621 onfail="Error looking up host ids" )
622
623 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700624 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700625 main.log.info( "Submitted intents: " + str( intentIds ) )
626 main.log.info( "Intents in ONOS: " + str( onosIds ) )
627 for intent in intentIds:
628 if intent in onosIds:
629 pass # intent submitted is in onos
630 else:
631 intentAddResult = False
632 if intentAddResult:
633 intentStop = time.time()
634 else:
635 intentStop = None
636 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700637 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700638 intentStates = []
639 installedCheck = True
640 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
641 count = 0
642 try:
643 for intent in json.loads( intents ):
644 state = intent.get( 'state', None )
645 if "INSTALLED" not in state:
646 installedCheck = False
647 intentId = intent.get( 'id', None )
648 intentStates.append( ( intentId, state ) )
649 except ( ValueError, TypeError ):
650 main.log.exception( "Error parsing intents" )
651 # add submitted intents not in the store
652 tmplist = [ i for i, s in intentStates ]
653 missingIntents = False
654 for i in intentIds:
655 if i not in tmplist:
656 intentStates.append( ( i, " - " ) )
657 missingIntents = True
658 intentStates.sort()
659 for i, s in intentStates:
660 count += 1
661 main.log.info( "%-6s%-15s%-15s" %
662 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700663 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700664 try:
665 missing = False
666 if leaders:
667 parsedLeaders = json.loads( leaders )
668 main.log.warn( json.dumps( parsedLeaders,
669 sort_keys=True,
670 indent=4,
671 separators=( ',', ': ' ) ) )
672 # check for all intent partitions
673 topics = []
674 for i in range( 14 ):
675 topics.append( "intent-partition-" + str( i ) )
676 main.log.debug( topics )
677 ONOStopics = [ j['topic'] for j in parsedLeaders ]
678 for topic in topics:
679 if topic not in ONOStopics:
680 main.log.error( "Error: " + topic +
681 " not in leaders" )
682 missing = True
683 else:
684 main.log.error( "leaders() returned None" )
685 except ( ValueError, TypeError ):
686 main.log.exception( "Error parsing leaders" )
687 main.log.error( repr( leaders ) )
688 # Check all nodes
689 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700690 for i in main.activeNodes:
691 response = main.CLIs[i].leaders( jsonFormat=False)
692 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700693 str( response ) )
694
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700695 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700696 try:
697 if partitions :
698 parsedPartitions = json.loads( partitions )
699 main.log.warn( json.dumps( parsedPartitions,
700 sort_keys=True,
701 indent=4,
702 separators=( ',', ': ' ) ) )
703 # TODO check for a leader in all paritions
704 # TODO check for consistency among nodes
705 else:
706 main.log.error( "partitions() returned None" )
707 except ( ValueError, TypeError ):
708 main.log.exception( "Error parsing partitions" )
709 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700710 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700711 try:
712 if pendingMap :
713 parsedPending = json.loads( pendingMap )
714 main.log.warn( json.dumps( parsedPending,
715 sort_keys=True,
716 indent=4,
717 separators=( ',', ': ' ) ) )
718 # TODO check something here?
719 else:
720 main.log.error( "pendingMap() returned None" )
721 except ( ValueError, TypeError ):
722 main.log.exception( "Error parsing pending map" )
723 main.log.error( repr( pendingMap ) )
724
725 intentAddResult = bool( intentAddResult and not missingIntents and
726 installedCheck )
727 if not intentAddResult:
728 main.log.error( "Error in pushing host intents to ONOS" )
729
730 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700731 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700732 correct = True
733 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700734 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700735 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700736 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700737 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700738 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700739 str( sorted( onosIds ) ) )
740 if sorted( ids ) != sorted( intentIds ):
741 main.log.warn( "Set of intent IDs doesn't match" )
742 correct = False
743 break
744 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700745 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700746 for intent in intents:
747 if intent[ 'state' ] != "INSTALLED":
748 main.log.warn( "Intent " + intent[ 'id' ] +
749 " is " + intent[ 'state' ] )
750 correct = False
751 break
752 if correct:
753 break
754 else:
755 time.sleep(1)
756 if not intentStop:
757 intentStop = time.time()
758 global gossipTime
759 gossipTime = intentStop - intentStart
760 main.log.info( "It took about " + str( gossipTime ) +
761 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700762 gossipPeriod = int( main.params['timers']['gossip'] )
763 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700764 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700765 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700766 onpass="ECM anti-entropy for intents worked within " +
767 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700768 onfail="Intent ECM anti-entropy took too long. " +
769 "Expected time:{}, Actual time:{}".format( maxGossipTime,
770 gossipTime ) )
771 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700772 intentAddResult = True
773
774 if not intentAddResult or "key" in pendingMap:
775 import time
776 installedCheck = True
777 main.log.info( "Sleeping 60 seconds to see if intents are found" )
778 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700779 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700780 main.log.info( "Submitted intents: " + str( intentIds ) )
781 main.log.info( "Intents in ONOS: " + str( onosIds ) )
782 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700783 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700784 intentStates = []
785 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
786 count = 0
787 try:
788 for intent in json.loads( intents ):
789 # Iter through intents of a node
790 state = intent.get( 'state', None )
791 if "INSTALLED" not in state:
792 installedCheck = False
793 intentId = intent.get( 'id', None )
794 intentStates.append( ( intentId, state ) )
795 except ( ValueError, TypeError ):
796 main.log.exception( "Error parsing intents" )
797 # add submitted intents not in the store
798 tmplist = [ i for i, s in intentStates ]
799 for i in intentIds:
800 if i not in tmplist:
801 intentStates.append( ( i, " - " ) )
802 intentStates.sort()
803 for i, s in intentStates:
804 count += 1
805 main.log.info( "%-6s%-15s%-15s" %
806 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700807 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700808 try:
809 missing = False
810 if leaders:
811 parsedLeaders = json.loads( leaders )
812 main.log.warn( json.dumps( parsedLeaders,
813 sort_keys=True,
814 indent=4,
815 separators=( ',', ': ' ) ) )
816 # check for all intent partitions
817 # check for election
818 topics = []
819 for i in range( 14 ):
820 topics.append( "intent-partition-" + str( i ) )
821 # FIXME: this should only be after we start the app
822 topics.append( "org.onosproject.election" )
823 main.log.debug( topics )
824 ONOStopics = [ j['topic'] for j in parsedLeaders ]
825 for topic in topics:
826 if topic not in ONOStopics:
827 main.log.error( "Error: " + topic +
828 " not in leaders" )
829 missing = True
830 else:
831 main.log.error( "leaders() returned None" )
832 except ( ValueError, TypeError ):
833 main.log.exception( "Error parsing leaders" )
834 main.log.error( repr( leaders ) )
835 # Check all nodes
836 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700837 for i in main.activeNodes:
838 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700839 response = node.leaders( jsonFormat=False)
840 main.log.warn( str( node.name ) + " leaders output: \n" +
841 str( response ) )
842
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700843 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700844 try:
845 if partitions :
846 parsedPartitions = json.loads( partitions )
847 main.log.warn( json.dumps( parsedPartitions,
848 sort_keys=True,
849 indent=4,
850 separators=( ',', ': ' ) ) )
851 # TODO check for a leader in all paritions
852 # TODO check for consistency among nodes
853 else:
854 main.log.error( "partitions() returned None" )
855 except ( ValueError, TypeError ):
856 main.log.exception( "Error parsing partitions" )
857 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700858 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700859 try:
860 if pendingMap :
861 parsedPending = json.loads( pendingMap )
862 main.log.warn( json.dumps( parsedPending,
863 sort_keys=True,
864 indent=4,
865 separators=( ',', ': ' ) ) )
866 # TODO check something here?
867 else:
868 main.log.error( "pendingMap() returned None" )
869 except ( ValueError, TypeError ):
870 main.log.exception( "Error parsing pending map" )
871 main.log.error( repr( pendingMap ) )
872
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Verifies dataplane connectivity for the host intents added earlier
        by pinging between host pairs h8..h17 -> h18..h27, then inspects the
        ONOS control plane:

          1. Ping each host pair and assert all pings succeed.
          2. Poll (up to ~40s) until every intent reports INSTALLED.
          3. Check leadership of the intent-partition topics.
          4. Dump partitions and the intent pending map for diagnostics.
          5. If any intent was not INSTALLED, wait 60 seconds, dump
             diagnostics again, and re-run the ping check.

        Args (supplied by the TestON framework):
            main: TestON test object carrying CLIs, nodes, activeNodes,
                  Mininet handles, logging, and assertion utilities.
        """
        import json
        import time
        # Sanity-check that the framework populated everything this case needs
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"
        main.step( "Ping across added host intents" )
        # Use the first active node's CLI for all single-node queries below
        onosCli = main.CLIs[ main.activeNodes[0] ]
        PingResult = main.TRUE
        # Host pairing h<i> <-> h<i+10> — presumably matches the intents
        # added in CASE3; TODO confirm against the Mininet topology used.
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                # NOTE(review): if intents() itself raised, tmpIntents is
                # unbound here and this repr() would raise NameError —
                # in practice json.loads() is the expected raiser.
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll for up to ~40 iterations (1s sleep between failed checks)
        # until every intent on the queried node reports INSTALLED.
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                # Unparseable CLI output; installedCheck keeps its current
                # value, so this iteration may still count as a pass.
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                # Expect one leadership topic per intent partition; assumes
                # 14 partitions — TODO confirm against cluster configuration.
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            # Dump raw (non-JSON) leaders output from every active node
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # NOTE(review): everything below — the 60s grace period, the second
        # round of diagnostics, and the re-ping — only runs when some intent
        # was still not INSTALLED after the polling loop above.
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            # Re-dump leadership / partitions / pending map after the wait
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    # Here the election topic IS expected, unlike the earlier
                    # leadership check where it is commented out.
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1154 def CASE5( self, main ):
1155 """
1156 Reading state of ONOS
1157 """
1158 import json
1159 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001160 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001161 assert main, "main not defined"
1162 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001163 assert main.CLIs, "main.CLIs not defined"
1164 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001165
1166 main.case( "Setting up and gathering data for current state" )
1167 # The general idea for this test case is to pull the state of
1168 # ( intents,flows, topology,... ) from each ONOS node
1169 # We can then compare them with each other and also with past states
1170
1171 main.step( "Check that each switch has a master" )
1172 global mastershipState
1173 mastershipState = '[]'
1174
1175 # Assert that each device has a master
1176 rolesNotNull = main.TRUE
1177 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001178 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001179 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001180 name="rolesNotNull-" + str( i ),
1181 args=[] )
1182 threads.append( t )
1183 t.start()
1184
1185 for t in threads:
1186 t.join()
1187 rolesNotNull = rolesNotNull and t.result
1188 utilities.assert_equals(
1189 expect=main.TRUE,
1190 actual=rolesNotNull,
1191 onpass="Each device has a master",
1192 onfail="Some devices don't have a master assigned" )
1193
1194 main.step( "Get the Mastership of each switch from each controller" )
1195 ONOSMastership = []
1196 mastershipCheck = main.FALSE
1197 consistentMastership = True
1198 rolesResults = True
1199 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001200 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001201 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001202 name="roles-" + str( i ),
1203 args=[] )
1204 threads.append( t )
1205 t.start()
1206
1207 for t in threads:
1208 t.join()
1209 ONOSMastership.append( t.result )
1210
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001211 for i in range( len( ONOSMastership ) ):
1212 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001213 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001214 main.log.error( "Error in getting ONOS" + node + " roles" )
1215 main.log.warn( "ONOS" + node + " mastership response: " +
1216 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001217 rolesResults = False
1218 utilities.assert_equals(
1219 expect=True,
1220 actual=rolesResults,
1221 onpass="No error in reading roles output",
1222 onfail="Error in reading roles from ONOS" )
1223
1224 main.step( "Check for consistency in roles from each controller" )
1225 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1226 main.log.info(
1227 "Switch roles are consistent across all ONOS nodes" )
1228 else:
1229 consistentMastership = False
1230 utilities.assert_equals(
1231 expect=True,
1232 actual=consistentMastership,
1233 onpass="Switch roles are consistent across all ONOS nodes",
1234 onfail="ONOS nodes have different views of switch roles" )
1235
1236 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001237 for i in range( len( main.activeNodes ) ):
1238 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001239 try:
1240 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001241 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001242 json.dumps(
1243 json.loads( ONOSMastership[ i ] ),
1244 sort_keys=True,
1245 indent=4,
1246 separators=( ',', ': ' ) ) )
1247 except ( ValueError, TypeError ):
1248 main.log.warn( repr( ONOSMastership[ i ] ) )
1249 elif rolesResults and consistentMastership:
1250 mastershipCheck = main.TRUE
1251 mastershipState = ONOSMastership[ 0 ]
1252
1253 main.step( "Get the intents from each controller" )
1254 global intentState
1255 intentState = []
1256 ONOSIntents = []
1257 intentCheck = main.FALSE
1258 consistentIntents = True
1259 intentsResults = True
1260 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001261 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001262 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001263 name="intents-" + str( i ),
1264 args=[],
1265 kwargs={ 'jsonFormat': True } )
1266 threads.append( t )
1267 t.start()
1268
1269 for t in threads:
1270 t.join()
1271 ONOSIntents.append( t.result )
1272
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001273 for i in range( len( ONOSIntents ) ):
1274 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001275 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001276 main.log.error( "Error in getting ONOS" + node + " intents" )
1277 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001278 repr( ONOSIntents[ i ] ) )
1279 intentsResults = False
1280 utilities.assert_equals(
1281 expect=True,
1282 actual=intentsResults,
1283 onpass="No error in reading intents output",
1284 onfail="Error in reading intents from ONOS" )
1285
1286 main.step( "Check for consistency in Intents from each controller" )
1287 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1288 main.log.info( "Intents are consistent across all ONOS " +
1289 "nodes" )
1290 else:
1291 consistentIntents = False
1292 main.log.error( "Intents not consistent" )
1293 utilities.assert_equals(
1294 expect=True,
1295 actual=consistentIntents,
1296 onpass="Intents are consistent across all ONOS nodes",
1297 onfail="ONOS nodes have different views of intents" )
1298
1299 if intentsResults:
1300 # Try to make it easy to figure out what is happening
1301 #
1302 # Intent ONOS1 ONOS2 ...
1303 # 0x01 INSTALLED INSTALLING
1304 # ... ... ...
1305 # ... ... ...
1306 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001307 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001308 title += " " * 10 + "ONOS" + str( n + 1 )
1309 main.log.warn( title )
1310 # get all intent keys in the cluster
1311 keys = []
1312 for nodeStr in ONOSIntents:
1313 node = json.loads( nodeStr )
1314 for intent in node:
1315 keys.append( intent.get( 'id' ) )
1316 keys = set( keys )
1317 for key in keys:
1318 row = "%-13s" % key
1319 for nodeStr in ONOSIntents:
1320 node = json.loads( nodeStr )
1321 for intent in node:
1322 if intent.get( 'id', "Error" ) == key:
1323 row += "%-15s" % intent.get( 'state' )
1324 main.log.warn( row )
1325 # End table view
1326
1327 if intentsResults and not consistentIntents:
1328 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001329 n = str( main.activeNodes[-1] + 1 )
1330 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001331 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1332 sort_keys=True,
1333 indent=4,
1334 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001335 for i in range( len( ONOSIntents ) ):
1336 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001337 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001338 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001339 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1340 sort_keys=True,
1341 indent=4,
1342 separators=( ',', ': ' ) ) )
1343 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001344 main.log.debug( "ONOS" + node + " intents match ONOS" +
1345 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001346 elif intentsResults and consistentIntents:
1347 intentCheck = main.TRUE
1348 intentState = ONOSIntents[ 0 ]
1349
1350 main.step( "Get the flows from each controller" )
1351 global flowState
1352 flowState = []
1353 ONOSFlows = []
1354 ONOSFlowsJson = []
1355 flowCheck = main.FALSE
1356 consistentFlows = True
1357 flowsResults = True
1358 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001359 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001360 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001361 name="flows-" + str( i ),
1362 args=[],
1363 kwargs={ 'jsonFormat': True } )
1364 threads.append( t )
1365 t.start()
1366
1367 # NOTE: Flows command can take some time to run
1368 time.sleep(30)
1369 for t in threads:
1370 t.join()
1371 result = t.result
1372 ONOSFlows.append( result )
1373
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001374 for i in range( len( ONOSFlows ) ):
1375 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001376 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1377 main.log.error( "Error in getting ONOS" + num + " flows" )
1378 main.log.warn( "ONOS" + num + " flows response: " +
1379 repr( ONOSFlows[ i ] ) )
1380 flowsResults = False
1381 ONOSFlowsJson.append( None )
1382 else:
1383 try:
1384 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1385 except ( ValueError, TypeError ):
1386 # FIXME: change this to log.error?
1387 main.log.exception( "Error in parsing ONOS" + num +
1388 " response as json." )
1389 main.log.error( repr( ONOSFlows[ i ] ) )
1390 ONOSFlowsJson.append( None )
1391 flowsResults = False
1392 utilities.assert_equals(
1393 expect=True,
1394 actual=flowsResults,
1395 onpass="No error in reading flows output",
1396 onfail="Error in reading flows from ONOS" )
1397
1398 main.step( "Check for consistency in Flows from each controller" )
1399 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1400 if all( tmp ):
1401 main.log.info( "Flow count is consistent across all ONOS nodes" )
1402 else:
1403 consistentFlows = False
1404 utilities.assert_equals(
1405 expect=True,
1406 actual=consistentFlows,
1407 onpass="The flow count is consistent across all ONOS nodes",
1408 onfail="ONOS nodes have different flow counts" )
1409
1410 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001411 for i in range( len( ONOSFlows ) ):
1412 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001413 try:
1414 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001415 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001416 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1417 indent=4, separators=( ',', ': ' ) ) )
1418 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001419 main.log.warn( "ONOS" + node + " flows: " +
1420 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001421 elif flowsResults and consistentFlows:
1422 flowCheck = main.TRUE
1423 flowState = ONOSFlows[ 0 ]
1424
1425 main.step( "Get the OF Table entries" )
1426 global flows
1427 flows = []
1428 for i in range( 1, 29 ):
Jon Hall9043c902015-07-30 14:23:44 -07001429 flows.append( main.Mininet1.getFlowTable( 1.3, "s" + str( i ) ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001430 if flowCheck == main.FALSE:
1431 for table in flows:
1432 main.log.warn( table )
1433 # TODO: Compare switch flow tables with ONOS flow tables
1434
1435 main.step( "Start continuous pings" )
1436 main.Mininet2.pingLong(
1437 src=main.params[ 'PING' ][ 'source1' ],
1438 target=main.params[ 'PING' ][ 'target1' ],
1439 pingTime=500 )
1440 main.Mininet2.pingLong(
1441 src=main.params[ 'PING' ][ 'source2' ],
1442 target=main.params[ 'PING' ][ 'target2' ],
1443 pingTime=500 )
1444 main.Mininet2.pingLong(
1445 src=main.params[ 'PING' ][ 'source3' ],
1446 target=main.params[ 'PING' ][ 'target3' ],
1447 pingTime=500 )
1448 main.Mininet2.pingLong(
1449 src=main.params[ 'PING' ][ 'source4' ],
1450 target=main.params[ 'PING' ][ 'target4' ],
1451 pingTime=500 )
1452 main.Mininet2.pingLong(
1453 src=main.params[ 'PING' ][ 'source5' ],
1454 target=main.params[ 'PING' ][ 'target5' ],
1455 pingTime=500 )
1456 main.Mininet2.pingLong(
1457 src=main.params[ 'PING' ][ 'source6' ],
1458 target=main.params[ 'PING' ][ 'target6' ],
1459 pingTime=500 )
1460 main.Mininet2.pingLong(
1461 src=main.params[ 'PING' ][ 'source7' ],
1462 target=main.params[ 'PING' ][ 'target7' ],
1463 pingTime=500 )
1464 main.Mininet2.pingLong(
1465 src=main.params[ 'PING' ][ 'source8' ],
1466 target=main.params[ 'PING' ][ 'target8' ],
1467 pingTime=500 )
1468 main.Mininet2.pingLong(
1469 src=main.params[ 'PING' ][ 'source9' ],
1470 target=main.params[ 'PING' ][ 'target9' ],
1471 pingTime=500 )
1472 main.Mininet2.pingLong(
1473 src=main.params[ 'PING' ][ 'source10' ],
1474 target=main.params[ 'PING' ][ 'target10' ],
1475 pingTime=500 )
1476
1477 main.step( "Collecting topology information from ONOS" )
1478 devices = []
1479 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001480 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001481 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001482 name="devices-" + str( i ),
1483 args=[ ] )
1484 threads.append( t )
1485 t.start()
1486
1487 for t in threads:
1488 t.join()
1489 devices.append( t.result )
1490 hosts = []
1491 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001492 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001493 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001494 name="hosts-" + str( i ),
1495 args=[ ] )
1496 threads.append( t )
1497 t.start()
1498
1499 for t in threads:
1500 t.join()
1501 try:
1502 hosts.append( json.loads( t.result ) )
1503 except ( ValueError, TypeError ):
1504 # FIXME: better handling of this, print which node
1505 # Maybe use thread name?
1506 main.log.exception( "Error parsing json output of hosts" )
1507 # FIXME: should this be an empty json object instead?
1508 hosts.append( None )
1509
1510 ports = []
1511 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001512 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001513 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001514 name="ports-" + str( i ),
1515 args=[ ] )
1516 threads.append( t )
1517 t.start()
1518
1519 for t in threads:
1520 t.join()
1521 ports.append( t.result )
1522 links = []
1523 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001524 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001525 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001526 name="links-" + str( i ),
1527 args=[ ] )
1528 threads.append( t )
1529 t.start()
1530
1531 for t in threads:
1532 t.join()
1533 links.append( t.result )
1534 clusters = []
1535 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001536 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001537 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001538 name="clusters-" + str( i ),
1539 args=[ ] )
1540 threads.append( t )
1541 t.start()
1542
1543 for t in threads:
1544 t.join()
1545 clusters.append( t.result )
1546 # Compare json objects for hosts and dataplane clusters
1547
1548 # hosts
1549 main.step( "Host view is consistent across ONOS nodes" )
1550 consistentHostsResult = main.TRUE
1551 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001552 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001553 if "Error" not in hosts[ controller ]:
1554 if hosts[ controller ] == hosts[ 0 ]:
1555 continue
1556 else: # hosts not consistent
1557 main.log.error( "hosts from ONOS" +
1558 controllerStr +
1559 " is inconsistent with ONOS1" )
1560 main.log.warn( repr( hosts[ controller ] ) )
1561 consistentHostsResult = main.FALSE
1562
1563 else:
1564 main.log.error( "Error in getting ONOS hosts from ONOS" +
1565 controllerStr )
1566 consistentHostsResult = main.FALSE
1567 main.log.warn( "ONOS" + controllerStr +
1568 " hosts response: " +
1569 repr( hosts[ controller ] ) )
1570 utilities.assert_equals(
1571 expect=main.TRUE,
1572 actual=consistentHostsResult,
1573 onpass="Hosts view is consistent across all ONOS nodes",
1574 onfail="ONOS nodes have different views of hosts" )
1575
1576 main.step( "Each host has an IP address" )
1577 ipResult = main.TRUE
1578 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001579 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001580 for host in hosts[ controller ]:
1581 if not host.get( 'ipAddresses', [ ] ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001582 main.log.error( "Error with host ips on controller" +
Jon Hall5cf14d52015-07-16 12:15:19 -07001583 controllerStr + ": " + str( host ) )
1584 ipResult = main.FALSE
1585 utilities.assert_equals(
1586 expect=main.TRUE,
1587 actual=ipResult,
1588 onpass="The ips of the hosts aren't empty",
1589 onfail="The ip of at least one host is missing" )
1590
1591 # Strongly connected clusters of devices
1592 main.step( "Cluster view is consistent across ONOS nodes" )
1593 consistentClustersResult = main.TRUE
1594 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001595 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001596 if "Error" not in clusters[ controller ]:
1597 if clusters[ controller ] == clusters[ 0 ]:
1598 continue
1599 else: # clusters not consistent
1600 main.log.error( "clusters from ONOS" + controllerStr +
1601 " is inconsistent with ONOS1" )
1602 consistentClustersResult = main.FALSE
1603
1604 else:
1605 main.log.error( "Error in getting dataplane clusters " +
1606 "from ONOS" + controllerStr )
1607 consistentClustersResult = main.FALSE
1608 main.log.warn( "ONOS" + controllerStr +
1609 " clusters response: " +
1610 repr( clusters[ controller ] ) )
1611 utilities.assert_equals(
1612 expect=main.TRUE,
1613 actual=consistentClustersResult,
1614 onpass="Clusters view is consistent across all ONOS nodes",
1615 onfail="ONOS nodes have different views of clusters" )
1616 # there should always only be one cluster
1617 main.step( "Cluster view correct across ONOS nodes" )
1618 try:
1619 numClusters = len( json.loads( clusters[ 0 ] ) )
1620 except ( ValueError, TypeError ):
1621 main.log.exception( "Error parsing clusters[0]: " +
1622 repr( clusters[ 0 ] ) )
1623 clusterResults = main.FALSE
1624 if numClusters == 1:
1625 clusterResults = main.TRUE
1626 utilities.assert_equals(
1627 expect=1,
1628 actual=numClusters,
1629 onpass="ONOS shows 1 SCC",
1630 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1631
1632 main.step( "Comparing ONOS topology to MN" )
1633 devicesResults = main.TRUE
1634 linksResults = main.TRUE
1635 hostsResults = main.TRUE
1636 mnSwitches = main.Mininet1.getSwitches()
1637 mnLinks = main.Mininet1.getLinks()
1638 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001639 for controller in main.activeNodes:
1640 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001641 if devices[ controller ] and ports[ controller ] and\
1642 "Error" not in devices[ controller ] and\
1643 "Error" not in ports[ controller ]:
1644
1645 currentDevicesResult = main.Mininet1.compareSwitches(
1646 mnSwitches,
1647 json.loads( devices[ controller ] ),
1648 json.loads( ports[ controller ] ) )
1649 else:
1650 currentDevicesResult = main.FALSE
1651 utilities.assert_equals( expect=main.TRUE,
1652 actual=currentDevicesResult,
1653 onpass="ONOS" + controllerStr +
1654 " Switches view is correct",
1655 onfail="ONOS" + controllerStr +
1656 " Switches view is incorrect" )
1657 if links[ controller ] and "Error" not in links[ controller ]:
1658 currentLinksResult = main.Mininet1.compareLinks(
1659 mnSwitches, mnLinks,
1660 json.loads( links[ controller ] ) )
1661 else:
1662 currentLinksResult = main.FALSE
1663 utilities.assert_equals( expect=main.TRUE,
1664 actual=currentLinksResult,
1665 onpass="ONOS" + controllerStr +
1666 " links view is correct",
1667 onfail="ONOS" + controllerStr +
1668 " links view is incorrect" )
1669
1670 if hosts[ controller ] or "Error" not in hosts[ controller ]:
1671 currentHostsResult = main.Mininet1.compareHosts(
1672 mnHosts,
1673 hosts[ controller ] )
1674 else:
1675 currentHostsResult = main.FALSE
1676 utilities.assert_equals( expect=main.TRUE,
1677 actual=currentHostsResult,
1678 onpass="ONOS" + controllerStr +
1679 " hosts exist in Mininet",
1680 onfail="ONOS" + controllerStr +
1681 " hosts don't match Mininet" )
1682
1683 devicesResults = devicesResults and currentDevicesResult
1684 linksResults = linksResults and currentLinksResult
1685 hostsResults = hostsResults and currentHostsResult
1686
1687 main.step( "Device information is correct" )
1688 utilities.assert_equals(
1689 expect=main.TRUE,
1690 actual=devicesResults,
1691 onpass="Device information is correct",
1692 onfail="Device information is incorrect" )
1693
1694 main.step( "Links are correct" )
1695 utilities.assert_equals(
1696 expect=main.TRUE,
1697 actual=linksResults,
1698 onpass="Link are correct",
1699 onfail="Links are incorrect" )
1700
1701 main.step( "Hosts are correct" )
1702 utilities.assert_equals(
1703 expect=main.TRUE,
1704 actual=hostsResults,
1705 onpass="Hosts are correct",
1706 onfail="Hosts are incorrect" )
1707
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001708 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001709 """
1710 The Failure case.
1711 """
Jon Halle1a3b752015-07-22 13:02:46 -07001712 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001713 assert main, "main not defined"
1714 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001715 assert main.CLIs, "main.CLIs not defined"
1716 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001717 main.case( "Stop minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001718
1719 main.step( "Checking ONOS Logs for errors" )
1720 for node in main.nodes:
1721 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1722 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1723
Jon Hall3b489db2015-10-05 14:38:37 -07001724 n = len( main.nodes ) # Number of nodes
1725 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1726 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1727 if n > 3:
1728 main.kill.append( p - 1 )
1729 # NOTE: This only works for cluster sizes of 3,5, or 7.
1730
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001731 main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001732 killResults = main.TRUE
1733 for i in main.kill:
1734 killResults = killResults and\
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001735 main.ONOSbench.onosStop( main.nodes[i].ip_address )
1736 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001737 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001738 onpass="ONOS nodes stopped successfully",
1739 onfail="ONOS nodes NOT successfully stopped" )
1740
1741 def CASE62( self, main ):
1742 """
1743 The bring up stopped nodes
1744 """
1745 import time
1746 assert main.numCtrls, "main.numCtrls not defined"
1747 assert main, "main not defined"
1748 assert utilities.assert_equals, "utilities.assert_equals not defined"
1749 assert main.CLIs, "main.CLIs not defined"
1750 assert main.nodes, "main.nodes not defined"
1751 assert main.kill, "main.kill not defined"
1752 main.case( "Restart minority of ONOS nodes" )
1753
1754 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1755 startResults = main.TRUE
1756 restartTime = time.time()
1757 for i in main.kill:
1758 startResults = startResults and\
1759 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1760 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1761 onpass="ONOS nodes started successfully",
1762 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001763
1764 main.step( "Checking if ONOS is up yet" )
1765 count = 0
1766 onosIsupResult = main.FALSE
1767 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001768 onosIsupResult = main.TRUE
1769 for i in main.kill:
1770 onosIsupResult = onosIsupResult and\
1771 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001772 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001773 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1774 onpass="ONOS restarted successfully",
1775 onfail="ONOS restart NOT successful" )
1776
Jon Halle1a3b752015-07-22 13:02:46 -07001777 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001778 cliResults = main.TRUE
1779 for i in main.kill:
1780 cliResults = cliResults and\
1781 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001782 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001783 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1784 onpass="ONOS cli restarted",
1785 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001786 main.activeNodes.sort()
1787 try:
1788 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1789 "List of active nodes has duplicates, this likely indicates something was run out of order"
1790 except AssertionError:
1791 main.log.exception( "" )
1792 main.cleanup()
1793 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001794
1795 # Grab the time of restart so we chan check how long the gossip
1796 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001797 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001798 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001799 # TODO: MAke this configurable. Also, we are breaking the above timer
1800 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001801 node = main.activeNodes[0]
1802 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1803 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1804 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001805
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        Verifies, across the surviving ( main.activeNodes ) controllers:
        device mastership is assigned and consistent, intents are
        consistent and unchanged from before the failure, switch flow
        tables are unchanged, and leadership election still works and
        did not elect a stopped node.

        NOTE(review): reads the module-level globals `intentState` and
        `flows` captured by an earlier case ( CASE5 ) — this case will
        fail if run out of order; confirm ordering in the .params file.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # If CASE61 never ran, no nodes were stopped; default to an empty
        # kill list so the leadership check below still works.
        try:
            main.kill
        except AttributeError:
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel; each thread's result is a
        # TRUE/FALSE from the node's rolesNotNull CLI check.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect the raw `roles` output from every active node in parallel.
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Results are indexed in the same order as main.activeNodes, so
        # map index i back to a 1-based node number for log messages.
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Every node's raw roles output must match node 0's exactly.
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's pretty-printed roles to help
        # diagnose which node diverged.
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        # Fetch JSON intents from every active node in parallel.
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE: sorted() here sorts the characters of the raw JSON string,
        # making the comparison order-insensitive at the text level.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent ONOS1 ONOS2 ...
            # 0x01 INSTALLED INSTALLING
            # ... ... ...
            # ... ... ...
            title = " ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node.
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states, e.g.
        # { 'INSTALLED': 25, 'FAILED': 3 }.
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        # restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        # maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # Fast path: the raw JSON strings are identical.
        if intentState and intentState == ONOSIntents[ 0 ]:
            sameIntents = main.TRUE
            main.log.info( "Intents are consistent with before failure" )
        # TODO: possibly the states have changed? we may need to figure out
        # what the acceptable states are
        # Slow path: same string length, so diff the parsed intent objects.
        elif len( intentState ) == len( ONOSIntents[ 0 ] ):
            sameIntents = main.TRUE
            try:
                before = json.loads( intentState )
                after = json.loads( ONOSIntents[ 0 ] )
                for intent in before:
                    if intent not in after:
                        sameIntents = main.FALSE
                        main.log.debug( "Intent is not currently in ONOS " +
                                        "(at least in the same form):" )
                        main.log.debug( json.dumps( intent ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        if sameIntents == main.FALSE:
            # Dump both snapshots for post-mortem comparison.
            try:
                main.log.debug( "ONOS intents before: " )
                main.log.debug( json.dumps( json.loads( intentState ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
                main.log.debug( "Current ONOS intents: " )
                main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        # `flows` holds per-switch flow tables captured before the failure
        # by an earlier case; compare against the current tables of the 28
        # switches in the obelisk topology.
        FlowTables = main.TRUE
        flows2 = []
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            # 1.3 selects the OpenFlow protocol version for ovs-ofctl.
            tmpFlows = main.Mininet1.getFlowTable( 1.3, "s" + str( i + 1 ) )
            flows2.append( tmpFlows )
            tempResult = main.Mininet1.flowComp(
                flow1=flows[ i ],
                flow2=tmpFlows )
            FlowTables = FlowTables and tempResult
            if FlowTables == main.FALSE:
                main.log.info( "Differences in flow table for switch: s" +
                               str( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        # Stop the long-running background pings started earlier.
        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes stopped in CASE61; the elected leader must not
        # be one of them.
        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        # All surviving nodes must agree on a single leader.
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2127
2128 def CASE8( self, main ):
2129 """
2130 Compare topo
2131 """
2132 import json
2133 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002134 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002135 assert main, "main not defined"
2136 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002137 assert main.CLIs, "main.CLIs not defined"
2138 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002139
2140 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002141 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002142 " and ONOS"
2143
2144 main.step( "Comparing ONOS topology to MN" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002145 topoResult = main.FALSE
2146 elapsed = 0
2147 count = 0
2148 main.step( "Collecting topology information from ONOS" )
2149 startTime = time.time()
2150 # Give time for Gossip to work
2151 while topoResult == main.FALSE and elapsed < 60:
Jon Hall96091e62015-09-21 17:34:17 -07002152 devicesResults = main.TRUE
2153 linksResults = main.TRUE
2154 hostsResults = main.TRUE
2155 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002156 count += 1
2157 cliStart = time.time()
2158 devices = []
2159 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002160 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002161 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002162 name="devices-" + str( i ),
2163 args=[ ] )
2164 threads.append( t )
2165 t.start()
2166
2167 for t in threads:
2168 t.join()
2169 devices.append( t.result )
2170 hosts = []
2171 ipResult = main.TRUE
2172 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002173 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002174 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07002175 name="hosts-" + str( i ),
2176 args=[ ] )
2177 threads.append( t )
2178 t.start()
2179
2180 for t in threads:
2181 t.join()
2182 try:
2183 hosts.append( json.loads( t.result ) )
2184 except ( ValueError, TypeError ):
2185 main.log.exception( "Error parsing hosts results" )
2186 main.log.error( repr( t.result ) )
2187 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002188 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002189 for host in hosts[ controller ]:
2190 if host is None or host.get( 'ipAddresses', [] ) == []:
2191 main.log.error(
2192 "DEBUG:Error with host ipAddresses on controller" +
2193 controllerStr + ": " + str( host ) )
2194 ipResult = main.FALSE
2195 ports = []
2196 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002197 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002198 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002199 name="ports-" + str( i ),
2200 args=[ ] )
2201 threads.append( t )
2202 t.start()
2203
2204 for t in threads:
2205 t.join()
2206 ports.append( t.result )
2207 links = []
2208 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002209 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002210 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002211 name="links-" + str( i ),
2212 args=[ ] )
2213 threads.append( t )
2214 t.start()
2215
2216 for t in threads:
2217 t.join()
2218 links.append( t.result )
2219 clusters = []
2220 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002221 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002222 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002223 name="clusters-" + str( i ),
2224 args=[ ] )
2225 threads.append( t )
2226 t.start()
2227
2228 for t in threads:
2229 t.join()
2230 clusters.append( t.result )
2231
2232 elapsed = time.time() - startTime
2233 cliTime = time.time() - cliStart
2234 print "Elapsed time: " + str( elapsed )
2235 print "CLI time: " + str( cliTime )
2236
2237 mnSwitches = main.Mininet1.getSwitches()
2238 mnLinks = main.Mininet1.getLinks()
2239 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002240 for controller in range( len( main.activeNodes ) ):
2241 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002242 if devices[ controller ] and ports[ controller ] and\
2243 "Error" not in devices[ controller ] and\
2244 "Error" not in ports[ controller ]:
2245
2246 currentDevicesResult = main.Mininet1.compareSwitches(
2247 mnSwitches,
2248 json.loads( devices[ controller ] ),
2249 json.loads( ports[ controller ] ) )
2250 else:
2251 currentDevicesResult = main.FALSE
2252 utilities.assert_equals( expect=main.TRUE,
2253 actual=currentDevicesResult,
2254 onpass="ONOS" + controllerStr +
2255 " Switches view is correct",
2256 onfail="ONOS" + controllerStr +
2257 " Switches view is incorrect" )
2258
2259 if links[ controller ] and "Error" not in links[ controller ]:
2260 currentLinksResult = main.Mininet1.compareLinks(
2261 mnSwitches, mnLinks,
2262 json.loads( links[ controller ] ) )
2263 else:
2264 currentLinksResult = main.FALSE
2265 utilities.assert_equals( expect=main.TRUE,
2266 actual=currentLinksResult,
2267 onpass="ONOS" + controllerStr +
2268 " links view is correct",
2269 onfail="ONOS" + controllerStr +
2270 " links view is incorrect" )
2271
2272 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2273 currentHostsResult = main.Mininet1.compareHosts(
2274 mnHosts,
2275 hosts[ controller ] )
2276 else:
2277 currentHostsResult = main.FALSE
2278 utilities.assert_equals( expect=main.TRUE,
2279 actual=currentHostsResult,
2280 onpass="ONOS" + controllerStr +
2281 " hosts exist in Mininet",
2282 onfail="ONOS" + controllerStr +
2283 " hosts don't match Mininet" )
2284 # CHECKING HOST ATTACHMENT POINTS
2285 hostAttachment = True
2286 zeroHosts = False
2287 # FIXME: topo-HA/obelisk specific mappings:
2288 # key is mac and value is dpid
2289 mappings = {}
2290 for i in range( 1, 29 ): # hosts 1 through 28
2291 # set up correct variables:
2292 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2293 if i == 1:
2294 deviceId = "1000".zfill(16)
2295 elif i == 2:
2296 deviceId = "2000".zfill(16)
2297 elif i == 3:
2298 deviceId = "3000".zfill(16)
2299 elif i == 4:
2300 deviceId = "3004".zfill(16)
2301 elif i == 5:
2302 deviceId = "5000".zfill(16)
2303 elif i == 6:
2304 deviceId = "6000".zfill(16)
2305 elif i == 7:
2306 deviceId = "6007".zfill(16)
2307 elif i >= 8 and i <= 17:
2308 dpid = '3' + str( i ).zfill( 3 )
2309 deviceId = dpid.zfill(16)
2310 elif i >= 18 and i <= 27:
2311 dpid = '6' + str( i ).zfill( 3 )
2312 deviceId = dpid.zfill(16)
2313 elif i == 28:
2314 deviceId = "2800".zfill(16)
2315 mappings[ macId ] = deviceId
2316 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2317 if hosts[ controller ] == []:
2318 main.log.warn( "There are no hosts discovered" )
2319 zeroHosts = True
2320 else:
2321 for host in hosts[ controller ]:
2322 mac = None
2323 location = None
2324 device = None
2325 port = None
2326 try:
2327 mac = host.get( 'mac' )
2328 assert mac, "mac field could not be found for this host object"
2329
2330 location = host.get( 'location' )
2331 assert location, "location field could not be found for this host object"
2332
2333 # Trim the protocol identifier off deviceId
2334 device = str( location.get( 'elementId' ) ).split(':')[1]
2335 assert device, "elementId field could not be found for this host location object"
2336
2337 port = location.get( 'port' )
2338 assert port, "port field could not be found for this host location object"
2339
2340 # Now check if this matches where they should be
2341 if mac and device and port:
2342 if str( port ) != "1":
2343 main.log.error( "The attachment port is incorrect for " +
2344 "host " + str( mac ) +
2345 ". Expected: 1 Actual: " + str( port) )
2346 hostAttachment = False
2347 if device != mappings[ str( mac ) ]:
2348 main.log.error( "The attachment device is incorrect for " +
2349 "host " + str( mac ) +
2350 ". Expected: " + mappings[ str( mac ) ] +
2351 " Actual: " + device )
2352 hostAttachment = False
2353 else:
2354 hostAttachment = False
2355 except AssertionError:
2356 main.log.exception( "Json object not as expected" )
2357 main.log.error( repr( host ) )
2358 hostAttachment = False
2359 else:
2360 main.log.error( "No hosts json output or \"Error\"" +
2361 " in output. hosts = " +
2362 repr( hosts[ controller ] ) )
2363 if zeroHosts is False:
2364 hostAttachment = True
2365
2366 # END CHECKING HOST ATTACHMENT POINTS
2367 devicesResults = devicesResults and currentDevicesResult
2368 linksResults = linksResults and currentLinksResult
2369 hostsResults = hostsResults and currentHostsResult
2370 hostAttachmentResults = hostAttachmentResults and\
2371 hostAttachment
2372
2373 # Compare json objects for hosts and dataplane clusters
2374
2375 # hosts
2376 main.step( "Hosts view is consistent across all ONOS nodes" )
2377 consistentHostsResult = main.TRUE
2378 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002379 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002380 if "Error" not in hosts[ controller ]:
2381 if hosts[ controller ] == hosts[ 0 ]:
2382 continue
2383 else: # hosts not consistent
2384 main.log.error( "hosts from ONOS" + controllerStr +
2385 " is inconsistent with ONOS1" )
2386 main.log.warn( repr( hosts[ controller ] ) )
2387 consistentHostsResult = main.FALSE
2388
2389 else:
2390 main.log.error( "Error in getting ONOS hosts from ONOS" +
2391 controllerStr )
2392 consistentHostsResult = main.FALSE
2393 main.log.warn( "ONOS" + controllerStr +
2394 " hosts response: " +
2395 repr( hosts[ controller ] ) )
2396 utilities.assert_equals(
2397 expect=main.TRUE,
2398 actual=consistentHostsResult,
2399 onpass="Hosts view is consistent across all ONOS nodes",
2400 onfail="ONOS nodes have different views of hosts" )
2401
2402 main.step( "Hosts information is correct" )
2403 hostsResults = hostsResults and ipResult
2404 utilities.assert_equals(
2405 expect=main.TRUE,
2406 actual=hostsResults,
2407 onpass="Host information is correct",
2408 onfail="Host information is incorrect" )
2409
2410 main.step( "Host attachment points to the network" )
2411 utilities.assert_equals(
2412 expect=True,
2413 actual=hostAttachmentResults,
2414 onpass="Hosts are correctly attached to the network",
2415 onfail="ONOS did not correctly attach hosts to the network" )
2416
2417 # Strongly connected clusters of devices
2418 main.step( "Clusters view is consistent across all ONOS nodes" )
2419 consistentClustersResult = main.TRUE
2420 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002421 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002422 if "Error" not in clusters[ controller ]:
2423 if clusters[ controller ] == clusters[ 0 ]:
2424 continue
2425 else: # clusters not consistent
2426 main.log.error( "clusters from ONOS" +
2427 controllerStr +
2428 " is inconsistent with ONOS1" )
2429 consistentClustersResult = main.FALSE
2430
2431 else:
2432 main.log.error( "Error in getting dataplane clusters " +
2433 "from ONOS" + controllerStr )
2434 consistentClustersResult = main.FALSE
2435 main.log.warn( "ONOS" + controllerStr +
2436 " clusters response: " +
2437 repr( clusters[ controller ] ) )
2438 utilities.assert_equals(
2439 expect=main.TRUE,
2440 actual=consistentClustersResult,
2441 onpass="Clusters view is consistent across all ONOS nodes",
2442 onfail="ONOS nodes have different views of clusters" )
2443
2444 main.step( "There is only one SCC" )
2445 # there should always only be one cluster
2446 try:
2447 numClusters = len( json.loads( clusters[ 0 ] ) )
2448 except ( ValueError, TypeError ):
2449 main.log.exception( "Error parsing clusters[0]: " +
2450 repr( clusters[0] ) )
2451 clusterResults = main.FALSE
2452 if numClusters == 1:
2453 clusterResults = main.TRUE
2454 utilities.assert_equals(
2455 expect=1,
2456 actual=numClusters,
2457 onpass="ONOS shows 1 SCC",
2458 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2459
2460 topoResult = ( devicesResults and linksResults
2461 and hostsResults and consistentHostsResult
2462 and consistentClustersResult and clusterResults
2463 and ipResult and hostAttachmentResults )
2464
2465 topoResult = topoResult and int( count <= 2 )
2466 note = "note it takes about " + str( int( cliTime ) ) + \
2467 " seconds for the test to make all the cli calls to fetch " +\
2468 "the topology from each ONOS instance"
2469 main.log.info(
2470 "Very crass estimate for topology discovery/convergence( " +
2471 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2472 str( count ) + " tries" )
2473
2474 main.step( "Device information is correct" )
2475 utilities.assert_equals(
2476 expect=main.TRUE,
2477 actual=devicesResults,
2478 onpass="Device information is correct",
2479 onfail="Device information is incorrect" )
2480
2481 main.step( "Links are correct" )
2482 utilities.assert_equals(
2483 expect=main.TRUE,
2484 actual=linksResults,
2485 onpass="Link are correct",
2486 onfail="Links are incorrect" )
2487
2488 # FIXME: move this to an ONOS state case
2489 main.step( "Checking ONOS nodes" )
2490 nodesOutput = []
2491 nodeResults = main.TRUE
2492 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002493 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002494 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002495 name="nodes-" + str( i ),
2496 args=[ ] )
2497 threads.append( t )
2498 t.start()
2499
2500 for t in threads:
2501 t.join()
2502 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002503 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002504 for i in nodesOutput:
2505 try:
2506 current = json.loads( i )
2507 for node in current:
2508 currentResult = main.FALSE
2509 if node['ip'] in ips: # node in nodes() output is in cell
2510 if node['state'] == 'ACTIVE':
2511 currentResult = main.TRUE
2512 else:
2513 main.log.error( "Error in ONOS node availability" )
2514 main.log.error(
2515 json.dumps( current,
2516 sort_keys=True,
2517 indent=4,
2518 separators=( ',', ': ' ) ) )
2519 break
2520 nodeResults = nodeResults and currentResult
2521 except ( ValueError, TypeError ):
2522 main.log.error( "Error parsing nodes output" )
2523 main.log.warn( repr( i ) )
2524 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2525 onpass="Nodes check successful",
2526 onfail="Nodes check NOT successful" )
2527
2528 def CASE9( self, main ):
2529 """
2530 Link s3-s28 down
2531 """
2532 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002533 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002534 assert main, "main not defined"
2535 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002536 assert main.CLIs, "main.CLIs not defined"
2537 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002538 # NOTE: You should probably run a topology check after this
2539
2540 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2541
2542 description = "Turn off a link to ensure that Link Discovery " +\
2543 "is working properly"
2544 main.case( description )
2545
2546 main.step( "Kill Link between s3 and s28" )
2547 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2548 main.log.info( "Waiting " + str( linkSleep ) +
2549 " seconds for link down to be discovered" )
2550 time.sleep( linkSleep )
2551 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2552 onpass="Link down successful",
2553 onfail="Failed to bring link down" )
2554 # TODO do some sort of check here
2555
2556 def CASE10( self, main ):
2557 """
2558 Link s3-s28 up
2559 """
2560 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002561 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002562 assert main, "main not defined"
2563 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002564 assert main.CLIs, "main.CLIs not defined"
2565 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002566 # NOTE: You should probably run a topology check after this
2567
2568 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2569
2570 description = "Restore a link to ensure that Link Discovery is " + \
2571 "working properly"
2572 main.case( description )
2573
2574 main.step( "Bring link between s3 and s28 back up" )
2575 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2576 main.log.info( "Waiting " + str( linkSleep ) +
2577 " seconds for link up to be discovered" )
2578 time.sleep( linkSleep )
2579 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2580 onpass="Link up successful",
2581 onfail="Failed to bring link up" )
2582 # TODO do some sort of check here
2583
2584 def CASE11( self, main ):
2585 """
2586 Switch Down
2587 """
2588 # NOTE: You should probably run a topology check after this
2589 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002590 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002591 assert main, "main not defined"
2592 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002593 assert main.CLIs, "main.CLIs not defined"
2594 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002595
2596 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2597
2598 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002599 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002600 main.case( description )
2601 switch = main.params[ 'kill' ][ 'switch' ]
2602 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2603
2604 # TODO: Make this switch parameterizable
2605 main.step( "Kill " + switch )
2606 main.log.info( "Deleting " + switch )
2607 main.Mininet1.delSwitch( switch )
2608 main.log.info( "Waiting " + str( switchSleep ) +
2609 " seconds for switch down to be discovered" )
2610 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002611 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002612 # Peek at the deleted switch
2613 main.log.warn( str( device ) )
2614 result = main.FALSE
2615 if device and device[ 'available' ] is False:
2616 result = main.TRUE
2617 utilities.assert_equals( expect=main.TRUE, actual=result,
2618 onpass="Kill switch successful",
2619 onfail="Failed to kill switch?" )
2620
2621 def CASE12( self, main ):
2622 """
2623 Switch Up
2624 """
2625 # NOTE: You should probably run a topology check after this
2626 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002627 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002628 assert main, "main not defined"
2629 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002630 assert main.CLIs, "main.CLIs not defined"
2631 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002632 assert ONOS1Port, "ONOS1Port not defined"
2633 assert ONOS2Port, "ONOS2Port not defined"
2634 assert ONOS3Port, "ONOS3Port not defined"
2635 assert ONOS4Port, "ONOS4Port not defined"
2636 assert ONOS5Port, "ONOS5Port not defined"
2637 assert ONOS6Port, "ONOS6Port not defined"
2638 assert ONOS7Port, "ONOS7Port not defined"
2639
2640 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2641 switch = main.params[ 'kill' ][ 'switch' ]
2642 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2643 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002644 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002645 description = "Adding a switch to ensure it is discovered correctly"
2646 main.case( description )
2647
2648 main.step( "Add back " + switch )
2649 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2650 for peer in links:
2651 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002652 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002653 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2654 main.log.info( "Waiting " + str( switchSleep ) +
2655 " seconds for switch up to be discovered" )
2656 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002657 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002658 # Peek at the deleted switch
2659 main.log.warn( str( device ) )
2660 result = main.FALSE
2661 if device and device[ 'available' ]:
2662 result = main.TRUE
2663 utilities.assert_equals( expect=main.TRUE, actual=result,
2664 onpass="add switch successful",
2665 onfail="Failed to add switch?" )
2666
2667 def CASE13( self, main ):
2668 """
2669 Clean up
2670 """
2671 import os
2672 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002673 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002674 assert main, "main not defined"
2675 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002676 assert main.CLIs, "main.CLIs not defined"
2677 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002678
2679 # printing colors to terminal
2680 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2681 'blue': '\033[94m', 'green': '\033[92m',
2682 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2683 main.case( "Test Cleanup" )
2684 main.step( "Killing tcpdumps" )
2685 main.Mininet2.stopTcpdump()
2686
2687 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002688 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002689 main.step( "Copying MN pcap and ONOS log files to test station" )
2690 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2691 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002692 # NOTE: MN Pcap file is being saved to logdir.
2693 # We scp this file as MN and TestON aren't necessarily the same vm
2694
2695 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002696 # TODO: Load these from params
2697 # NOTE: must end in /
2698 logFolder = "/opt/onos/log/"
2699 logFiles = [ "karaf.log", "karaf.log.1" ]
2700 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002701 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002702 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002703 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002704 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2705 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002706 # std*.log's
2707 # NOTE: must end in /
2708 logFolder = "/opt/onos/var/"
2709 logFiles = [ "stderr.log", "stdout.log" ]
2710 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002711 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002712 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002713 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002714 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2715 logFolder + f, dstName )
2716 else:
2717 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002718
2719 main.step( "Stopping Mininet" )
2720 mnResult = main.Mininet1.stopNet()
2721 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2722 onpass="Mininet stopped",
2723 onfail="MN cleanup NOT successful" )
2724
2725 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002726 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002727 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2728 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002729
2730 try:
2731 timerLog = open( main.logdir + "/Timers.csv", 'w')
2732 # Overwrite with empty line and close
2733 labels = "Gossip Intents, Restart"
2734 data = str( gossipTime ) + ", " + str( main.restartTime )
2735 timerLog.write( labels + "\n" + data )
2736 timerLog.close()
2737 except NameError, e:
2738 main.log.exception(e)
2739
2740 def CASE14( self, main ):
2741 """
2742 start election app on all onos nodes
2743 """
Jon Halle1a3b752015-07-22 13:02:46 -07002744 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002745 assert main, "main not defined"
2746 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002747 assert main.CLIs, "main.CLIs not defined"
2748 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002749
2750 main.case("Start Leadership Election app")
2751 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002752 onosCli = main.CLIs[ main.activeNodes[0] ]
2753 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002754 utilities.assert_equals(
2755 expect=main.TRUE,
2756 actual=appResult,
2757 onpass="Election app installed",
2758 onfail="Something went wrong with installing Leadership election" )
2759
2760 main.step( "Run for election on each node" )
2761 leaderResult = main.TRUE
2762 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002763 for i in main.activeNodes:
2764 main.CLIs[i].electionTestRun()
2765 for i in main.activeNodes:
2766 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002767 leader = cli.electionTestLeader()
2768 if leader is None or leader == main.FALSE:
2769 main.log.error( cli.name + ": Leader for the election app " +
2770 "should be an ONOS node, instead got '" +
2771 str( leader ) + "'" )
2772 leaderResult = main.FALSE
2773 leaders.append( leader )
2774 utilities.assert_equals(
2775 expect=main.TRUE,
2776 actual=leaderResult,
2777 onpass="Successfully ran for leadership",
2778 onfail="Failed to run for leadership" )
2779
2780 main.step( "Check that each node shows the same leader" )
2781 sameLeader = main.TRUE
2782 if len( set( leaders ) ) != 1:
2783 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002784 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002785 str( leaders ) )
2786 utilities.assert_equals(
2787 expect=main.TRUE,
2788 actual=sameLeader,
2789 onpass="Leadership is consistent for the election topic",
2790 onfail="Nodes have different leaders" )
2791
2792 def CASE15( self, main ):
2793 """
2794 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002795 15.1 Run election on each node
2796 15.2 Check that each node has the same leaders and candidates
2797 15.3 Find current leader and withdraw
2798 15.4 Check that a new node was elected leader
2799 15.5 Check that that new leader was the candidate of old leader
2800 15.6 Run for election on old leader
2801 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2802 15.8 Make sure that the old leader was added to the candidate list
2803
2804 old and new variable prefixes refer to data from before vs after
2805 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002806 """
2807 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002808 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002809 assert main, "main not defined"
2810 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002811 assert main.CLIs, "main.CLIs not defined"
2812 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002813
Jon Hall5cf14d52015-07-16 12:15:19 -07002814 description = "Check that Leadership Election is still functional"
2815 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002816 # NOTE: Need to re-run since being a canidate is not persistant
2817 # TODO: add check for "Command not found:" in the driver, this
2818 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002819
acsmars71adceb2015-08-31 15:09:26 -07002820 oldLeaders = [] # leaders by node before withdrawl from candidates
2821 newLeaders = [] # leaders by node after withdrawl from candidates
2822 oldAllCandidates = [] # list of lists of each nodes' candidates before
2823 newAllCandidates = [] # list of lists of each nodes' candidates after
2824 oldCandidates = [] # list of candidates from node 0 before withdrawl
2825 newCandidates = [] # list of candidates from node 0 after withdrawl
2826 oldLeader = '' # the old leader from oldLeaders, None if not same
2827 newLeader = '' # the new leaders fron newLoeaders, None if not same
2828 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2829 expectNoLeader = False # True when there is only one leader
2830 if main.numCtrls == 1:
2831 expectNoLeader = True
2832
2833 main.step( "Run for election on each node" )
2834 electionResult = main.TRUE
2835
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002836 for i in main.activeNodes: # run test election on each node
2837 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002838 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002839 utilities.assert_equals(
2840 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002841 actual=electionResult,
2842 onpass="All nodes successfully ran for leadership",
2843 onfail="At least one node failed to run for leadership" )
2844
acsmars3a72bde2015-09-02 14:16:22 -07002845 if electionResult == main.FALSE:
2846 main.log.error(
2847 "Skipping Test Case because Election Test App isn't loaded" )
2848 main.skipCase()
2849
acsmars71adceb2015-08-31 15:09:26 -07002850 main.step( "Check that each node shows the same leader and candidates" )
2851 sameResult = main.TRUE
2852 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002853 for i in main.activeNodes:
2854 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002855 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2856 oldAllCandidates.append( node )
2857 oldLeaders.append( node[ 0 ] )
2858 oldCandidates = oldAllCandidates[ 0 ]
2859
2860 # Check that each node has the same leader. Defines oldLeader
2861 if len( set( oldLeaders ) ) != 1:
2862 sameResult = main.FALSE
2863 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2864 oldLeader = None
2865 else:
2866 oldLeader = oldLeaders[ 0 ]
2867
2868 # Check that each node's candidate list is the same
2869 for candidates in oldAllCandidates:
2870 if set( candidates ) != set( oldCandidates ):
2871 sameResult = main.FALSE
2872 failMessage += "and candidates"
acsmars71adceb2015-08-31 15:09:26 -07002873 utilities.assert_equals(
2874 expect=main.TRUE,
2875 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002876 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002877 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002878
2879 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002880 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002881 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002882 if oldLeader is None:
2883 main.log.error( "Leadership isn't consistent." )
2884 withdrawResult = main.FALSE
2885 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002886 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002887 if oldLeader == main.nodes[ i ].ip_address:
2888 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002889 break
2890 else: # FOR/ELSE statement
2891 main.log.error( "Leader election, could not find current leader" )
2892 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002893 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002894 utilities.assert_equals(
2895 expect=main.TRUE,
2896 actual=withdrawResult,
2897 onpass="Node was withdrawn from election",
2898 onfail="Node was not withdrawn from election" )
2899
acsmars71adceb2015-08-31 15:09:26 -07002900 main.step( "Check that a new node was elected leader" )
2901
Jon Hall5cf14d52015-07-16 12:15:19 -07002902 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002903 newLeaderResult = main.TRUE
2904 failMessage = "Nodes have different leaders"
2905
2906 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002907 for i in main.activeNodes:
2908 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002909 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2910 # elections might no have finished yet
2911 if node[ 0 ] == 'none' and not expectNoLeader:
2912 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2913 "sure elections are complete." )
2914 time.sleep(5)
2915 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2916 # election still isn't done or there is a problem
2917 if node[ 0 ] == 'none':
2918 main.log.error( "No leader was elected on at least 1 node" )
2919 newLeaderResult = main.FALSE
2920 newAllCandidates.append( node )
2921 newLeaders.append( node[ 0 ] )
2922 newCandidates = newAllCandidates[ 0 ]
2923
2924 # Check that each node has the same leader. Defines newLeader
2925 if len( set( newLeaders ) ) != 1:
2926 newLeaderResult = main.FALSE
2927 main.log.error( "Nodes have different leaders: " +
2928 str( newLeaders ) )
2929 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002930 else:
acsmars71adceb2015-08-31 15:09:26 -07002931 newLeader = newLeaders[ 0 ]
2932
2933 # Check that each node's candidate list is the same
2934 for candidates in newAllCandidates:
2935 if set( candidates ) != set( newCandidates ):
2936 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002937 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002938
2939 # Check that the new leader is not the older leader, which was withdrawn
2940 if newLeader == oldLeader:
2941 newLeaderResult = main.FALSE
2942 main.log.error( "All nodes still see old leader: " + oldLeader +
2943 " as the current leader" )
2944
Jon Hall5cf14d52015-07-16 12:15:19 -07002945 utilities.assert_equals(
2946 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002947 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002948 onpass="Leadership election passed",
2949 onfail="Something went wrong with Leadership election" )
2950
acsmars71adceb2015-08-31 15:09:26 -07002951 main.step( "Check that that new leader was the candidate of old leader")
2952 # candidates[ 2 ] should be come the top candidate after withdrawl
2953 correctCandidateResult = main.TRUE
2954 if expectNoLeader:
2955 if newLeader == 'none':
2956 main.log.info( "No leader expected. None found. Pass" )
2957 correctCandidateResult = main.TRUE
2958 else:
2959 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2960 correctCandidateResult = main.FALSE
2961 elif newLeader != oldCandidates[ 2 ]:
2962 correctCandidateResult = main.FALSE
2963 main.log.error( "Candidate " + newLeader + " was elected. " +
2964 oldCandidates[ 2 ] + " should have had priority." )
2965
2966 utilities.assert_equals(
2967 expect=main.TRUE,
2968 actual=correctCandidateResult,
2969 onpass="Correct Candidate Elected",
2970 onfail="Incorrect Candidate Elected" )
2971
Jon Hall5cf14d52015-07-16 12:15:19 -07002972 main.step( "Run for election on old leader( just so everyone " +
2973 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002974 if oldLeaderCLI is not None:
2975 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002976 else:
acsmars71adceb2015-08-31 15:09:26 -07002977 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002978 runResult = main.FALSE
2979 utilities.assert_equals(
2980 expect=main.TRUE,
2981 actual=runResult,
2982 onpass="App re-ran for election",
2983 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07002984 main.step(
2985 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002986 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07002987 positionResult = main.TRUE
2988 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
2989
2990 # Reset and reuse the new candidate and leaders lists
2991 newAllCandidates = []
2992 newCandidates = []
2993 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002994 for i in main.activeNodes:
2995 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002996 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2997 if oldLeader not in node: # election might no have finished yet
2998 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
2999 "be sure elections are complete" )
3000 time.sleep(5)
3001 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3002 if oldLeader not in node: # election still isn't done, errors
3003 main.log.error(
3004 "Old leader was not elected on at least one node" )
3005 positionResult = main.FALSE
3006 newAllCandidates.append( node )
3007 newLeaders.append( node[ 0 ] )
3008 newCandidates = newAllCandidates[ 0 ]
3009
3010 # Check that each node has the same leader. Defines newLeader
3011 if len( set( newLeaders ) ) != 1:
3012 positionResult = main.FALSE
3013 main.log.error( "Nodes have different leaders: " +
3014 str( newLeaders ) )
3015 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003016 else:
acsmars71adceb2015-08-31 15:09:26 -07003017 newLeader = newLeaders[ 0 ]
3018
3019 # Check that each node's candidate list is the same
3020 for candidates in newAllCandidates:
3021 if set( candidates ) != set( newCandidates ):
3022 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003023 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003024
3025 # Check that the re-elected node is last on the candidate List
3026 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003027 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003028 str( newCandidates ) )
3029 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003030
3031 utilities.assert_equals(
3032 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003033 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003034 onpass="Old leader successfully re-ran for election",
3035 onfail="Something went wrong with Leadership election after " +
3036 "the old leader re-ran for election" )
3037
3038 def CASE16( self, main ):
3039 """
3040 Install Distributed Primitives app
3041 """
3042 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003043 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003044 assert main, "main not defined"
3045 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003046 assert main.CLIs, "main.CLIs not defined"
3047 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003048
3049 # Variables for the distributed primitives tests
3050 global pCounterName
3051 global iCounterName
3052 global pCounterValue
3053 global iCounterValue
3054 global onosSet
3055 global onosSetName
3056 pCounterName = "TestON-Partitions"
3057 iCounterName = "TestON-inMemory"
3058 pCounterValue = 0
3059 iCounterValue = 0
3060 onosSet = set([])
3061 onosSetName = "TestON-set"
3062
3063 description = "Install Primitives app"
3064 main.case( description )
3065 main.step( "Install Primitives app" )
3066 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003067 node = main.activeNodes[0]
3068 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003069 utilities.assert_equals( expect=main.TRUE,
3070 actual=appResults,
3071 onpass="Primitives app activated",
3072 onfail="Primitives app not activated" )
3073 time.sleep( 5 ) # To allow all nodes to activate
3074
3075 def CASE17( self, main ):
3076 """
3077 Check for basic functionality with distributed primitives
3078 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003079 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003080 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003081 assert main, "main not defined"
3082 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003083 assert main.CLIs, "main.CLIs not defined"
3084 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003085 assert pCounterName, "pCounterName not defined"
3086 assert iCounterName, "iCounterName not defined"
3087 assert onosSetName, "onosSetName not defined"
3088 # NOTE: assert fails if value is 0/None/Empty/False
3089 try:
3090 pCounterValue
3091 except NameError:
3092 main.log.error( "pCounterValue not defined, setting to 0" )
3093 pCounterValue = 0
3094 try:
3095 iCounterValue
3096 except NameError:
3097 main.log.error( "iCounterValue not defined, setting to 0" )
3098 iCounterValue = 0
3099 try:
3100 onosSet
3101 except NameError:
3102 main.log.error( "onosSet not defined, setting to empty Set" )
3103 onosSet = set([])
3104 # Variables for the distributed primitives tests. These are local only
3105 addValue = "a"
3106 addAllValue = "a b c d e f"
3107 retainValue = "c d e f"
3108
3109 description = "Check for basic functionality with distributed " +\
3110 "primitives"
3111 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003112 main.caseExplanation = "Test the methods of the distributed " +\
3113 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003114 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003115 # Partitioned counters
3116 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003117 pCounters = []
3118 threads = []
3119 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003120 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003121 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3122 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003123 args=[ pCounterName ] )
3124 pCounterValue += 1
3125 addedPValues.append( pCounterValue )
3126 threads.append( t )
3127 t.start()
3128
3129 for t in threads:
3130 t.join()
3131 pCounters.append( t.result )
3132 # Check that counter incremented numController times
3133 pCounterResults = True
3134 for i in addedPValues:
3135 tmpResult = i in pCounters
3136 pCounterResults = pCounterResults and tmpResult
3137 if not tmpResult:
3138 main.log.error( str( i ) + " is not in partitioned "
3139 "counter incremented results" )
3140 utilities.assert_equals( expect=True,
3141 actual=pCounterResults,
3142 onpass="Default counter incremented",
3143 onfail="Error incrementing default" +
3144 " counter" )
3145
Jon Halle1a3b752015-07-22 13:02:46 -07003146 main.step( "Get then Increment a default counter on each node" )
3147 pCounters = []
3148 threads = []
3149 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003150 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003151 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3152 name="counterGetAndAdd-" + str( i ),
3153 args=[ pCounterName ] )
3154 addedPValues.append( pCounterValue )
3155 pCounterValue += 1
3156 threads.append( t )
3157 t.start()
3158
3159 for t in threads:
3160 t.join()
3161 pCounters.append( t.result )
3162 # Check that counter incremented numController times
3163 pCounterResults = True
3164 for i in addedPValues:
3165 tmpResult = i in pCounters
3166 pCounterResults = pCounterResults and tmpResult
3167 if not tmpResult:
3168 main.log.error( str( i ) + " is not in partitioned "
3169 "counter incremented results" )
3170 utilities.assert_equals( expect=True,
3171 actual=pCounterResults,
3172 onpass="Default counter incremented",
3173 onfail="Error incrementing default" +
3174 " counter" )
3175
3176 main.step( "Counters we added have the correct values" )
3177 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3178 utilities.assert_equals( expect=main.TRUE,
3179 actual=incrementCheck,
3180 onpass="Added counters are correct",
3181 onfail="Added counters are incorrect" )
3182
3183 main.step( "Add -8 to then get a default counter on each node" )
3184 pCounters = []
3185 threads = []
3186 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003187 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003188 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3189 name="counterIncrement-" + str( i ),
3190 args=[ pCounterName ],
3191 kwargs={ "delta": -8 } )
3192 pCounterValue += -8
3193 addedPValues.append( pCounterValue )
3194 threads.append( t )
3195 t.start()
3196
3197 for t in threads:
3198 t.join()
3199 pCounters.append( t.result )
3200 # Check that counter incremented numController times
3201 pCounterResults = True
3202 for i in addedPValues:
3203 tmpResult = i in pCounters
3204 pCounterResults = pCounterResults and tmpResult
3205 if not tmpResult:
3206 main.log.error( str( i ) + " is not in partitioned "
3207 "counter incremented results" )
3208 utilities.assert_equals( expect=True,
3209 actual=pCounterResults,
3210 onpass="Default counter incremented",
3211 onfail="Error incrementing default" +
3212 " counter" )
3213
3214 main.step( "Add 5 to then get a default counter on each node" )
3215 pCounters = []
3216 threads = []
3217 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003218 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003219 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3220 name="counterIncrement-" + str( i ),
3221 args=[ pCounterName ],
3222 kwargs={ "delta": 5 } )
3223 pCounterValue += 5
3224 addedPValues.append( pCounterValue )
3225 threads.append( t )
3226 t.start()
3227
3228 for t in threads:
3229 t.join()
3230 pCounters.append( t.result )
3231 # Check that counter incremented numController times
3232 pCounterResults = True
3233 for i in addedPValues:
3234 tmpResult = i in pCounters
3235 pCounterResults = pCounterResults and tmpResult
3236 if not tmpResult:
3237 main.log.error( str( i ) + " is not in partitioned "
3238 "counter incremented results" )
3239 utilities.assert_equals( expect=True,
3240 actual=pCounterResults,
3241 onpass="Default counter incremented",
3242 onfail="Error incrementing default" +
3243 " counter" )
3244
3245 main.step( "Get then add 5 to a default counter on each node" )
3246 pCounters = []
3247 threads = []
3248 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003249 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003250 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3251 name="counterIncrement-" + str( i ),
3252 args=[ pCounterName ],
3253 kwargs={ "delta": 5 } )
3254 addedPValues.append( pCounterValue )
3255 pCounterValue += 5
3256 threads.append( t )
3257 t.start()
3258
3259 for t in threads:
3260 t.join()
3261 pCounters.append( t.result )
3262 # Check that counter incremented numController times
3263 pCounterResults = True
3264 for i in addedPValues:
3265 tmpResult = i in pCounters
3266 pCounterResults = pCounterResults and tmpResult
3267 if not tmpResult:
3268 main.log.error( str( i ) + " is not in partitioned "
3269 "counter incremented results" )
3270 utilities.assert_equals( expect=True,
3271 actual=pCounterResults,
3272 onpass="Default counter incremented",
3273 onfail="Error incrementing default" +
3274 " counter" )
3275
3276 main.step( "Counters we added have the correct values" )
3277 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3278 utilities.assert_equals( expect=main.TRUE,
3279 actual=incrementCheck,
3280 onpass="Added counters are correct",
3281 onfail="Added counters are incorrect" )
3282
3283 # In-Memory counters
3284 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003285 iCounters = []
3286 addedIValues = []
3287 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003288 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003289 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003290 name="icounterIncrement-" + str( i ),
3291 args=[ iCounterName ],
3292 kwargs={ "inMemory": True } )
3293 iCounterValue += 1
3294 addedIValues.append( iCounterValue )
3295 threads.append( t )
3296 t.start()
3297
3298 for t in threads:
3299 t.join()
3300 iCounters.append( t.result )
3301 # Check that counter incremented numController times
3302 iCounterResults = True
3303 for i in addedIValues:
3304 tmpResult = i in iCounters
3305 iCounterResults = iCounterResults and tmpResult
3306 if not tmpResult:
3307 main.log.error( str( i ) + " is not in the in-memory "
3308 "counter incremented results" )
3309 utilities.assert_equals( expect=True,
3310 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003311 onpass="In-memory counter incremented",
3312 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003313 " counter" )
3314
Jon Halle1a3b752015-07-22 13:02:46 -07003315 main.step( "Get then Increment a in-memory counter on each node" )
3316 iCounters = []
3317 threads = []
3318 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003319 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003320 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3321 name="counterGetAndAdd-" + str( i ),
3322 args=[ iCounterName ],
3323 kwargs={ "inMemory": True } )
3324 addedIValues.append( iCounterValue )
3325 iCounterValue += 1
3326 threads.append( t )
3327 t.start()
3328
3329 for t in threads:
3330 t.join()
3331 iCounters.append( t.result )
3332 # Check that counter incremented numController times
3333 iCounterResults = True
3334 for i in addedIValues:
3335 tmpResult = i in iCounters
3336 iCounterResults = iCounterResults and tmpResult
3337 if not tmpResult:
3338 main.log.error( str( i ) + " is not in in-memory "
3339 "counter incremented results" )
3340 utilities.assert_equals( expect=True,
3341 actual=iCounterResults,
3342 onpass="In-memory counter incremented",
3343 onfail="Error incrementing in-memory" +
3344 " counter" )
3345
3346 main.step( "Counters we added have the correct values" )
3347 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3348 utilities.assert_equals( expect=main.TRUE,
3349 actual=incrementCheck,
3350 onpass="Added counters are correct",
3351 onfail="Added counters are incorrect" )
3352
3353 main.step( "Add -8 to then get a in-memory counter on each node" )
3354 iCounters = []
3355 threads = []
3356 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003357 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003358 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3359 name="counterIncrement-" + str( i ),
3360 args=[ iCounterName ],
3361 kwargs={ "delta": -8, "inMemory": True } )
3362 iCounterValue += -8
3363 addedIValues.append( iCounterValue )
3364 threads.append( t )
3365 t.start()
3366
3367 for t in threads:
3368 t.join()
3369 iCounters.append( t.result )
3370 # Check that counter incremented numController times
3371 iCounterResults = True
3372 for i in addedIValues:
3373 tmpResult = i in iCounters
3374 iCounterResults = iCounterResults and tmpResult
3375 if not tmpResult:
3376 main.log.error( str( i ) + " is not in in-memory "
3377 "counter incremented results" )
3378 utilities.assert_equals( expect=True,
3379 actual=pCounterResults,
3380 onpass="In-memory counter incremented",
3381 onfail="Error incrementing in-memory" +
3382 " counter" )
3383
3384 main.step( "Add 5 to then get a in-memory counter on each node" )
3385 iCounters = []
3386 threads = []
3387 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003388 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003389 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3390 name="counterIncrement-" + str( i ),
3391 args=[ iCounterName ],
3392 kwargs={ "delta": 5, "inMemory": True } )
3393 iCounterValue += 5
3394 addedIValues.append( iCounterValue )
3395 threads.append( t )
3396 t.start()
3397
3398 for t in threads:
3399 t.join()
3400 iCounters.append( t.result )
3401 # Check that counter incremented numController times
3402 iCounterResults = True
3403 for i in addedIValues:
3404 tmpResult = i in iCounters
3405 iCounterResults = iCounterResults and tmpResult
3406 if not tmpResult:
3407 main.log.error( str( i ) + " is not in in-memory "
3408 "counter incremented results" )
3409 utilities.assert_equals( expect=True,
3410 actual=pCounterResults,
3411 onpass="In-memory counter incremented",
3412 onfail="Error incrementing in-memory" +
3413 " counter" )
3414
3415 main.step( "Get then add 5 to a in-memory counter on each node" )
3416 iCounters = []
3417 threads = []
3418 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003419 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003420 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3421 name="counterIncrement-" + str( i ),
3422 args=[ iCounterName ],
3423 kwargs={ "delta": 5, "inMemory": True } )
3424 addedIValues.append( iCounterValue )
3425 iCounterValue += 5
3426 threads.append( t )
3427 t.start()
3428
3429 for t in threads:
3430 t.join()
3431 iCounters.append( t.result )
3432 # Check that counter incremented numController times
3433 iCounterResults = True
3434 for i in addedIValues:
3435 tmpResult = i in iCounters
3436 iCounterResults = iCounterResults and tmpResult
3437 if not tmpResult:
3438 main.log.error( str( i ) + " is not in in-memory "
3439 "counter incremented results" )
3440 utilities.assert_equals( expect=True,
3441 actual=iCounterResults,
3442 onpass="In-memory counter incremented",
3443 onfail="Error incrementing in-memory" +
3444 " counter" )
3445
3446 main.step( "Counters we added have the correct values" )
3447 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3448 utilities.assert_equals( expect=main.TRUE,
3449 actual=incrementCheck,
3450 onpass="Added counters are correct",
3451 onfail="Added counters are incorrect" )
3452
Jon Hall5cf14d52015-07-16 12:15:19 -07003453 main.step( "Check counters are consistant across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003454 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003455 utilities.assert_equals( expect=main.TRUE,
3456 actual=consistentCounterResults,
3457 onpass="ONOS counters are consistent " +
3458 "across nodes",
3459 onfail="ONOS Counters are inconsistent " +
3460 "across nodes" )
3461
3462 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003463 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3464 incrementCheck = incrementCheck and \
3465 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003466 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003467 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003468 onpass="Added counters are correct",
3469 onfail="Added counters are incorrect" )
3470 # DISTRIBUTED SETS
3471 main.step( "Distributed Set get" )
3472 size = len( onosSet )
3473 getResponses = []
3474 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003475 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003476 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003477 name="setTestGet-" + str( i ),
3478 args=[ onosSetName ] )
3479 threads.append( t )
3480 t.start()
3481 for t in threads:
3482 t.join()
3483 getResponses.append( t.result )
3484
3485 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003486 for i in range( len( main.activeNodes ) ):
3487 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003488 if isinstance( getResponses[ i ], list):
3489 current = set( getResponses[ i ] )
3490 if len( current ) == len( getResponses[ i ] ):
3491 # no repeats
3492 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003493 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003494 " has incorrect view" +
3495 " of set " + onosSetName + ":\n" +
3496 str( getResponses[ i ] ) )
3497 main.log.debug( "Expected: " + str( onosSet ) )
3498 main.log.debug( "Actual: " + str( current ) )
3499 getResults = main.FALSE
3500 else:
3501 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003502 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003503 " has repeat elements in" +
3504 " set " + onosSetName + ":\n" +
3505 str( getResponses[ i ] ) )
3506 getResults = main.FALSE
3507 elif getResponses[ i ] == main.ERROR:
3508 getResults = main.FALSE
3509 utilities.assert_equals( expect=main.TRUE,
3510 actual=getResults,
3511 onpass="Set elements are correct",
3512 onfail="Set elements are incorrect" )
3513
3514 main.step( "Distributed Set size" )
3515 sizeResponses = []
3516 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003517 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003518 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003519 name="setTestSize-" + str( i ),
3520 args=[ onosSetName ] )
3521 threads.append( t )
3522 t.start()
3523 for t in threads:
3524 t.join()
3525 sizeResponses.append( t.result )
3526
3527 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003528 for i in range( len( main.activeNodes ) ):
3529 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003530 if size != sizeResponses[ i ]:
3531 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003532 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003533 " expected a size of " + str( size ) +
3534 " for set " + onosSetName +
3535 " but got " + str( sizeResponses[ i ] ) )
3536 utilities.assert_equals( expect=main.TRUE,
3537 actual=sizeResults,
3538 onpass="Set sizes are correct",
3539 onfail="Set sizes are incorrect" )
3540
3541 main.step( "Distributed Set add()" )
3542 onosSet.add( addValue )
3543 addResponses = []
3544 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003545 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003546 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003547 name="setTestAdd-" + str( i ),
3548 args=[ onosSetName, addValue ] )
3549 threads.append( t )
3550 t.start()
3551 for t in threads:
3552 t.join()
3553 addResponses.append( t.result )
3554
3555 # main.TRUE = successfully changed the set
3556 # main.FALSE = action resulted in no change in set
3557 # main.ERROR - Some error in executing the function
3558 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003559 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003560 if addResponses[ i ] == main.TRUE:
3561 # All is well
3562 pass
3563 elif addResponses[ i ] == main.FALSE:
3564 # Already in set, probably fine
3565 pass
3566 elif addResponses[ i ] == main.ERROR:
3567 # Error in execution
3568 addResults = main.FALSE
3569 else:
3570 # unexpected result
3571 addResults = main.FALSE
3572 if addResults != main.TRUE:
3573 main.log.error( "Error executing set add" )
3574
3575 # Check if set is still correct
3576 size = len( onosSet )
3577 getResponses = []
3578 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003579 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003580 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003581 name="setTestGet-" + str( i ),
3582 args=[ onosSetName ] )
3583 threads.append( t )
3584 t.start()
3585 for t in threads:
3586 t.join()
3587 getResponses.append( t.result )
3588 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003589 for i in range( len( main.activeNodes ) ):
3590 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003591 if isinstance( getResponses[ i ], list):
3592 current = set( getResponses[ i ] )
3593 if len( current ) == len( getResponses[ i ] ):
3594 # no repeats
3595 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003596 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003597 " of set " + onosSetName + ":\n" +
3598 str( getResponses[ i ] ) )
3599 main.log.debug( "Expected: " + str( onosSet ) )
3600 main.log.debug( "Actual: " + str( current ) )
3601 getResults = main.FALSE
3602 else:
3603 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003604 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003605 " set " + onosSetName + ":\n" +
3606 str( getResponses[ i ] ) )
3607 getResults = main.FALSE
3608 elif getResponses[ i ] == main.ERROR:
3609 getResults = main.FALSE
3610 sizeResponses = []
3611 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003612 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003613 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003614 name="setTestSize-" + str( i ),
3615 args=[ onosSetName ] )
3616 threads.append( t )
3617 t.start()
3618 for t in threads:
3619 t.join()
3620 sizeResponses.append( t.result )
3621 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003622 for i in range( len( main.activeNodes ) ):
3623 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003624 if size != sizeResponses[ i ]:
3625 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003626 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003627 " expected a size of " + str( size ) +
3628 " for set " + onosSetName +
3629 " but got " + str( sizeResponses[ i ] ) )
3630 addResults = addResults and getResults and sizeResults
3631 utilities.assert_equals( expect=main.TRUE,
3632 actual=addResults,
3633 onpass="Set add correct",
3634 onfail="Set add was incorrect" )
3635
3636 main.step( "Distributed Set addAll()" )
3637 onosSet.update( addAllValue.split() )
3638 addResponses = []
3639 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003640 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003641 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003642 name="setTestAddAll-" + str( i ),
3643 args=[ onosSetName, addAllValue ] )
3644 threads.append( t )
3645 t.start()
3646 for t in threads:
3647 t.join()
3648 addResponses.append( t.result )
3649
3650 # main.TRUE = successfully changed the set
3651 # main.FALSE = action resulted in no change in set
3652 # main.ERROR - Some error in executing the function
3653 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003654 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003655 if addResponses[ i ] == main.TRUE:
3656 # All is well
3657 pass
3658 elif addResponses[ i ] == main.FALSE:
3659 # Already in set, probably fine
3660 pass
3661 elif addResponses[ i ] == main.ERROR:
3662 # Error in execution
3663 addAllResults = main.FALSE
3664 else:
3665 # unexpected result
3666 addAllResults = main.FALSE
3667 if addAllResults != main.TRUE:
3668 main.log.error( "Error executing set addAll" )
3669
3670 # Check if set is still correct
3671 size = len( onosSet )
3672 getResponses = []
3673 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003674 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003675 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003676 name="setTestGet-" + str( i ),
3677 args=[ onosSetName ] )
3678 threads.append( t )
3679 t.start()
3680 for t in threads:
3681 t.join()
3682 getResponses.append( t.result )
3683 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003684 for i in range( len( main.activeNodes ) ):
3685 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003686 if isinstance( getResponses[ i ], list):
3687 current = set( getResponses[ i ] )
3688 if len( current ) == len( getResponses[ i ] ):
3689 # no repeats
3690 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003691 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003692 " has incorrect view" +
3693 " of set " + onosSetName + ":\n" +
3694 str( getResponses[ i ] ) )
3695 main.log.debug( "Expected: " + str( onosSet ) )
3696 main.log.debug( "Actual: " + str( current ) )
3697 getResults = main.FALSE
3698 else:
3699 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003700 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003701 " has repeat elements in" +
3702 " set " + onosSetName + ":\n" +
3703 str( getResponses[ i ] ) )
3704 getResults = main.FALSE
3705 elif getResponses[ i ] == main.ERROR:
3706 getResults = main.FALSE
3707 sizeResponses = []
3708 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003709 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003710 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003711 name="setTestSize-" + str( i ),
3712 args=[ onosSetName ] )
3713 threads.append( t )
3714 t.start()
3715 for t in threads:
3716 t.join()
3717 sizeResponses.append( t.result )
3718 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003719 for i in range( len( main.activeNodes ) ):
3720 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003721 if size != sizeResponses[ i ]:
3722 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003723 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003724 " expected a size of " + str( size ) +
3725 " for set " + onosSetName +
3726 " but got " + str( sizeResponses[ i ] ) )
3727 addAllResults = addAllResults and getResults and sizeResults
3728 utilities.assert_equals( expect=main.TRUE,
3729 actual=addAllResults,
3730 onpass="Set addAll correct",
3731 onfail="Set addAll was incorrect" )
3732
3733 main.step( "Distributed Set contains()" )
3734 containsResponses = []
3735 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003736 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003737 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003738 name="setContains-" + str( i ),
3739 args=[ onosSetName ],
3740 kwargs={ "values": addValue } )
3741 threads.append( t )
3742 t.start()
3743 for t in threads:
3744 t.join()
3745 # NOTE: This is the tuple
3746 containsResponses.append( t.result )
3747
3748 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003749 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003750 if containsResponses[ i ] == main.ERROR:
3751 containsResults = main.FALSE
3752 else:
3753 containsResults = containsResults and\
3754 containsResponses[ i ][ 1 ]
3755 utilities.assert_equals( expect=main.TRUE,
3756 actual=containsResults,
3757 onpass="Set contains is functional",
3758 onfail="Set contains failed" )
3759
3760 main.step( "Distributed Set containsAll()" )
3761 containsAllResponses = []
3762 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003763 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003764 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003765 name="setContainsAll-" + str( i ),
3766 args=[ onosSetName ],
3767 kwargs={ "values": addAllValue } )
3768 threads.append( t )
3769 t.start()
3770 for t in threads:
3771 t.join()
3772 # NOTE: This is the tuple
3773 containsAllResponses.append( t.result )
3774
3775 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003776 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003777 if containsResponses[ i ] == main.ERROR:
3778 containsResults = main.FALSE
3779 else:
3780 containsResults = containsResults and\
3781 containsResponses[ i ][ 1 ]
3782 utilities.assert_equals( expect=main.TRUE,
3783 actual=containsAllResults,
3784 onpass="Set containsAll is functional",
3785 onfail="Set containsAll failed" )
3786
3787 main.step( "Distributed Set remove()" )
3788 onosSet.remove( addValue )
3789 removeResponses = []
3790 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003791 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003792 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003793 name="setTestRemove-" + str( i ),
3794 args=[ onosSetName, addValue ] )
3795 threads.append( t )
3796 t.start()
3797 for t in threads:
3798 t.join()
3799 removeResponses.append( t.result )
3800
3801 # main.TRUE = successfully changed the set
3802 # main.FALSE = action resulted in no change in set
3803 # main.ERROR - Some error in executing the function
3804 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003805 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003806 if removeResponses[ i ] == main.TRUE:
3807 # All is well
3808 pass
3809 elif removeResponses[ i ] == main.FALSE:
3810 # not in set, probably fine
3811 pass
3812 elif removeResponses[ i ] == main.ERROR:
3813 # Error in execution
3814 removeResults = main.FALSE
3815 else:
3816 # unexpected result
3817 removeResults = main.FALSE
3818 if removeResults != main.TRUE:
3819 main.log.error( "Error executing set remove" )
3820
3821 # Check if set is still correct
3822 size = len( onosSet )
3823 getResponses = []
3824 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003825 for i in main.activeNodes:
            # Fan out setTestGet to every active node so each node's view
            # of the distributed set can be compared after the remove().
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        # Compare each node's reported contents against the locally
        # tracked reference set (onosSet).  getResponses is indexed in the
        # same order main.activeNodes was iterated above.
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number, used only for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size each node reports against the reference
        # set's size (computed before this check as len( onosSet )).
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the remove op itself, every node's contents,
        # and every node's reported size were all as expected.
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3883
        main.step( "Distributed Set removeAll()" )
        # Mirror the expected effect on the local reference set, then ask
        # every active node to remove the same batch of values.
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        # NOTE(review): "except Exception, e" is Python-2-only syntax;
        # "except Exception as e" is the 2.6+/3.x-compatible spelling.
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size each node reports matches the reference set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3983
        main.step( "Distributed Set addAll()" )
        # Mirror the expected effect on the local reference set, then ask
        # every active node to add the same batch of values.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size each node reports matches the reference set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4080
        main.step( "Distributed Set clear()" )
        # Empty the local reference set, then clear the distributed set on
        # every active node via setTestRemove with the "clear" flag.
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "],  # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct (after clear() the reference set
        # is empty, so size is expected to be 0)
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size each node reports matches the reference set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4178
        main.step( "Distributed Set addAll()" )
        # Re-populate the set after the clear() above so the following
        # retain() step has contents to operate on.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size each node reports matches the reference set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4275
        main.step( "Distributed Set retain()" )
        # retain() keeps only the given values; mirror with the local
        # intersection_update, then run setTestRemove with "retain" on
        # every active node.
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # 1-based ONOS node number for log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify the size each node reports matches the reference set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4372
        # Transactional maps
        main.step( "Partitioned Transactional maps put" )
        # Put numKeys entries through a single node, then verify the put
        # responses echoed back the expected value for every key.
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        if len( putResponses ) == 100:
            # NOTE(review): iterating putResponses and indexing with the
            # loop variable implies it is a dict keyed by entry name —
            # confirm against transactionalMapPut's return shape.
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

        main.step( "Partitioned Transactional maps get" )
        # Read every key back from every active node in parallel and check
        # each node returns the value written above.
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE: "node" here rebinds the node index set above; it now
            # holds one node's get response, not a node number.
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )
4419
        main.step( "In-memory Transactional maps put" )
        # Same put/get exercise as the partitioned map above, but against
        # the in-memory transactional map (inMemory=True).
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
        if len( putResponses ) == 100:
            # NOTE(review): assumes putResponses is a dict keyed by entry
            # name — confirm against transactionalMapPut's return shape.
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="In-Memory Transactional Map put successful",
                                 onfail="In-Memory Transactional Map put values are incorrect" )

        main.step( "In-Memory Transactional maps get" )
        # Read every key back from every active node in parallel and check
        # each node returns the value written above.
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ],
                                 kwargs={ "inMemory": True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE: "node" here rebinds the node index set above; it now
            # holds one node's get response, not a node number.
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="In-Memory Transactional Map get values were correct",
                                 onfail="In-Memory Transactional Map values incorrect" )