blob: 47689afb060ec477c1369cab177eb47609e4b30a [file] [log] [blame]
Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
2Description: This test is to determine if ONOS can handle
3 a minority of it's nodes restarting
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
Jon Hallb3ed8ed2015-10-28 16:43:55 -070012CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
Jon Hall5cf14d52015-07-16 12:15:19 -070014CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
Jon Hallb3ed8ed2015-10-28 16:43:55 -070028class HAstopNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -070029
    def __init__( self ):
        """Constructor required by the TestON framework; no per-instance
        state is needed beyond a placeholder default value."""
        # 'default' is unused by the visible test cases; kept for TestON.
        self.default = ''
32
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump

        'main' is the TestON driver object: test components (ONOSbench,
        Mininet1, Mininet2, ONOScli1..N, ONOS1..N) and the 'params'
        dictionary are attached to it by the framework.
        """
        import imp
        import pexpect
        main.log.info( "ONOS HA test: Stop a minority of ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."
        # TODO: save all the timers and output them for plotting

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        # Cluster size comes from params, but is capped by the number of
        # nodes the bench driver says are available.
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        # Load the shared Counters helper module from the path given in
        # params; a failure here aborts the whole test run.
        try:
            fileName = "Counters"
            # TODO: Maybe make a library folder somewhere?
            path = main.params[ 'imports' ][ 'path' ]
            main.Counters = imp.load_source( fileName,
                                             path + fileName + ".py" )
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        # Collect the per-node CLI and node components that TestON attached
        # to 'main'; stop at the first index that does not exist.
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME:this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet( )
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAstopNodes"
        plotName = "Plot-HA"
        # Emit a Confluence HTML macro that embeds the Jenkins plot for
        # this job into the wiki report.
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=0' +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki(graphs)

        main.step( "Creating ONOS package" )
        # copy gen-partions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
        packageResult = main.ONOSbench.onosPackage()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )
        # clean up gen-partitions file
        # Revert the custom onos-gen-partitions script in the ONOS checkout
        # via the bench shell; pexpect failures here abort the run.
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

        main.step( "Checking if ONOS is up yet" )
        # Up to two rounds: any node that is not up gets restarted and is
        # re-checked in the second round.
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " didn't start!" )
                    main.ONOSbench.onosStop( node.ip_address )
                    main.ONOSbench.onosStart( node.ip_address )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        # NOTE(review): main.log.step looks like it should be main.step,
        # matching every other step in this case — confirm.
        main.log.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        # Start one CLI session per controller in parallel.
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "App Ids check" )
        # Verify app name <-> id mapping is consistent on every active node.
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            # Dump app state from one node to aid debugging the mismatch.
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        if cliResults == main.FALSE:
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()
301
302 def CASE2( self, main ):
303 """
304 Assign devices to controllers
305 """
306 import re
Jon Halle1a3b752015-07-22 13:02:46 -0700307 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700308 assert main, "main not defined"
309 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700310 assert main.CLIs, "main.CLIs not defined"
311 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700312 assert ONOS1Port, "ONOS1Port not defined"
313 assert ONOS2Port, "ONOS2Port not defined"
314 assert ONOS3Port, "ONOS3Port not defined"
315 assert ONOS4Port, "ONOS4Port not defined"
316 assert ONOS5Port, "ONOS5Port not defined"
317 assert ONOS6Port, "ONOS6Port not defined"
318 assert ONOS7Port, "ONOS7Port not defined"
319
320 main.case( "Assigning devices to controllers" )
Jon Hall783bbf92015-07-23 14:33:19 -0700321 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700322 "and check that an ONOS node becomes the " +\
323 "master of the device."
324 main.step( "Assign switches to controllers" )
325
326 ipList = []
Jon Halle1a3b752015-07-22 13:02:46 -0700327 for i in range( main.numCtrls ):
328 ipList.append( main.nodes[ i ].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -0700329 swList = []
330 for i in range( 1, 29 ):
331 swList.append( "s" + str( i ) )
332 main.Mininet1.assignSwController( sw=swList, ip=ipList )
333
334 mastershipCheck = main.TRUE
335 for i in range( 1, 29 ):
336 response = main.Mininet1.getSwController( "s" + str( i ) )
337 try:
338 main.log.info( str( response ) )
339 except Exception:
340 main.log.info( repr( response ) )
Jon Halle1a3b752015-07-22 13:02:46 -0700341 for node in main.nodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700342 if re.search( "tcp:" + node.ip_address, response ):
343 mastershipCheck = mastershipCheck and main.TRUE
344 else:
345 main.log.error( "Error, node " + node.ip_address + " is " +
346 "not in the list of controllers s" +
347 str( i ) + " is connecting to." )
348 mastershipCheck = main.FALSE
349 utilities.assert_equals(
350 expect=main.TRUE,
351 actual=mastershipCheck,
352 onpass="Switch mastership assigned correctly",
353 onfail="Switches not assigned correctly to controllers" )
354
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually assigns a designated master controller to each of the 28
        switches with the 'device-role' CLI command, then re-reads each
        device's role to verify the assignment took effect.
        """
        import time
        # Preconditions established by CASE1
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI queries go through the first active node's session.
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluser, but will work with any sized
            # cluster: the '% main.numCtrls' wraps node indices around for
            # smaller clusters.
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = index of the designated master node, ip = its address,
                # deviceId = ONOS device id looked up from the dpid suffix.
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        # Re-read each device's role; the designated IP must now be master.
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
476
477 def CASE3( self, main ):
478 """
479 Assign intents
480 """
481 import time
482 import json
Jon Halle1a3b752015-07-22 13:02:46 -0700483 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700484 assert main, "main not defined"
485 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -0700486 assert main.CLIs, "main.CLIs not defined"
487 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -0700488 main.case( "Adding host Intents" )
Jon Hall783bbf92015-07-23 14:33:19 -0700489 main.caseExplanation = "Discover hosts by using pingall then " +\
Jon Hall5cf14d52015-07-16 12:15:19 -0700490 "assign predetermined host-to-host intents." +\
491 " After installation, check that the intent" +\
492 " is distributed to all nodes and the state" +\
493 " is INSTALLED"
494
495 # install onos-app-fwd
496 main.step( "Install reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700497 onosCli = main.CLIs[ main.activeNodes[0] ]
498 installResults = onosCli.activateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700499 utilities.assert_equals( expect=main.TRUE, actual=installResults,
500 onpass="Install fwd successful",
501 onfail="Install fwd failed" )
502
503 main.step( "Check app ids" )
504 appCheck = main.TRUE
505 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700506 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700507 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700508 name="appToIDCheck-" + str( i ),
509 args=[] )
510 threads.append( t )
511 t.start()
512
513 for t in threads:
514 t.join()
515 appCheck = appCheck and t.result
516 if appCheck != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700517 main.log.warn( onosCli.apps() )
518 main.log.warn( onosCli.appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700519 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
520 onpass="App Ids seem to be correct",
521 onfail="Something is wrong with app Ids" )
522
523 main.step( "Discovering Hosts( Via pingall for now )" )
524 # FIXME: Once we have a host discovery mechanism, use that instead
525 # REACTIVE FWD test
526 pingResult = main.FALSE
Jon Hall96091e62015-09-21 17:34:17 -0700527 passMsg = "Reactive Pingall test passed"
528 time1 = time.time()
529 pingResult = main.Mininet1.pingall()
530 time2 = time.time()
531 if not pingResult:
532 main.log.warn("First pingall failed. Trying again...")
Jon Hall5cf14d52015-07-16 12:15:19 -0700533 pingResult = main.Mininet1.pingall()
Jon Hall96091e62015-09-21 17:34:17 -0700534 passMsg += " on the second try"
535 utilities.assert_equals(
536 expect=main.TRUE,
537 actual=pingResult,
538 onpass= passMsg,
539 onfail="Reactive Pingall failed, " +
540 "one or more ping pairs failed" )
541 main.log.info( "Time for pingall: %2f seconds" %
542 ( time2 - time1 ) )
Jon Hall5cf14d52015-07-16 12:15:19 -0700543 # timeout for fwd flows
544 time.sleep( 11 )
545 # uninstall onos-app-fwd
546 main.step( "Uninstall reactive forwarding app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700547 node = main.activeNodes[0]
548 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
Jon Hall5cf14d52015-07-16 12:15:19 -0700549 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
550 onpass="Uninstall fwd successful",
551 onfail="Uninstall fwd failed" )
552
553 main.step( "Check app ids" )
554 threads = []
555 appCheck2 = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700556 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -0700557 t = main.Thread( target=main.CLIs[i].appToIDCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -0700558 name="appToIDCheck-" + str( i ),
559 args=[] )
560 threads.append( t )
561 t.start()
562
563 for t in threads:
564 t.join()
565 appCheck2 = appCheck2 and t.result
566 if appCheck2 != main.TRUE:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700567 node = main.activeNodes[0]
568 main.log.warn( main.CLIs[node].apps() )
569 main.log.warn( main.CLIs[node].appIDs() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700570 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
571 onpass="App Ids seem to be correct",
572 onfail="Something is wrong with app Ids" )
573
574 main.step( "Add host intents via cli" )
575 intentIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700576 # TODO: move the host numbers to params
577 # Maybe look at all the paths we ping?
Jon Hall5cf14d52015-07-16 12:15:19 -0700578 intentAddResult = True
579 hostResult = main.TRUE
580 for i in range( 8, 18 ):
581 main.log.info( "Adding host intent between h" + str( i ) +
582 " and h" + str( i + 10 ) )
583 host1 = "00:00:00:00:00:" + \
584 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
585 host2 = "00:00:00:00:00:" + \
586 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
587 # NOTE: getHost can return None
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700588 host1Dict = onosCli.getHost( host1 )
589 host2Dict = onosCli.getHost( host2 )
Jon Hall5cf14d52015-07-16 12:15:19 -0700590 host1Id = None
591 host2Id = None
592 if host1Dict and host2Dict:
593 host1Id = host1Dict.get( 'id', None )
594 host2Id = host2Dict.get( 'id', None )
595 if host1Id and host2Id:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700596 nodeNum = ( i % len( main.activeNodes ) )
597 node = main.activeNodes[nodeNum]
598 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
Jon Hall5cf14d52015-07-16 12:15:19 -0700599 if tmpId:
600 main.log.info( "Added intent with id: " + tmpId )
601 intentIds.append( tmpId )
602 else:
603 main.log.error( "addHostIntent returned: " +
604 repr( tmpId ) )
605 else:
606 main.log.error( "Error, getHost() failed for h" + str( i ) +
607 " and/or h" + str( i + 10 ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700608 node = main.activeNodes[0]
609 hosts = main.CLIs[node].hosts()
Jon Hall5cf14d52015-07-16 12:15:19 -0700610 main.log.warn( "Hosts output: " )
611 try:
612 main.log.warn( json.dumps( json.loads( hosts ),
613 sort_keys=True,
614 indent=4,
615 separators=( ',', ': ' ) ) )
616 except ( ValueError, TypeError ):
617 main.log.warn( repr( hosts ) )
618 hostResult = main.FALSE
619 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
620 onpass="Found a host id for each host",
621 onfail="Error looking up host ids" )
622
623 intentStart = time.time()
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700624 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700625 main.log.info( "Submitted intents: " + str( intentIds ) )
626 main.log.info( "Intents in ONOS: " + str( onosIds ) )
627 for intent in intentIds:
628 if intent in onosIds:
629 pass # intent submitted is in onos
630 else:
631 intentAddResult = False
632 if intentAddResult:
633 intentStop = time.time()
634 else:
635 intentStop = None
636 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700637 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700638 intentStates = []
639 installedCheck = True
640 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
641 count = 0
642 try:
643 for intent in json.loads( intents ):
644 state = intent.get( 'state', None )
645 if "INSTALLED" not in state:
646 installedCheck = False
647 intentId = intent.get( 'id', None )
648 intentStates.append( ( intentId, state ) )
649 except ( ValueError, TypeError ):
650 main.log.exception( "Error parsing intents" )
651 # add submitted intents not in the store
652 tmplist = [ i for i, s in intentStates ]
653 missingIntents = False
654 for i in intentIds:
655 if i not in tmplist:
656 intentStates.append( ( i, " - " ) )
657 missingIntents = True
658 intentStates.sort()
659 for i, s in intentStates:
660 count += 1
661 main.log.info( "%-6s%-15s%-15s" %
662 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700663 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700664 try:
665 missing = False
666 if leaders:
667 parsedLeaders = json.loads( leaders )
668 main.log.warn( json.dumps( parsedLeaders,
669 sort_keys=True,
670 indent=4,
671 separators=( ',', ': ' ) ) )
672 # check for all intent partitions
673 topics = []
674 for i in range( 14 ):
675 topics.append( "intent-partition-" + str( i ) )
676 main.log.debug( topics )
677 ONOStopics = [ j['topic'] for j in parsedLeaders ]
678 for topic in topics:
679 if topic not in ONOStopics:
680 main.log.error( "Error: " + topic +
681 " not in leaders" )
682 missing = True
683 else:
684 main.log.error( "leaders() returned None" )
685 except ( ValueError, TypeError ):
686 main.log.exception( "Error parsing leaders" )
687 main.log.error( repr( leaders ) )
688 # Check all nodes
689 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700690 for i in main.activeNodes:
691 response = main.CLIs[i].leaders( jsonFormat=False)
692 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
Jon Hall5cf14d52015-07-16 12:15:19 -0700693 str( response ) )
694
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700695 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700696 try:
697 if partitions :
698 parsedPartitions = json.loads( partitions )
699 main.log.warn( json.dumps( parsedPartitions,
700 sort_keys=True,
701 indent=4,
702 separators=( ',', ': ' ) ) )
703 # TODO check for a leader in all paritions
704 # TODO check for consistency among nodes
705 else:
706 main.log.error( "partitions() returned None" )
707 except ( ValueError, TypeError ):
708 main.log.exception( "Error parsing partitions" )
709 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700710 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700711 try:
712 if pendingMap :
713 parsedPending = json.loads( pendingMap )
714 main.log.warn( json.dumps( parsedPending,
715 sort_keys=True,
716 indent=4,
717 separators=( ',', ': ' ) ) )
718 # TODO check something here?
719 else:
720 main.log.error( "pendingMap() returned None" )
721 except ( ValueError, TypeError ):
722 main.log.exception( "Error parsing pending map" )
723 main.log.error( repr( pendingMap ) )
724
725 intentAddResult = bool( intentAddResult and not missingIntents and
726 installedCheck )
727 if not intentAddResult:
728 main.log.error( "Error in pushing host intents to ONOS" )
729
730 main.step( "Intent Anti-Entropy dispersion" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700731 for j in range(100):
Jon Hall5cf14d52015-07-16 12:15:19 -0700732 correct = True
733 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700734 for i in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -0700735 onosIds = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700736 ids = main.CLIs[i].getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700737 onosIds.append( ids )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700738 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
Jon Hall5cf14d52015-07-16 12:15:19 -0700739 str( sorted( onosIds ) ) )
740 if sorted( ids ) != sorted( intentIds ):
741 main.log.warn( "Set of intent IDs doesn't match" )
742 correct = False
743 break
744 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700745 intents = json.loads( main.CLIs[i].intents() )
Jon Hall5cf14d52015-07-16 12:15:19 -0700746 for intent in intents:
747 if intent[ 'state' ] != "INSTALLED":
748 main.log.warn( "Intent " + intent[ 'id' ] +
749 " is " + intent[ 'state' ] )
750 correct = False
751 break
752 if correct:
753 break
754 else:
755 time.sleep(1)
756 if not intentStop:
757 intentStop = time.time()
758 global gossipTime
759 gossipTime = intentStop - intentStart
760 main.log.info( "It took about " + str( gossipTime ) +
761 " seconds for all intents to appear in each node" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700762 gossipPeriod = int( main.params['timers']['gossip'] )
763 maxGossipTime = gossipPeriod * len( main.activeNodes )
Jon Hall5cf14d52015-07-16 12:15:19 -0700764 utilities.assert_greater_equals(
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700765 expect=maxGossipTime, actual=gossipTime,
Jon Hall5cf14d52015-07-16 12:15:19 -0700766 onpass="ECM anti-entropy for intents worked within " +
767 "expected time",
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700768 onfail="Intent ECM anti-entropy took too long. " +
769 "Expected time:{}, Actual time:{}".format( maxGossipTime,
770 gossipTime ) )
771 if gossipTime <= maxGossipTime:
Jon Hall5cf14d52015-07-16 12:15:19 -0700772 intentAddResult = True
773
774 if not intentAddResult or "key" in pendingMap:
775 import time
776 installedCheck = True
777 main.log.info( "Sleeping 60 seconds to see if intents are found" )
778 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700779 onosIds = onosCli.getAllIntentsId()
Jon Hall5cf14d52015-07-16 12:15:19 -0700780 main.log.info( "Submitted intents: " + str( intentIds ) )
781 main.log.info( "Intents in ONOS: " + str( onosIds ) )
782 # Print the intent states
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700783 intents = onosCli.intents()
Jon Hall5cf14d52015-07-16 12:15:19 -0700784 intentStates = []
785 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
786 count = 0
787 try:
788 for intent in json.loads( intents ):
789 # Iter through intents of a node
790 state = intent.get( 'state', None )
791 if "INSTALLED" not in state:
792 installedCheck = False
793 intentId = intent.get( 'id', None )
794 intentStates.append( ( intentId, state ) )
795 except ( ValueError, TypeError ):
796 main.log.exception( "Error parsing intents" )
797 # add submitted intents not in the store
798 tmplist = [ i for i, s in intentStates ]
799 for i in intentIds:
800 if i not in tmplist:
801 intentStates.append( ( i, " - " ) )
802 intentStates.sort()
803 for i, s in intentStates:
804 count += 1
805 main.log.info( "%-6s%-15s%-15s" %
806 ( str( count ), str( i ), str( s ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700807 leaders = onosCli.leaders()
Jon Hall5cf14d52015-07-16 12:15:19 -0700808 try:
809 missing = False
810 if leaders:
811 parsedLeaders = json.loads( leaders )
812 main.log.warn( json.dumps( parsedLeaders,
813 sort_keys=True,
814 indent=4,
815 separators=( ',', ': ' ) ) )
816 # check for all intent partitions
817 # check for election
818 topics = []
819 for i in range( 14 ):
820 topics.append( "intent-partition-" + str( i ) )
821 # FIXME: this should only be after we start the app
822 topics.append( "org.onosproject.election" )
823 main.log.debug( topics )
824 ONOStopics = [ j['topic'] for j in parsedLeaders ]
825 for topic in topics:
826 if topic not in ONOStopics:
827 main.log.error( "Error: " + topic +
828 " not in leaders" )
829 missing = True
830 else:
831 main.log.error( "leaders() returned None" )
832 except ( ValueError, TypeError ):
833 main.log.exception( "Error parsing leaders" )
834 main.log.error( repr( leaders ) )
835 # Check all nodes
836 if missing:
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700837 for i in main.activeNodes:
838 node = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -0700839 response = node.leaders( jsonFormat=False)
840 main.log.warn( str( node.name ) + " leaders output: \n" +
841 str( response ) )
842
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700843 partitions = onosCli.partitions()
Jon Hall5cf14d52015-07-16 12:15:19 -0700844 try:
845 if partitions :
846 parsedPartitions = json.loads( partitions )
847 main.log.warn( json.dumps( parsedPartitions,
848 sort_keys=True,
849 indent=4,
850 separators=( ',', ': ' ) ) )
851 # TODO check for a leader in all paritions
852 # TODO check for consistency among nodes
853 else:
854 main.log.error( "partitions() returned None" )
855 except ( ValueError, TypeError ):
856 main.log.exception( "Error parsing partitions" )
857 main.log.error( repr( partitions ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -0700858 pendingMap = onosCli.pendingMap()
Jon Hall5cf14d52015-07-16 12:15:19 -0700859 try:
860 if pendingMap :
861 parsedPending = json.loads( pendingMap )
862 main.log.warn( json.dumps( parsedPending,
863 sort_keys=True,
864 indent=4,
865 separators=( ',', ': ' ) ) )
866 # TODO check something here?
867 else:
868 main.log.error( "pendingMap() returned None" )
869 except ( ValueError, TypeError ):
870 main.log.exception( "Error parsing pending map" )
871 main.log.error( repr( pendingMap ) )
872
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Verifies dataplane connectivity for the host intents (h<i> <-> h<i+10>
        for i in 8..17), then:
          1. asserts every intent reaches the INSTALLED state (polling up to
             ~40 seconds),
          2. checks that each "intent-partition-N" topic appears in the ONOS
             leaders output,
          3. dumps partitions and the pending map for debugging.
        If any intent was not INSTALLED, waits 60 seconds, re-dumps state
        (intents, leaders, partitions, pending map, flow rules) and repeats
        the ping check.

        Relies on state set up by earlier cases: main.CLIs, main.nodes,
        main.activeNodes, main.numCtrls and the Mininet topology with hosts
        h8..h27 -- presumably populated by CASE1-CASE3; verify against the
        test flow if reusing this case standalone.
        """
        import json
        import time
        # Sanity-check that earlier cases initialized the shared state.
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                                "functionality and check the state of " +\
                                "the intent"
        main.step( "Ping across added host intents" )
        # Use the first active node's CLI for all single-node queries below.
        onosCli = main.CLIs[ main.activeNodes[0] ]
        PingResult = main.TRUE
        # Each intent connects h<i> to h<i+10>; ping every such pair.
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # On failure, dump the intents from one node to aid debugging.
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                # intents() output was not valid JSON (or was None).
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until every intent reports INSTALLED, up to 40 one-second tries.
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                # 14 intent partitions expected -- presumably matches the ONOS
                # partition count for this cluster; confirm if cluster size changes.
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            # Dump raw leaders output from every active node for comparison.
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If any intent never reached INSTALLED, give the cluster another
        # minute and dump all of the same state again for debugging.
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    # NOTE: unlike the earlier leadership step, the election
                    # topic IS checked here.
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
        # Print flowrules
        node = main.activeNodes[0]
        main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        # NOTE(review): the 60 s sleep only ran if installedCheck was False;
        # when all intents installed on the first pass this re-ping is immediate.
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

1154 def CASE5( self, main ):
1155 """
1156 Reading state of ONOS
1157 """
1158 import json
1159 import time
Jon Halle1a3b752015-07-22 13:02:46 -07001160 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001161 assert main, "main not defined"
1162 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001163 assert main.CLIs, "main.CLIs not defined"
1164 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001165
1166 main.case( "Setting up and gathering data for current state" )
1167 # The general idea for this test case is to pull the state of
1168 # ( intents,flows, topology,... ) from each ONOS node
1169 # We can then compare them with each other and also with past states
1170
1171 main.step( "Check that each switch has a master" )
1172 global mastershipState
1173 mastershipState = '[]'
1174
1175 # Assert that each device has a master
1176 rolesNotNull = main.TRUE
1177 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001178 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001179 t = main.Thread( target=main.CLIs[i].rolesNotNull,
Jon Hall5cf14d52015-07-16 12:15:19 -07001180 name="rolesNotNull-" + str( i ),
1181 args=[] )
1182 threads.append( t )
1183 t.start()
1184
1185 for t in threads:
1186 t.join()
1187 rolesNotNull = rolesNotNull and t.result
1188 utilities.assert_equals(
1189 expect=main.TRUE,
1190 actual=rolesNotNull,
1191 onpass="Each device has a master",
1192 onfail="Some devices don't have a master assigned" )
1193
1194 main.step( "Get the Mastership of each switch from each controller" )
1195 ONOSMastership = []
1196 mastershipCheck = main.FALSE
1197 consistentMastership = True
1198 rolesResults = True
1199 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001200 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001201 t = main.Thread( target=main.CLIs[i].roles,
Jon Hall5cf14d52015-07-16 12:15:19 -07001202 name="roles-" + str( i ),
1203 args=[] )
1204 threads.append( t )
1205 t.start()
1206
1207 for t in threads:
1208 t.join()
1209 ONOSMastership.append( t.result )
1210
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001211 for i in range( len( ONOSMastership ) ):
1212 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001213 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001214 main.log.error( "Error in getting ONOS" + node + " roles" )
1215 main.log.warn( "ONOS" + node + " mastership response: " +
1216 repr( ONOSMastership[i] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001217 rolesResults = False
1218 utilities.assert_equals(
1219 expect=True,
1220 actual=rolesResults,
1221 onpass="No error in reading roles output",
1222 onfail="Error in reading roles from ONOS" )
1223
1224 main.step( "Check for consistency in roles from each controller" )
1225 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1226 main.log.info(
1227 "Switch roles are consistent across all ONOS nodes" )
1228 else:
1229 consistentMastership = False
1230 utilities.assert_equals(
1231 expect=True,
1232 actual=consistentMastership,
1233 onpass="Switch roles are consistent across all ONOS nodes",
1234 onfail="ONOS nodes have different views of switch roles" )
1235
1236 if rolesResults and not consistentMastership:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001237 for i in range( len( main.activeNodes ) ):
1238 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001239 try:
1240 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001241 "ONOS" + node + " roles: ",
Jon Hall5cf14d52015-07-16 12:15:19 -07001242 json.dumps(
1243 json.loads( ONOSMastership[ i ] ),
1244 sort_keys=True,
1245 indent=4,
1246 separators=( ',', ': ' ) ) )
1247 except ( ValueError, TypeError ):
1248 main.log.warn( repr( ONOSMastership[ i ] ) )
1249 elif rolesResults and consistentMastership:
1250 mastershipCheck = main.TRUE
1251 mastershipState = ONOSMastership[ 0 ]
1252
1253 main.step( "Get the intents from each controller" )
1254 global intentState
1255 intentState = []
1256 ONOSIntents = []
1257 intentCheck = main.FALSE
1258 consistentIntents = True
1259 intentsResults = True
1260 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001261 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001262 t = main.Thread( target=main.CLIs[i].intents,
Jon Hall5cf14d52015-07-16 12:15:19 -07001263 name="intents-" + str( i ),
1264 args=[],
1265 kwargs={ 'jsonFormat': True } )
1266 threads.append( t )
1267 t.start()
1268
1269 for t in threads:
1270 t.join()
1271 ONOSIntents.append( t.result )
1272
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001273 for i in range( len( ONOSIntents ) ):
1274 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001275 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001276 main.log.error( "Error in getting ONOS" + node + " intents" )
1277 main.log.warn( "ONOS" + node + " intents response: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001278 repr( ONOSIntents[ i ] ) )
1279 intentsResults = False
1280 utilities.assert_equals(
1281 expect=True,
1282 actual=intentsResults,
1283 onpass="No error in reading intents output",
1284 onfail="Error in reading intents from ONOS" )
1285
1286 main.step( "Check for consistency in Intents from each controller" )
1287 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1288 main.log.info( "Intents are consistent across all ONOS " +
1289 "nodes" )
1290 else:
1291 consistentIntents = False
1292 main.log.error( "Intents not consistent" )
1293 utilities.assert_equals(
1294 expect=True,
1295 actual=consistentIntents,
1296 onpass="Intents are consistent across all ONOS nodes",
1297 onfail="ONOS nodes have different views of intents" )
1298
1299 if intentsResults:
1300 # Try to make it easy to figure out what is happening
1301 #
1302 # Intent ONOS1 ONOS2 ...
1303 # 0x01 INSTALLED INSTALLING
1304 # ... ... ...
1305 # ... ... ...
1306 title = " Id"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001307 for n in main.activeNodes:
Jon Hall5cf14d52015-07-16 12:15:19 -07001308 title += " " * 10 + "ONOS" + str( n + 1 )
1309 main.log.warn( title )
1310 # get all intent keys in the cluster
1311 keys = []
1312 for nodeStr in ONOSIntents:
1313 node = json.loads( nodeStr )
1314 for intent in node:
1315 keys.append( intent.get( 'id' ) )
1316 keys = set( keys )
1317 for key in keys:
1318 row = "%-13s" % key
1319 for nodeStr in ONOSIntents:
1320 node = json.loads( nodeStr )
1321 for intent in node:
1322 if intent.get( 'id', "Error" ) == key:
1323 row += "%-15s" % intent.get( 'state' )
1324 main.log.warn( row )
1325 # End table view
1326
1327 if intentsResults and not consistentIntents:
1328 # print the json objects
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001329 n = str( main.activeNodes[-1] + 1 )
1330 main.log.debug( "ONOS" + n + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001331 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1332 sort_keys=True,
1333 indent=4,
1334 separators=( ',', ': ' ) ) )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001335 for i in range( len( ONOSIntents ) ):
1336 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001337 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001338 main.log.debug( "ONOS" + node + " intents: " )
Jon Hall5cf14d52015-07-16 12:15:19 -07001339 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1340 sort_keys=True,
1341 indent=4,
1342 separators=( ',', ': ' ) ) )
1343 else:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001344 main.log.debug( "ONOS" + node + " intents match ONOS" +
1345 n + " intents" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001346 elif intentsResults and consistentIntents:
1347 intentCheck = main.TRUE
1348 intentState = ONOSIntents[ 0 ]
1349
1350 main.step( "Get the flows from each controller" )
1351 global flowState
1352 flowState = []
1353 ONOSFlows = []
1354 ONOSFlowsJson = []
1355 flowCheck = main.FALSE
1356 consistentFlows = True
1357 flowsResults = True
1358 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001359 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001360 t = main.Thread( target=main.CLIs[i].flows,
Jon Hall5cf14d52015-07-16 12:15:19 -07001361 name="flows-" + str( i ),
1362 args=[],
1363 kwargs={ 'jsonFormat': True } )
1364 threads.append( t )
1365 t.start()
1366
1367 # NOTE: Flows command can take some time to run
1368 time.sleep(30)
1369 for t in threads:
1370 t.join()
1371 result = t.result
1372 ONOSFlows.append( result )
1373
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001374 for i in range( len( ONOSFlows ) ):
1375 num = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001376 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1377 main.log.error( "Error in getting ONOS" + num + " flows" )
1378 main.log.warn( "ONOS" + num + " flows response: " +
1379 repr( ONOSFlows[ i ] ) )
1380 flowsResults = False
1381 ONOSFlowsJson.append( None )
1382 else:
1383 try:
1384 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1385 except ( ValueError, TypeError ):
1386 # FIXME: change this to log.error?
1387 main.log.exception( "Error in parsing ONOS" + num +
1388 " response as json." )
1389 main.log.error( repr( ONOSFlows[ i ] ) )
1390 ONOSFlowsJson.append( None )
1391 flowsResults = False
1392 utilities.assert_equals(
1393 expect=True,
1394 actual=flowsResults,
1395 onpass="No error in reading flows output",
1396 onfail="Error in reading flows from ONOS" )
1397
1398 main.step( "Check for consistency in Flows from each controller" )
1399 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1400 if all( tmp ):
1401 main.log.info( "Flow count is consistent across all ONOS nodes" )
1402 else:
1403 consistentFlows = False
1404 utilities.assert_equals(
1405 expect=True,
1406 actual=consistentFlows,
1407 onpass="The flow count is consistent across all ONOS nodes",
1408 onfail="ONOS nodes have different flow counts" )
1409
1410 if flowsResults and not consistentFlows:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001411 for i in range( len( ONOSFlows ) ):
1412 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001413 try:
1414 main.log.warn(
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001415 "ONOS" + node + " flows: " +
Jon Hall5cf14d52015-07-16 12:15:19 -07001416 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1417 indent=4, separators=( ',', ': ' ) ) )
1418 except ( ValueError, TypeError ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001419 main.log.warn( "ONOS" + node + " flows: " +
1420 repr( ONOSFlows[ i ] ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001421 elif flowsResults and consistentFlows:
1422 flowCheck = main.TRUE
1423 flowState = ONOSFlows[ 0 ]
1424
1425 main.step( "Get the OF Table entries" )
1426 global flows
1427 flows = []
1428 for i in range( 1, 29 ):
GlennRC68467eb2015-11-16 18:01:01 -08001429 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001430 if flowCheck == main.FALSE:
1431 for table in flows:
1432 main.log.warn( table )
1433 # TODO: Compare switch flow tables with ONOS flow tables
1434
1435 main.step( "Start continuous pings" )
1436 main.Mininet2.pingLong(
1437 src=main.params[ 'PING' ][ 'source1' ],
1438 target=main.params[ 'PING' ][ 'target1' ],
1439 pingTime=500 )
1440 main.Mininet2.pingLong(
1441 src=main.params[ 'PING' ][ 'source2' ],
1442 target=main.params[ 'PING' ][ 'target2' ],
1443 pingTime=500 )
1444 main.Mininet2.pingLong(
1445 src=main.params[ 'PING' ][ 'source3' ],
1446 target=main.params[ 'PING' ][ 'target3' ],
1447 pingTime=500 )
1448 main.Mininet2.pingLong(
1449 src=main.params[ 'PING' ][ 'source4' ],
1450 target=main.params[ 'PING' ][ 'target4' ],
1451 pingTime=500 )
1452 main.Mininet2.pingLong(
1453 src=main.params[ 'PING' ][ 'source5' ],
1454 target=main.params[ 'PING' ][ 'target5' ],
1455 pingTime=500 )
1456 main.Mininet2.pingLong(
1457 src=main.params[ 'PING' ][ 'source6' ],
1458 target=main.params[ 'PING' ][ 'target6' ],
1459 pingTime=500 )
1460 main.Mininet2.pingLong(
1461 src=main.params[ 'PING' ][ 'source7' ],
1462 target=main.params[ 'PING' ][ 'target7' ],
1463 pingTime=500 )
1464 main.Mininet2.pingLong(
1465 src=main.params[ 'PING' ][ 'source8' ],
1466 target=main.params[ 'PING' ][ 'target8' ],
1467 pingTime=500 )
1468 main.Mininet2.pingLong(
1469 src=main.params[ 'PING' ][ 'source9' ],
1470 target=main.params[ 'PING' ][ 'target9' ],
1471 pingTime=500 )
1472 main.Mininet2.pingLong(
1473 src=main.params[ 'PING' ][ 'source10' ],
1474 target=main.params[ 'PING' ][ 'target10' ],
1475 pingTime=500 )
1476
1477 main.step( "Collecting topology information from ONOS" )
1478 devices = []
1479 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001480 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001481 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07001482 name="devices-" + str( i ),
1483 args=[ ] )
1484 threads.append( t )
1485 t.start()
1486
1487 for t in threads:
1488 t.join()
1489 devices.append( t.result )
1490 hosts = []
1491 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001492 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001493 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07001494 name="hosts-" + str( i ),
1495 args=[ ] )
1496 threads.append( t )
1497 t.start()
1498
1499 for t in threads:
1500 t.join()
1501 try:
1502 hosts.append( json.loads( t.result ) )
1503 except ( ValueError, TypeError ):
1504 # FIXME: better handling of this, print which node
1505 # Maybe use thread name?
1506 main.log.exception( "Error parsing json output of hosts" )
1507 # FIXME: should this be an empty json object instead?
1508 hosts.append( None )
1509
1510 ports = []
1511 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001512 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001513 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07001514 name="ports-" + str( i ),
1515 args=[ ] )
1516 threads.append( t )
1517 t.start()
1518
1519 for t in threads:
1520 t.join()
1521 ports.append( t.result )
1522 links = []
1523 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001524 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001525 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07001526 name="links-" + str( i ),
1527 args=[ ] )
1528 threads.append( t )
1529 t.start()
1530
1531 for t in threads:
1532 t.join()
1533 links.append( t.result )
1534 clusters = []
1535 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001536 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07001537 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07001538 name="clusters-" + str( i ),
1539 args=[ ] )
1540 threads.append( t )
1541 t.start()
1542
1543 for t in threads:
1544 t.join()
1545 clusters.append( t.result )
1546 # Compare json objects for hosts and dataplane clusters
1547
1548 # hosts
1549 main.step( "Host view is consistent across ONOS nodes" )
1550 consistentHostsResult = main.TRUE
1551 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001552 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001553 if "Error" not in hosts[ controller ]:
1554 if hosts[ controller ] == hosts[ 0 ]:
1555 continue
1556 else: # hosts not consistent
1557 main.log.error( "hosts from ONOS" +
1558 controllerStr +
1559 " is inconsistent with ONOS1" )
1560 main.log.warn( repr( hosts[ controller ] ) )
1561 consistentHostsResult = main.FALSE
1562
1563 else:
1564 main.log.error( "Error in getting ONOS hosts from ONOS" +
1565 controllerStr )
1566 consistentHostsResult = main.FALSE
1567 main.log.warn( "ONOS" + controllerStr +
1568 " hosts response: " +
1569 repr( hosts[ controller ] ) )
1570 utilities.assert_equals(
1571 expect=main.TRUE,
1572 actual=consistentHostsResult,
1573 onpass="Hosts view is consistent across all ONOS nodes",
1574 onfail="ONOS nodes have different views of hosts" )
1575
1576 main.step( "Each host has an IP address" )
1577 ipResult = main.TRUE
1578 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001579 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001580 for host in hosts[ controller ]:
1581 if not host.get( 'ipAddresses', [ ] ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001582 main.log.error( "Error with host ips on controller" +
Jon Hall5cf14d52015-07-16 12:15:19 -07001583 controllerStr + ": " + str( host ) )
1584 ipResult = main.FALSE
1585 utilities.assert_equals(
1586 expect=main.TRUE,
1587 actual=ipResult,
1588 onpass="The ips of the hosts aren't empty",
1589 onfail="The ip of at least one host is missing" )
1590
1591 # Strongly connected clusters of devices
1592 main.step( "Cluster view is consistent across ONOS nodes" )
1593 consistentClustersResult = main.TRUE
1594 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001595 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001596 if "Error" not in clusters[ controller ]:
1597 if clusters[ controller ] == clusters[ 0 ]:
1598 continue
1599 else: # clusters not consistent
1600 main.log.error( "clusters from ONOS" + controllerStr +
1601 " is inconsistent with ONOS1" )
1602 consistentClustersResult = main.FALSE
1603
1604 else:
1605 main.log.error( "Error in getting dataplane clusters " +
1606 "from ONOS" + controllerStr )
1607 consistentClustersResult = main.FALSE
1608 main.log.warn( "ONOS" + controllerStr +
1609 " clusters response: " +
1610 repr( clusters[ controller ] ) )
1611 utilities.assert_equals(
1612 expect=main.TRUE,
1613 actual=consistentClustersResult,
1614 onpass="Clusters view is consistent across all ONOS nodes",
1615 onfail="ONOS nodes have different views of clusters" )
1616 # there should always only be one cluster
1617 main.step( "Cluster view correct across ONOS nodes" )
1618 try:
1619 numClusters = len( json.loads( clusters[ 0 ] ) )
1620 except ( ValueError, TypeError ):
1621 main.log.exception( "Error parsing clusters[0]: " +
1622 repr( clusters[ 0 ] ) )
1623 clusterResults = main.FALSE
1624 if numClusters == 1:
1625 clusterResults = main.TRUE
1626 utilities.assert_equals(
1627 expect=1,
1628 actual=numClusters,
1629 onpass="ONOS shows 1 SCC",
1630 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1631
1632 main.step( "Comparing ONOS topology to MN" )
1633 devicesResults = main.TRUE
1634 linksResults = main.TRUE
1635 hostsResults = main.TRUE
1636 mnSwitches = main.Mininet1.getSwitches()
1637 mnLinks = main.Mininet1.getLinks()
1638 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001639 for controller in main.activeNodes:
1640 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07001641 if devices[ controller ] and ports[ controller ] and\
1642 "Error" not in devices[ controller ] and\
1643 "Error" not in ports[ controller ]:
1644
1645 currentDevicesResult = main.Mininet1.compareSwitches(
1646 mnSwitches,
1647 json.loads( devices[ controller ] ),
1648 json.loads( ports[ controller ] ) )
1649 else:
1650 currentDevicesResult = main.FALSE
1651 utilities.assert_equals( expect=main.TRUE,
1652 actual=currentDevicesResult,
1653 onpass="ONOS" + controllerStr +
1654 " Switches view is correct",
1655 onfail="ONOS" + controllerStr +
1656 " Switches view is incorrect" )
1657 if links[ controller ] and "Error" not in links[ controller ]:
1658 currentLinksResult = main.Mininet1.compareLinks(
1659 mnSwitches, mnLinks,
1660 json.loads( links[ controller ] ) )
1661 else:
1662 currentLinksResult = main.FALSE
1663 utilities.assert_equals( expect=main.TRUE,
1664 actual=currentLinksResult,
1665 onpass="ONOS" + controllerStr +
1666 " links view is correct",
1667 onfail="ONOS" + controllerStr +
1668 " links view is incorrect" )
1669
1670 if hosts[ controller ] or "Error" not in hosts[ controller ]:
1671 currentHostsResult = main.Mininet1.compareHosts(
1672 mnHosts,
1673 hosts[ controller ] )
1674 else:
1675 currentHostsResult = main.FALSE
1676 utilities.assert_equals( expect=main.TRUE,
1677 actual=currentHostsResult,
1678 onpass="ONOS" + controllerStr +
1679 " hosts exist in Mininet",
1680 onfail="ONOS" + controllerStr +
1681 " hosts don't match Mininet" )
1682
1683 devicesResults = devicesResults and currentDevicesResult
1684 linksResults = linksResults and currentLinksResult
1685 hostsResults = hostsResults and currentHostsResult
1686
1687 main.step( "Device information is correct" )
1688 utilities.assert_equals(
1689 expect=main.TRUE,
1690 actual=devicesResults,
1691 onpass="Device information is correct",
1692 onfail="Device information is incorrect" )
1693
1694 main.step( "Links are correct" )
1695 utilities.assert_equals(
1696 expect=main.TRUE,
1697 actual=linksResults,
1698 onpass="Link are correct",
1699 onfail="Links are incorrect" )
1700
1701 main.step( "Hosts are correct" )
1702 utilities.assert_equals(
1703 expect=main.TRUE,
1704 actual=hostsResults,
1705 onpass="Hosts are correct",
1706 onfail="Hosts are incorrect" )
1707
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001708 def CASE61( self, main ):
Jon Hall5cf14d52015-07-16 12:15:19 -07001709 """
1710 The Failure case.
1711 """
Jon Halle1a3b752015-07-22 13:02:46 -07001712 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07001713 assert main, "main not defined"
1714 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07001715 assert main.CLIs, "main.CLIs not defined"
1716 assert main.nodes, "main.nodes not defined"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001717 main.case( "Stop minority of ONOS nodes" )
Jon Hall96091e62015-09-21 17:34:17 -07001718
1719 main.step( "Checking ONOS Logs for errors" )
1720 for node in main.nodes:
1721 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1722 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1723
Jon Hall3b489db2015-10-05 14:38:37 -07001724 n = len( main.nodes ) # Number of nodes
1725 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1726 main.kill = [ 0 ] # ONOS node to kill, listed by index in main.nodes
1727 if n > 3:
1728 main.kill.append( p - 1 )
1729 # NOTE: This only works for cluster sizes of 3,5, or 7.
1730
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001731 main.step( "Stopping " + str( len( main.kill ) ) + " ONOS nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07001732 killResults = main.TRUE
1733 for i in main.kill:
1734 killResults = killResults and\
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001735 main.ONOSbench.onosStop( main.nodes[i].ip_address )
1736 main.activeNodes.remove( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001737 utilities.assert_equals( expect=main.TRUE, actual=killResults,
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001738 onpass="ONOS nodes stopped successfully",
1739 onfail="ONOS nodes NOT successfully stopped" )
1740
1741 def CASE62( self, main ):
1742 """
1743 The bring up stopped nodes
1744 """
1745 import time
1746 assert main.numCtrls, "main.numCtrls not defined"
1747 assert main, "main not defined"
1748 assert utilities.assert_equals, "utilities.assert_equals not defined"
1749 assert main.CLIs, "main.CLIs not defined"
1750 assert main.nodes, "main.nodes not defined"
1751 assert main.kill, "main.kill not defined"
1752 main.case( "Restart minority of ONOS nodes" )
1753
1754 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
1755 startResults = main.TRUE
1756 restartTime = time.time()
1757 for i in main.kill:
1758 startResults = startResults and\
1759 main.ONOSbench.onosStart( main.nodes[i].ip_address )
1760 utilities.assert_equals( expect=main.TRUE, actual=startResults,
1761 onpass="ONOS nodes started successfully",
1762 onfail="ONOS nodes NOT successfully started" )
Jon Hall5cf14d52015-07-16 12:15:19 -07001763
1764 main.step( "Checking if ONOS is up yet" )
1765 count = 0
1766 onosIsupResult = main.FALSE
1767 while onosIsupResult == main.FALSE and count < 10:
Jon Hall3b489db2015-10-05 14:38:37 -07001768 onosIsupResult = main.TRUE
1769 for i in main.kill:
1770 onosIsupResult = onosIsupResult and\
1771 main.ONOSbench.isup( main.nodes[i].ip_address )
Jon Hall5cf14d52015-07-16 12:15:19 -07001772 count = count + 1
Jon Hall5cf14d52015-07-16 12:15:19 -07001773 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1774 onpass="ONOS restarted successfully",
1775 onfail="ONOS restart NOT successful" )
1776
Jon Halle1a3b752015-07-22 13:02:46 -07001777 main.step( "Restarting ONOS main.CLIs" )
Jon Hall3b489db2015-10-05 14:38:37 -07001778 cliResults = main.TRUE
1779 for i in main.kill:
1780 cliResults = cliResults and\
1781 main.CLIs[i].startOnosCli( main.nodes[i].ip_address )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001782 main.activeNodes.append( i )
Jon Hall5cf14d52015-07-16 12:15:19 -07001783 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1784 onpass="ONOS cli restarted",
1785 onfail="ONOS cli did not restart" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001786 main.activeNodes.sort()
1787 try:
1788 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1789 "List of active nodes has duplicates, this likely indicates something was run out of order"
1790 except AssertionError:
1791 main.log.exception( "" )
1792 main.cleanup()
1793 main.exit()
Jon Hall5cf14d52015-07-16 12:15:19 -07001794
1795 # Grab the time of restart so we chan check how long the gossip
1796 # protocol has had time to work
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001797 main.restartTime = time.time() - restartTime
Jon Hall5cf14d52015-07-16 12:15:19 -07001798 main.log.debug( "Restart time: " + str( main.restartTime ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001799 # TODO: MAke this configurable. Also, we are breaking the above timer
1800 time.sleep( 60 )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07001801 node = main.activeNodes[0]
1802 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1803 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1804 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07001805
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        Verifies that the surviving cluster kept its state while the nodes
        in main.kill were down: every switch still has a master; mastership,
        intents and flow tables are consistent across the active nodes and
        unchanged from the reads taken before the failure; and leadership
        election still works and did not elect a stopped node.

        NOTE(review): intentState and flows are read here but assigned in
        CASE5 (TestON executes cases in a shared namespace); CASE5 must have
        run and passed for this case to work -- confirm before reordering.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.kill is normally set by CASE61; default to an empty list so
        # this case can also run without a preceding failure case
        try:
            main.kill
        except AttributeError:
            main.kill = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query each active node in parallel threads
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Sanity-check each node's raw roles output before comparing
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # Dump each node's view to help debug a mismatch
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE: compares the sorted characters of each node's raw json
        #       string -- a cheap equality check, not a semantic diff
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent        ONOS1        ONOS2    ...
            #  0x01     INSTALLED    INSTALLING
            #  ...           ...          ...
            #  ...           ...          ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #       maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        if intentState and intentState == ONOSIntents[ 0 ]:
            sameIntents = main.TRUE
            main.log.info( "Intents are consistent with before failure" )
        # TODO: possibly the states have changed? we may need to figure out
        #       what the acceptable states are
        elif len( intentState ) == len( ONOSIntents[ 0 ] ):
            # Same length but not byte-equal: fall back to a per-intent
            # membership check after parsing both snapshots
            sameIntents = main.TRUE
            try:
                before = json.loads( intentState )
                after = json.loads( ONOSIntents[ 0 ] )
                for intent in before:
                    if intent not in after:
                        sameIntents = main.FALSE
                        main.log.debug( "Intent is not currently in ONOS " +
                                        "(at least in the same form):" )
                        main.log.debug( json.dumps( intent ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        if sameIntents == main.FALSE:
            try:
                main.log.debug( "ONOS intents before: " )
                main.log.debug( json.dumps( json.loads( intentState ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
                main.log.debug( "Current ONOS intents: " )
                main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                            sort_keys=True, indent=4,
                                            separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Exception printing intents" )
                main.log.debug( repr( ONOSIntents[0] ) )
                main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        # NOTE(review): 'flows' is assumed to hold the per-switch flow
        #               tables captured in CASE5 -- confirm CASE5 ran first
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
            if FlowTables == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )

        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes in main.kill; the reported leader should not be
        # one of them
        restarted = []
        for i in main.kill:
            restarted.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in restarted:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was restarted" )
                leaderResult = main.FALSE
        # Every active node must report the same single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2122
2123 def CASE8( self, main ):
2124 """
2125 Compare topo
2126 """
2127 import json
2128 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002129 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002130 assert main, "main not defined"
2131 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002132 assert main.CLIs, "main.CLIs not defined"
2133 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002134
2135 main.case( "Compare ONOS Topology view to Mininet topology" )
Jon Hall783bbf92015-07-23 14:33:19 -07002136 main.caseExplanation = "Compare topology objects between Mininet" +\
Jon Hall5cf14d52015-07-16 12:15:19 -07002137 " and ONOS"
Jon Hall5cf14d52015-07-16 12:15:19 -07002138 topoResult = main.FALSE
2139 elapsed = 0
2140 count = 0
Jon Halle9b1fa32015-12-08 15:32:21 -08002141 main.step( "Comparing ONOS topology to MN topology" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002142 startTime = time.time()
2143 # Give time for Gossip to work
Jon Halle9b1fa32015-12-08 15:32:21 -08002144 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
Jon Hall96091e62015-09-21 17:34:17 -07002145 devicesResults = main.TRUE
2146 linksResults = main.TRUE
2147 hostsResults = main.TRUE
2148 hostAttachmentResults = True
Jon Hall5cf14d52015-07-16 12:15:19 -07002149 count += 1
2150 cliStart = time.time()
2151 devices = []
2152 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002153 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002154 t = main.Thread( target=main.CLIs[i].devices,
Jon Hall5cf14d52015-07-16 12:15:19 -07002155 name="devices-" + str( i ),
2156 args=[ ] )
2157 threads.append( t )
2158 t.start()
2159
2160 for t in threads:
2161 t.join()
2162 devices.append( t.result )
2163 hosts = []
2164 ipResult = main.TRUE
2165 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002166 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002167 t = main.Thread( target=main.CLIs[i].hosts,
Jon Hall5cf14d52015-07-16 12:15:19 -07002168 name="hosts-" + str( i ),
2169 args=[ ] )
2170 threads.append( t )
2171 t.start()
2172
2173 for t in threads:
2174 t.join()
2175 try:
2176 hosts.append( json.loads( t.result ) )
2177 except ( ValueError, TypeError ):
2178 main.log.exception( "Error parsing hosts results" )
2179 main.log.error( repr( t.result ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002180 hosts.append( [] )
Jon Hall5cf14d52015-07-16 12:15:19 -07002181 for controller in range( 0, len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002182 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002183 for host in hosts[ controller ]:
2184 if host is None or host.get( 'ipAddresses', [] ) == []:
2185 main.log.error(
2186 "DEBUG:Error with host ipAddresses on controller" +
2187 controllerStr + ": " + str( host ) )
2188 ipResult = main.FALSE
2189 ports = []
2190 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002191 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002192 t = main.Thread( target=main.CLIs[i].ports,
Jon Hall5cf14d52015-07-16 12:15:19 -07002193 name="ports-" + str( i ),
2194 args=[ ] )
2195 threads.append( t )
2196 t.start()
2197
2198 for t in threads:
2199 t.join()
2200 ports.append( t.result )
2201 links = []
2202 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002203 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002204 t = main.Thread( target=main.CLIs[i].links,
Jon Hall5cf14d52015-07-16 12:15:19 -07002205 name="links-" + str( i ),
2206 args=[ ] )
2207 threads.append( t )
2208 t.start()
2209
2210 for t in threads:
2211 t.join()
2212 links.append( t.result )
2213 clusters = []
2214 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002215 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002216 t = main.Thread( target=main.CLIs[i].clusters,
Jon Hall5cf14d52015-07-16 12:15:19 -07002217 name="clusters-" + str( i ),
2218 args=[ ] )
2219 threads.append( t )
2220 t.start()
2221
2222 for t in threads:
2223 t.join()
2224 clusters.append( t.result )
2225
2226 elapsed = time.time() - startTime
2227 cliTime = time.time() - cliStart
2228 print "Elapsed time: " + str( elapsed )
2229 print "CLI time: " + str( cliTime )
2230
2231 mnSwitches = main.Mininet1.getSwitches()
2232 mnLinks = main.Mininet1.getLinks()
2233 mnHosts = main.Mininet1.getHosts()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002234 for controller in range( len( main.activeNodes ) ):
2235 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002236 if devices[ controller ] and ports[ controller ] and\
2237 "Error" not in devices[ controller ] and\
2238 "Error" not in ports[ controller ]:
2239
2240 currentDevicesResult = main.Mininet1.compareSwitches(
2241 mnSwitches,
2242 json.loads( devices[ controller ] ),
2243 json.loads( ports[ controller ] ) )
2244 else:
2245 currentDevicesResult = main.FALSE
2246 utilities.assert_equals( expect=main.TRUE,
2247 actual=currentDevicesResult,
2248 onpass="ONOS" + controllerStr +
2249 " Switches view is correct",
2250 onfail="ONOS" + controllerStr +
2251 " Switches view is incorrect" )
2252
2253 if links[ controller ] and "Error" not in links[ controller ]:
2254 currentLinksResult = main.Mininet1.compareLinks(
2255 mnSwitches, mnLinks,
2256 json.loads( links[ controller ] ) )
2257 else:
2258 currentLinksResult = main.FALSE
2259 utilities.assert_equals( expect=main.TRUE,
2260 actual=currentLinksResult,
2261 onpass="ONOS" + controllerStr +
2262 " links view is correct",
2263 onfail="ONOS" + controllerStr +
2264 " links view is incorrect" )
2265
2266 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2267 currentHostsResult = main.Mininet1.compareHosts(
2268 mnHosts,
2269 hosts[ controller ] )
2270 else:
2271 currentHostsResult = main.FALSE
2272 utilities.assert_equals( expect=main.TRUE,
2273 actual=currentHostsResult,
2274 onpass="ONOS" + controllerStr +
2275 " hosts exist in Mininet",
2276 onfail="ONOS" + controllerStr +
2277 " hosts don't match Mininet" )
2278 # CHECKING HOST ATTACHMENT POINTS
2279 hostAttachment = True
2280 zeroHosts = False
2281 # FIXME: topo-HA/obelisk specific mappings:
2282 # key is mac and value is dpid
2283 mappings = {}
2284 for i in range( 1, 29 ): # hosts 1 through 28
2285 # set up correct variables:
2286 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2287 if i == 1:
2288 deviceId = "1000".zfill(16)
2289 elif i == 2:
2290 deviceId = "2000".zfill(16)
2291 elif i == 3:
2292 deviceId = "3000".zfill(16)
2293 elif i == 4:
2294 deviceId = "3004".zfill(16)
2295 elif i == 5:
2296 deviceId = "5000".zfill(16)
2297 elif i == 6:
2298 deviceId = "6000".zfill(16)
2299 elif i == 7:
2300 deviceId = "6007".zfill(16)
2301 elif i >= 8 and i <= 17:
2302 dpid = '3' + str( i ).zfill( 3 )
2303 deviceId = dpid.zfill(16)
2304 elif i >= 18 and i <= 27:
2305 dpid = '6' + str( i ).zfill( 3 )
2306 deviceId = dpid.zfill(16)
2307 elif i == 28:
2308 deviceId = "2800".zfill(16)
2309 mappings[ macId ] = deviceId
2310 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2311 if hosts[ controller ] == []:
2312 main.log.warn( "There are no hosts discovered" )
2313 zeroHosts = True
2314 else:
2315 for host in hosts[ controller ]:
2316 mac = None
2317 location = None
2318 device = None
2319 port = None
2320 try:
2321 mac = host.get( 'mac' )
2322 assert mac, "mac field could not be found for this host object"
2323
2324 location = host.get( 'location' )
2325 assert location, "location field could not be found for this host object"
2326
2327 # Trim the protocol identifier off deviceId
2328 device = str( location.get( 'elementId' ) ).split(':')[1]
2329 assert device, "elementId field could not be found for this host location object"
2330
2331 port = location.get( 'port' )
2332 assert port, "port field could not be found for this host location object"
2333
2334 # Now check if this matches where they should be
2335 if mac and device and port:
2336 if str( port ) != "1":
2337 main.log.error( "The attachment port is incorrect for " +
2338 "host " + str( mac ) +
2339 ". Expected: 1 Actual: " + str( port) )
2340 hostAttachment = False
2341 if device != mappings[ str( mac ) ]:
2342 main.log.error( "The attachment device is incorrect for " +
2343 "host " + str( mac ) +
2344 ". Expected: " + mappings[ str( mac ) ] +
2345 " Actual: " + device )
2346 hostAttachment = False
2347 else:
2348 hostAttachment = False
2349 except AssertionError:
2350 main.log.exception( "Json object not as expected" )
2351 main.log.error( repr( host ) )
2352 hostAttachment = False
2353 else:
2354 main.log.error( "No hosts json output or \"Error\"" +
2355 " in output. hosts = " +
2356 repr( hosts[ controller ] ) )
2357 if zeroHosts is False:
2358 hostAttachment = True
2359
2360 # END CHECKING HOST ATTACHMENT POINTS
2361 devicesResults = devicesResults and currentDevicesResult
2362 linksResults = linksResults and currentLinksResult
2363 hostsResults = hostsResults and currentHostsResult
2364 hostAttachmentResults = hostAttachmentResults and\
2365 hostAttachment
Jon Halle9b1fa32015-12-08 15:32:21 -08002366 topoResult = devicesResults and linksResults and\
2367 hostsResults and hostAttachmentResults
2368 utilities.assert_equals( expect=True,
2369 actual=topoResult,
2370 onpass="ONOS topology matches Mininet",
2371 onfail="ONOS topology don't match Mininet" )
2372 # End of While loop to pull ONOS state
Jon Hall5cf14d52015-07-16 12:15:19 -07002373
2374 # Compare json objects for hosts and dataplane clusters
2375
2376 # hosts
2377 main.step( "Hosts view is consistent across all ONOS nodes" )
2378 consistentHostsResult = main.TRUE
2379 for controller in range( len( hosts ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002380 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002381 if "Error" not in hosts[ controller ]:
2382 if hosts[ controller ] == hosts[ 0 ]:
2383 continue
2384 else: # hosts not consistent
2385 main.log.error( "hosts from ONOS" + controllerStr +
2386 " is inconsistent with ONOS1" )
2387 main.log.warn( repr( hosts[ controller ] ) )
2388 consistentHostsResult = main.FALSE
2389
2390 else:
2391 main.log.error( "Error in getting ONOS hosts from ONOS" +
2392 controllerStr )
2393 consistentHostsResult = main.FALSE
2394 main.log.warn( "ONOS" + controllerStr +
2395 " hosts response: " +
2396 repr( hosts[ controller ] ) )
2397 utilities.assert_equals(
2398 expect=main.TRUE,
2399 actual=consistentHostsResult,
2400 onpass="Hosts view is consistent across all ONOS nodes",
2401 onfail="ONOS nodes have different views of hosts" )
2402
2403 main.step( "Hosts information is correct" )
2404 hostsResults = hostsResults and ipResult
2405 utilities.assert_equals(
2406 expect=main.TRUE,
2407 actual=hostsResults,
2408 onpass="Host information is correct",
2409 onfail="Host information is incorrect" )
2410
2411 main.step( "Host attachment points to the network" )
2412 utilities.assert_equals(
2413 expect=True,
2414 actual=hostAttachmentResults,
2415 onpass="Hosts are correctly attached to the network",
2416 onfail="ONOS did not correctly attach hosts to the network" )
2417
2418 # Strongly connected clusters of devices
2419 main.step( "Clusters view is consistent across all ONOS nodes" )
2420 consistentClustersResult = main.TRUE
2421 for controller in range( len( clusters ) ):
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002422 controllerStr = str( main.activeNodes[controller] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07002423 if "Error" not in clusters[ controller ]:
2424 if clusters[ controller ] == clusters[ 0 ]:
2425 continue
2426 else: # clusters not consistent
2427 main.log.error( "clusters from ONOS" +
2428 controllerStr +
2429 " is inconsistent with ONOS1" )
2430 consistentClustersResult = main.FALSE
2431
2432 else:
2433 main.log.error( "Error in getting dataplane clusters " +
2434 "from ONOS" + controllerStr )
2435 consistentClustersResult = main.FALSE
2436 main.log.warn( "ONOS" + controllerStr +
2437 " clusters response: " +
2438 repr( clusters[ controller ] ) )
2439 utilities.assert_equals(
2440 expect=main.TRUE,
2441 actual=consistentClustersResult,
2442 onpass="Clusters view is consistent across all ONOS nodes",
2443 onfail="ONOS nodes have different views of clusters" )
2444
2445 main.step( "There is only one SCC" )
2446 # there should always only be one cluster
2447 try:
2448 numClusters = len( json.loads( clusters[ 0 ] ) )
2449 except ( ValueError, TypeError ):
2450 main.log.exception( "Error parsing clusters[0]: " +
2451 repr( clusters[0] ) )
2452 clusterResults = main.FALSE
2453 if numClusters == 1:
2454 clusterResults = main.TRUE
2455 utilities.assert_equals(
2456 expect=1,
2457 actual=numClusters,
2458 onpass="ONOS shows 1 SCC",
2459 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2460
2461 topoResult = ( devicesResults and linksResults
2462 and hostsResults and consistentHostsResult
2463 and consistentClustersResult and clusterResults
2464 and ipResult and hostAttachmentResults )
2465
2466 topoResult = topoResult and int( count <= 2 )
2467 note = "note it takes about " + str( int( cliTime ) ) + \
2468 " seconds for the test to make all the cli calls to fetch " +\
2469 "the topology from each ONOS instance"
2470 main.log.info(
2471 "Very crass estimate for topology discovery/convergence( " +
2472 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2473 str( count ) + " tries" )
2474
2475 main.step( "Device information is correct" )
2476 utilities.assert_equals(
2477 expect=main.TRUE,
2478 actual=devicesResults,
2479 onpass="Device information is correct",
2480 onfail="Device information is incorrect" )
2481
2482 main.step( "Links are correct" )
2483 utilities.assert_equals(
2484 expect=main.TRUE,
2485 actual=linksResults,
2486 onpass="Link are correct",
2487 onfail="Links are incorrect" )
2488
2489 # FIXME: move this to an ONOS state case
2490 main.step( "Checking ONOS nodes" )
2491 nodesOutput = []
2492 nodeResults = main.TRUE
2493 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002494 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07002495 t = main.Thread( target=main.CLIs[i].nodes,
Jon Hall5cf14d52015-07-16 12:15:19 -07002496 name="nodes-" + str( i ),
2497 args=[ ] )
2498 threads.append( t )
2499 t.start()
2500
2501 for t in threads:
2502 t.join()
2503 nodesOutput.append( t.result )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002504 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
Jon Halle9b1fa32015-12-08 15:32:21 -08002505 ips.sort()
Jon Hall5cf14d52015-07-16 12:15:19 -07002506 for i in nodesOutput:
2507 try:
2508 current = json.loads( i )
Jon Halle9b1fa32015-12-08 15:32:21 -08002509 activeIps = []
2510 currentResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002511 for node in current:
Jon Halle9b1fa32015-12-08 15:32:21 -08002512 if node['state'] == 'ACTIVE':
2513 activeIps.append( node['ip'] )
2514 activeIps.sort()
2515 if ips == activeIps:
2516 currentResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002517 except ( ValueError, TypeError ):
2518 main.log.error( "Error parsing nodes output" )
2519 main.log.warn( repr( i ) )
Jon Halle9b1fa32015-12-08 15:32:21 -08002520 currentResult = main.FALSE
2521 nodeResults = nodeResults and currentResult
Jon Hall5cf14d52015-07-16 12:15:19 -07002522 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2523 onpass="Nodes check successful",
2524 onfail="Nodes check NOT successful" )
2525
2526 def CASE9( self, main ):
2527 """
2528 Link s3-s28 down
2529 """
2530 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002531 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002532 assert main, "main not defined"
2533 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002534 assert main.CLIs, "main.CLIs not defined"
2535 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002536 # NOTE: You should probably run a topology check after this
2537
2538 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2539
2540 description = "Turn off a link to ensure that Link Discovery " +\
2541 "is working properly"
2542 main.case( description )
2543
2544 main.step( "Kill Link between s3 and s28" )
2545 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2546 main.log.info( "Waiting " + str( linkSleep ) +
2547 " seconds for link down to be discovered" )
2548 time.sleep( linkSleep )
2549 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2550 onpass="Link down successful",
2551 onfail="Failed to bring link down" )
2552 # TODO do some sort of check here
2553
2554 def CASE10( self, main ):
2555 """
2556 Link s3-s28 up
2557 """
2558 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002559 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002560 assert main, "main not defined"
2561 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002562 assert main.CLIs, "main.CLIs not defined"
2563 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002564 # NOTE: You should probably run a topology check after this
2565
2566 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2567
2568 description = "Restore a link to ensure that Link Discovery is " + \
2569 "working properly"
2570 main.case( description )
2571
2572 main.step( "Bring link between s3 and s28 back up" )
2573 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2574 main.log.info( "Waiting " + str( linkSleep ) +
2575 " seconds for link up to be discovered" )
2576 time.sleep( linkSleep )
2577 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2578 onpass="Link up successful",
2579 onfail="Failed to bring link up" )
2580 # TODO do some sort of check here
2581
2582 def CASE11( self, main ):
2583 """
2584 Switch Down
2585 """
2586 # NOTE: You should probably run a topology check after this
2587 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002588 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002589 assert main, "main not defined"
2590 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002591 assert main.CLIs, "main.CLIs not defined"
2592 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002593
2594 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2595
2596 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002597 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002598 main.case( description )
2599 switch = main.params[ 'kill' ][ 'switch' ]
2600 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2601
2602 # TODO: Make this switch parameterizable
2603 main.step( "Kill " + switch )
2604 main.log.info( "Deleting " + switch )
2605 main.Mininet1.delSwitch( switch )
2606 main.log.info( "Waiting " + str( switchSleep ) +
2607 " seconds for switch down to be discovered" )
2608 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002609 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002610 # Peek at the deleted switch
2611 main.log.warn( str( device ) )
2612 result = main.FALSE
2613 if device and device[ 'available' ] is False:
2614 result = main.TRUE
2615 utilities.assert_equals( expect=main.TRUE, actual=result,
2616 onpass="Kill switch successful",
2617 onfail="Failed to kill switch?" )
2618
2619 def CASE12( self, main ):
2620 """
2621 Switch Up
2622 """
2623 # NOTE: You should probably run a topology check after this
2624 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002625 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002626 assert main, "main not defined"
2627 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002628 assert main.CLIs, "main.CLIs not defined"
2629 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002630 assert ONOS1Port, "ONOS1Port not defined"
2631 assert ONOS2Port, "ONOS2Port not defined"
2632 assert ONOS3Port, "ONOS3Port not defined"
2633 assert ONOS4Port, "ONOS4Port not defined"
2634 assert ONOS5Port, "ONOS5Port not defined"
2635 assert ONOS6Port, "ONOS6Port not defined"
2636 assert ONOS7Port, "ONOS7Port not defined"
2637
2638 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2639 switch = main.params[ 'kill' ][ 'switch' ]
2640 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2641 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002642 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002643 description = "Adding a switch to ensure it is discovered correctly"
2644 main.case( description )
2645
2646 main.step( "Add back " + switch )
2647 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2648 for peer in links:
2649 main.Mininet1.addLink( switch, peer )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002650 ipList = [ node.ip_address for node in main.nodes ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002651 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2652 main.log.info( "Waiting " + str( switchSleep ) +
2653 " seconds for switch up to be discovered" )
2654 time.sleep( switchSleep )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002655 device = onosCli.getDevice( dpid=switchDPID )
Jon Hall5cf14d52015-07-16 12:15:19 -07002656 # Peek at the deleted switch
2657 main.log.warn( str( device ) )
2658 result = main.FALSE
2659 if device and device[ 'available' ]:
2660 result = main.TRUE
2661 utilities.assert_equals( expect=main.TRUE, actual=result,
2662 onpass="add switch successful",
2663 onfail="Failed to add switch?" )
2664
2665 def CASE13( self, main ):
2666 """
2667 Clean up
2668 """
2669 import os
2670 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002671 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002672 assert main, "main not defined"
2673 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002674 assert main.CLIs, "main.CLIs not defined"
2675 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002676
2677 # printing colors to terminal
2678 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2679 'blue': '\033[94m', 'green': '\033[92m',
2680 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2681 main.case( "Test Cleanup" )
2682 main.step( "Killing tcpdumps" )
2683 main.Mininet2.stopTcpdump()
2684
2685 testname = main.TEST
Jon Hall96091e62015-09-21 17:34:17 -07002686 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
Jon Hall5cf14d52015-07-16 12:15:19 -07002687 main.step( "Copying MN pcap and ONOS log files to test station" )
2688 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2689 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
Jon Hall96091e62015-09-21 17:34:17 -07002690 # NOTE: MN Pcap file is being saved to logdir.
2691 # We scp this file as MN and TestON aren't necessarily the same vm
2692
2693 # FIXME: To be replaced with a Jenkin's post script
Jon Hall5cf14d52015-07-16 12:15:19 -07002694 # TODO: Load these from params
2695 # NOTE: must end in /
2696 logFolder = "/opt/onos/log/"
2697 logFiles = [ "karaf.log", "karaf.log.1" ]
2698 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002699 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002700 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002701 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002702 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2703 logFolder + f, dstName )
Jon Hall5cf14d52015-07-16 12:15:19 -07002704 # std*.log's
2705 # NOTE: must end in /
2706 logFolder = "/opt/onos/var/"
2707 logFiles = [ "stderr.log", "stdout.log" ]
2708 # NOTE: must end in /
Jon Hall5cf14d52015-07-16 12:15:19 -07002709 for f in logFiles:
Jon Halle1a3b752015-07-22 13:02:46 -07002710 for node in main.nodes:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002711 dstName = main.logdir + "/" + node.name + "-" + f
Jon Hall96091e62015-09-21 17:34:17 -07002712 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2713 logFolder + f, dstName )
2714 else:
2715 main.log.debug( "skipping saving log files" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002716
2717 main.step( "Stopping Mininet" )
2718 mnResult = main.Mininet1.stopNet()
2719 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2720 onpass="Mininet stopped",
2721 onfail="MN cleanup NOT successful" )
2722
2723 main.step( "Checking ONOS Logs for errors" )
Jon Halle1a3b752015-07-22 13:02:46 -07002724 for node in main.nodes:
Jon Hall96091e62015-09-21 17:34:17 -07002725 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2726 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
Jon Hall5cf14d52015-07-16 12:15:19 -07002727
2728 try:
2729 timerLog = open( main.logdir + "/Timers.csv", 'w')
2730 # Overwrite with empty line and close
2731 labels = "Gossip Intents, Restart"
2732 data = str( gossipTime ) + ", " + str( main.restartTime )
2733 timerLog.write( labels + "\n" + data )
2734 timerLog.close()
2735 except NameError, e:
2736 main.log.exception(e)
2737
2738 def CASE14( self, main ):
2739 """
2740 start election app on all onos nodes
2741 """
Jon Halle1a3b752015-07-22 13:02:46 -07002742 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002743 assert main, "main not defined"
2744 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002745 assert main.CLIs, "main.CLIs not defined"
2746 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002747
2748 main.case("Start Leadership Election app")
2749 main.step( "Install leadership election app" )
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002750 onosCli = main.CLIs[ main.activeNodes[0] ]
2751 appResult = onosCli.activateApp( "org.onosproject.election" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002752 utilities.assert_equals(
2753 expect=main.TRUE,
2754 actual=appResult,
2755 onpass="Election app installed",
2756 onfail="Something went wrong with installing Leadership election" )
2757
2758 main.step( "Run for election on each node" )
2759 leaderResult = main.TRUE
2760 leaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002761 for i in main.activeNodes:
2762 main.CLIs[i].electionTestRun()
2763 for i in main.activeNodes:
2764 cli = main.CLIs[i]
Jon Hall5cf14d52015-07-16 12:15:19 -07002765 leader = cli.electionTestLeader()
2766 if leader is None or leader == main.FALSE:
2767 main.log.error( cli.name + ": Leader for the election app " +
2768 "should be an ONOS node, instead got '" +
2769 str( leader ) + "'" )
2770 leaderResult = main.FALSE
2771 leaders.append( leader )
2772 utilities.assert_equals(
2773 expect=main.TRUE,
2774 actual=leaderResult,
2775 onpass="Successfully ran for leadership",
2776 onfail="Failed to run for leadership" )
2777
2778 main.step( "Check that each node shows the same leader" )
2779 sameLeader = main.TRUE
2780 if len( set( leaders ) ) != 1:
2781 sameLeader = main.FALSE
Jon Halle1a3b752015-07-22 13:02:46 -07002782 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
Jon Hall5cf14d52015-07-16 12:15:19 -07002783 str( leaders ) )
2784 utilities.assert_equals(
2785 expect=main.TRUE,
2786 actual=sameLeader,
2787 onpass="Leadership is consistent for the election topic",
2788 onfail="Nodes have different leaders" )
2789
2790 def CASE15( self, main ):
2791 """
2792 Check that Leadership Election is still functional
acsmars71adceb2015-08-31 15:09:26 -07002793 15.1 Run election on each node
2794 15.2 Check that each node has the same leaders and candidates
2795 15.3 Find current leader and withdraw
2796 15.4 Check that a new node was elected leader
2797 15.5 Check that that new leader was the candidate of old leader
2798 15.6 Run for election on old leader
2799 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2800 15.8 Make sure that the old leader was added to the candidate list
2801
2802 old and new variable prefixes refer to data from before vs after
2803 withdrawl and later before withdrawl vs after re-election
Jon Hall5cf14d52015-07-16 12:15:19 -07002804 """
2805 import time
Jon Halle1a3b752015-07-22 13:02:46 -07002806 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002807 assert main, "main not defined"
2808 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07002809 assert main.CLIs, "main.CLIs not defined"
2810 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07002811
Jon Hall5cf14d52015-07-16 12:15:19 -07002812 description = "Check that Leadership Election is still functional"
2813 main.case( description )
acsmars71adceb2015-08-31 15:09:26 -07002814 # NOTE: Need to re-run since being a canidate is not persistant
2815 # TODO: add check for "Command not found:" in the driver, this
2816 # means the election test app isn't loaded
Jon Hall5cf14d52015-07-16 12:15:19 -07002817
acsmars71adceb2015-08-31 15:09:26 -07002818 oldLeaders = [] # leaders by node before withdrawl from candidates
2819 newLeaders = [] # leaders by node after withdrawl from candidates
2820 oldAllCandidates = [] # list of lists of each nodes' candidates before
2821 newAllCandidates = [] # list of lists of each nodes' candidates after
2822 oldCandidates = [] # list of candidates from node 0 before withdrawl
2823 newCandidates = [] # list of candidates from node 0 after withdrawl
2824 oldLeader = '' # the old leader from oldLeaders, None if not same
2825 newLeader = '' # the new leaders fron newLoeaders, None if not same
2826 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2827 expectNoLeader = False # True when there is only one leader
2828 if main.numCtrls == 1:
2829 expectNoLeader = True
2830
2831 main.step( "Run for election on each node" )
2832 electionResult = main.TRUE
2833
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002834 for i in main.activeNodes: # run test election on each node
2835 if main.CLIs[i].electionTestRun() == main.FALSE:
acsmars71adceb2015-08-31 15:09:26 -07002836 electionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07002837 utilities.assert_equals(
2838 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002839 actual=electionResult,
2840 onpass="All nodes successfully ran for leadership",
2841 onfail="At least one node failed to run for leadership" )
2842
acsmars3a72bde2015-09-02 14:16:22 -07002843 if electionResult == main.FALSE:
2844 main.log.error(
2845 "Skipping Test Case because Election Test App isn't loaded" )
2846 main.skipCase()
2847
acsmars71adceb2015-08-31 15:09:26 -07002848 main.step( "Check that each node shows the same leader and candidates" )
2849 sameResult = main.TRUE
2850 failMessage = "Nodes have different leaders"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002851 for i in main.activeNodes:
2852 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002853 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2854 oldAllCandidates.append( node )
2855 oldLeaders.append( node[ 0 ] )
2856 oldCandidates = oldAllCandidates[ 0 ]
2857
2858 # Check that each node has the same leader. Defines oldLeader
2859 if len( set( oldLeaders ) ) != 1:
2860 sameResult = main.FALSE
2861 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2862 oldLeader = None
2863 else:
2864 oldLeader = oldLeaders[ 0 ]
2865
2866 # Check that each node's candidate list is the same
acsmars29233db2015-11-04 11:15:00 -08002867 candidateDiscrepancy = False # Boolean of candidate mismatches
acsmars71adceb2015-08-31 15:09:26 -07002868 for candidates in oldAllCandidates:
2869 if set( candidates ) != set( oldCandidates ):
2870 sameResult = main.FALSE
acsmars29233db2015-11-04 11:15:00 -08002871 candidateDiscrepancy = True
2872
2873 if candidateDiscrepancy:
2874 failMessage += " and candidates"
2875
acsmars71adceb2015-08-31 15:09:26 -07002876 utilities.assert_equals(
2877 expect=main.TRUE,
2878 actual=sameResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002879 onpass="Leadership is consistent for the election topic",
acsmars71adceb2015-08-31 15:09:26 -07002880 onfail=failMessage )
Jon Hall5cf14d52015-07-16 12:15:19 -07002881
2882 main.step( "Find current leader and withdraw" )
acsmars71adceb2015-08-31 15:09:26 -07002883 withdrawResult = main.TRUE
Jon Hall5cf14d52015-07-16 12:15:19 -07002884 # do some sanity checking on leader before using it
acsmars71adceb2015-08-31 15:09:26 -07002885 if oldLeader is None:
2886 main.log.error( "Leadership isn't consistent." )
2887 withdrawResult = main.FALSE
2888 # Get the CLI of the oldLeader
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002889 for i in main.activeNodes:
acsmars71adceb2015-08-31 15:09:26 -07002890 if oldLeader == main.nodes[ i ].ip_address:
2891 oldLeaderCLI = main.CLIs[ i ]
Jon Hall5cf14d52015-07-16 12:15:19 -07002892 break
2893 else: # FOR/ELSE statement
2894 main.log.error( "Leader election, could not find current leader" )
2895 if oldLeader:
acsmars71adceb2015-08-31 15:09:26 -07002896 withdrawResult = oldLeaderCLI.electionTestWithdraw()
Jon Hall5cf14d52015-07-16 12:15:19 -07002897 utilities.assert_equals(
2898 expect=main.TRUE,
2899 actual=withdrawResult,
2900 onpass="Node was withdrawn from election",
2901 onfail="Node was not withdrawn from election" )
2902
acsmars71adceb2015-08-31 15:09:26 -07002903 main.step( "Check that a new node was elected leader" )
2904
Jon Hall5cf14d52015-07-16 12:15:19 -07002905 # FIXME: use threads
acsmars71adceb2015-08-31 15:09:26 -07002906 newLeaderResult = main.TRUE
2907 failMessage = "Nodes have different leaders"
2908
2909 # Get new leaders and candidates
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002910 for i in main.activeNodes:
2911 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002912 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2913 # elections might no have finished yet
2914 if node[ 0 ] == 'none' and not expectNoLeader:
2915 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2916 "sure elections are complete." )
2917 time.sleep(5)
2918 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2919 # election still isn't done or there is a problem
2920 if node[ 0 ] == 'none':
2921 main.log.error( "No leader was elected on at least 1 node" )
2922 newLeaderResult = main.FALSE
2923 newAllCandidates.append( node )
2924 newLeaders.append( node[ 0 ] )
2925 newCandidates = newAllCandidates[ 0 ]
2926
2927 # Check that each node has the same leader. Defines newLeader
2928 if len( set( newLeaders ) ) != 1:
2929 newLeaderResult = main.FALSE
2930 main.log.error( "Nodes have different leaders: " +
2931 str( newLeaders ) )
2932 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07002933 else:
acsmars71adceb2015-08-31 15:09:26 -07002934 newLeader = newLeaders[ 0 ]
2935
2936 # Check that each node's candidate list is the same
2937 for candidates in newAllCandidates:
2938 if set( candidates ) != set( newCandidates ):
2939 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07002940 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07002941
2942 # Check that the new leader is not the older leader, which was withdrawn
2943 if newLeader == oldLeader:
2944 newLeaderResult = main.FALSE
2945 main.log.error( "All nodes still see old leader: " + oldLeader +
2946 " as the current leader" )
2947
Jon Hall5cf14d52015-07-16 12:15:19 -07002948 utilities.assert_equals(
2949 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07002950 actual=newLeaderResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07002951 onpass="Leadership election passed",
2952 onfail="Something went wrong with Leadership election" )
2953
acsmars71adceb2015-08-31 15:09:26 -07002954 main.step( "Check that that new leader was the candidate of old leader")
2955 # candidates[ 2 ] should be come the top candidate after withdrawl
2956 correctCandidateResult = main.TRUE
2957 if expectNoLeader:
2958 if newLeader == 'none':
2959 main.log.info( "No leader expected. None found. Pass" )
2960 correctCandidateResult = main.TRUE
2961 else:
2962 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2963 correctCandidateResult = main.FALSE
2964 elif newLeader != oldCandidates[ 2 ]:
2965 correctCandidateResult = main.FALSE
2966 main.log.error( "Candidate " + newLeader + " was elected. " +
2967 oldCandidates[ 2 ] + " should have had priority." )
2968
2969 utilities.assert_equals(
2970 expect=main.TRUE,
2971 actual=correctCandidateResult,
2972 onpass="Correct Candidate Elected",
2973 onfail="Incorrect Candidate Elected" )
2974
Jon Hall5cf14d52015-07-16 12:15:19 -07002975 main.step( "Run for election on old leader( just so everyone " +
2976 "is in the hat )" )
acsmars71adceb2015-08-31 15:09:26 -07002977 if oldLeaderCLI is not None:
2978 runResult = oldLeaderCLI.electionTestRun()
Jon Hall5cf14d52015-07-16 12:15:19 -07002979 else:
acsmars71adceb2015-08-31 15:09:26 -07002980 main.log.error( "No old leader to re-elect" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002981 runResult = main.FALSE
2982 utilities.assert_equals(
2983 expect=main.TRUE,
2984 actual=runResult,
2985 onpass="App re-ran for election",
2986 onfail="App failed to run for election" )
acsmars71adceb2015-08-31 15:09:26 -07002987 main.step(
2988 "Check that oldLeader is a candidate, and leader if only 1 node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07002989 # verify leader didn't just change
acsmars71adceb2015-08-31 15:09:26 -07002990 positionResult = main.TRUE
2991 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
2992
2993 # Reset and reuse the new candidate and leaders lists
2994 newAllCandidates = []
2995 newCandidates = []
2996 newLeaders = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07002997 for i in main.activeNodes:
2998 cli = main.CLIs[i]
acsmars71adceb2015-08-31 15:09:26 -07002999 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3000 if oldLeader not in node: # election might no have finished yet
3001 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3002 "be sure elections are complete" )
3003 time.sleep(5)
3004 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3005 if oldLeader not in node: # election still isn't done, errors
3006 main.log.error(
3007 "Old leader was not elected on at least one node" )
3008 positionResult = main.FALSE
3009 newAllCandidates.append( node )
3010 newLeaders.append( node[ 0 ] )
3011 newCandidates = newAllCandidates[ 0 ]
3012
3013 # Check that each node has the same leader. Defines newLeader
3014 if len( set( newLeaders ) ) != 1:
3015 positionResult = main.FALSE
3016 main.log.error( "Nodes have different leaders: " +
3017 str( newLeaders ) )
3018 newLeader = None
Jon Hall5cf14d52015-07-16 12:15:19 -07003019 else:
acsmars71adceb2015-08-31 15:09:26 -07003020 newLeader = newLeaders[ 0 ]
3021
3022 # Check that each node's candidate list is the same
3023 for candidates in newAllCandidates:
3024 if set( candidates ) != set( newCandidates ):
3025 newLeaderResult = main.FALSE
Jon Hallceb4abb2015-09-25 12:03:06 -07003026 main.log.error( "Discrepancy in candidate lists detected" )
acsmars71adceb2015-08-31 15:09:26 -07003027
3028 # Check that the re-elected node is last on the candidate List
3029 if oldLeader != newCandidates[ -1 ]:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003030 main.log.error( "Old Leader (" + oldLeader + ") not in the proper position " +
acsmars71adceb2015-08-31 15:09:26 -07003031 str( newCandidates ) )
3032 positionResult = main.FALSE
Jon Hall5cf14d52015-07-16 12:15:19 -07003033
3034 utilities.assert_equals(
3035 expect=main.TRUE,
acsmars71adceb2015-08-31 15:09:26 -07003036 actual=positionResult,
Jon Hall5cf14d52015-07-16 12:15:19 -07003037 onpass="Old leader successfully re-ran for election",
3038 onfail="Something went wrong with Leadership election after " +
3039 "the old leader re-ran for election" )
3040
3041 def CASE16( self, main ):
3042 """
3043 Install Distributed Primitives app
3044 """
3045 import time
Jon Halle1a3b752015-07-22 13:02:46 -07003046 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003047 assert main, "main not defined"
3048 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003049 assert main.CLIs, "main.CLIs not defined"
3050 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003051
3052 # Variables for the distributed primitives tests
3053 global pCounterName
3054 global iCounterName
3055 global pCounterValue
3056 global iCounterValue
3057 global onosSet
3058 global onosSetName
3059 pCounterName = "TestON-Partitions"
3060 iCounterName = "TestON-inMemory"
3061 pCounterValue = 0
3062 iCounterValue = 0
3063 onosSet = set([])
3064 onosSetName = "TestON-set"
3065
3066 description = "Install Primitives app"
3067 main.case( description )
3068 main.step( "Install Primitives app" )
3069 appName = "org.onosproject.distributedprimitives"
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003070 node = main.activeNodes[0]
3071 appResults = main.CLIs[node].activateApp( appName )
Jon Hall5cf14d52015-07-16 12:15:19 -07003072 utilities.assert_equals( expect=main.TRUE,
3073 actual=appResults,
3074 onpass="Primitives app activated",
3075 onfail="Primitives app not activated" )
3076 time.sleep( 5 ) # To allow all nodes to activate
3077
3078 def CASE17( self, main ):
3079 """
3080 Check for basic functionality with distributed primitives
3081 """
Jon Hall5cf14d52015-07-16 12:15:19 -07003082 # Make sure variables are defined/set
Jon Halle1a3b752015-07-22 13:02:46 -07003083 assert main.numCtrls, "main.numCtrls not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003084 assert main, "main not defined"
3085 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Halle1a3b752015-07-22 13:02:46 -07003086 assert main.CLIs, "main.CLIs not defined"
3087 assert main.nodes, "main.nodes not defined"
Jon Hall5cf14d52015-07-16 12:15:19 -07003088 assert pCounterName, "pCounterName not defined"
3089 assert iCounterName, "iCounterName not defined"
3090 assert onosSetName, "onosSetName not defined"
3091 # NOTE: assert fails if value is 0/None/Empty/False
3092 try:
3093 pCounterValue
3094 except NameError:
3095 main.log.error( "pCounterValue not defined, setting to 0" )
3096 pCounterValue = 0
3097 try:
3098 iCounterValue
3099 except NameError:
3100 main.log.error( "iCounterValue not defined, setting to 0" )
3101 iCounterValue = 0
3102 try:
3103 onosSet
3104 except NameError:
3105 main.log.error( "onosSet not defined, setting to empty Set" )
3106 onosSet = set([])
3107 # Variables for the distributed primitives tests. These are local only
3108 addValue = "a"
3109 addAllValue = "a b c d e f"
3110 retainValue = "c d e f"
3111
3112 description = "Check for basic functionality with distributed " +\
3113 "primitives"
3114 main.case( description )
Jon Halle1a3b752015-07-22 13:02:46 -07003115 main.caseExplanation = "Test the methods of the distributed " +\
3116 "primitives (counters and sets) throught the cli"
Jon Hall5cf14d52015-07-16 12:15:19 -07003117 # DISTRIBUTED ATOMIC COUNTERS
Jon Halle1a3b752015-07-22 13:02:46 -07003118 # Partitioned counters
3119 main.step( "Increment then get a default counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003120 pCounters = []
3121 threads = []
3122 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003123 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003124 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3125 name="counterAddAndGet-" + str( i ),
Jon Hall5cf14d52015-07-16 12:15:19 -07003126 args=[ pCounterName ] )
3127 pCounterValue += 1
3128 addedPValues.append( pCounterValue )
3129 threads.append( t )
3130 t.start()
3131
3132 for t in threads:
3133 t.join()
3134 pCounters.append( t.result )
3135 # Check that counter incremented numController times
3136 pCounterResults = True
3137 for i in addedPValues:
3138 tmpResult = i in pCounters
3139 pCounterResults = pCounterResults and tmpResult
3140 if not tmpResult:
3141 main.log.error( str( i ) + " is not in partitioned "
3142 "counter incremented results" )
3143 utilities.assert_equals( expect=True,
3144 actual=pCounterResults,
3145 onpass="Default counter incremented",
3146 onfail="Error incrementing default" +
3147 " counter" )
3148
Jon Halle1a3b752015-07-22 13:02:46 -07003149 main.step( "Get then Increment a default counter on each node" )
3150 pCounters = []
3151 threads = []
3152 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003153 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003154 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3155 name="counterGetAndAdd-" + str( i ),
3156 args=[ pCounterName ] )
3157 addedPValues.append( pCounterValue )
3158 pCounterValue += 1
3159 threads.append( t )
3160 t.start()
3161
3162 for t in threads:
3163 t.join()
3164 pCounters.append( t.result )
3165 # Check that counter incremented numController times
3166 pCounterResults = True
3167 for i in addedPValues:
3168 tmpResult = i in pCounters
3169 pCounterResults = pCounterResults and tmpResult
3170 if not tmpResult:
3171 main.log.error( str( i ) + " is not in partitioned "
3172 "counter incremented results" )
3173 utilities.assert_equals( expect=True,
3174 actual=pCounterResults,
3175 onpass="Default counter incremented",
3176 onfail="Error incrementing default" +
3177 " counter" )
3178
3179 main.step( "Counters we added have the correct values" )
3180 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3181 utilities.assert_equals( expect=main.TRUE,
3182 actual=incrementCheck,
3183 onpass="Added counters are correct",
3184 onfail="Added counters are incorrect" )
3185
3186 main.step( "Add -8 to then get a default counter on each node" )
3187 pCounters = []
3188 threads = []
3189 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003190 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003191 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3192 name="counterIncrement-" + str( i ),
3193 args=[ pCounterName ],
3194 kwargs={ "delta": -8 } )
3195 pCounterValue += -8
3196 addedPValues.append( pCounterValue )
3197 threads.append( t )
3198 t.start()
3199
3200 for t in threads:
3201 t.join()
3202 pCounters.append( t.result )
3203 # Check that counter incremented numController times
3204 pCounterResults = True
3205 for i in addedPValues:
3206 tmpResult = i in pCounters
3207 pCounterResults = pCounterResults and tmpResult
3208 if not tmpResult:
3209 main.log.error( str( i ) + " is not in partitioned "
3210 "counter incremented results" )
3211 utilities.assert_equals( expect=True,
3212 actual=pCounterResults,
3213 onpass="Default counter incremented",
3214 onfail="Error incrementing default" +
3215 " counter" )
3216
3217 main.step( "Add 5 to then get a default counter on each node" )
3218 pCounters = []
3219 threads = []
3220 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003221 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003222 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3223 name="counterIncrement-" + str( i ),
3224 args=[ pCounterName ],
3225 kwargs={ "delta": 5 } )
3226 pCounterValue += 5
3227 addedPValues.append( pCounterValue )
3228 threads.append( t )
3229 t.start()
3230
3231 for t in threads:
3232 t.join()
3233 pCounters.append( t.result )
3234 # Check that counter incremented numController times
3235 pCounterResults = True
3236 for i in addedPValues:
3237 tmpResult = i in pCounters
3238 pCounterResults = pCounterResults and tmpResult
3239 if not tmpResult:
3240 main.log.error( str( i ) + " is not in partitioned "
3241 "counter incremented results" )
3242 utilities.assert_equals( expect=True,
3243 actual=pCounterResults,
3244 onpass="Default counter incremented",
3245 onfail="Error incrementing default" +
3246 " counter" )
3247
3248 main.step( "Get then add 5 to a default counter on each node" )
3249 pCounters = []
3250 threads = []
3251 addedPValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003252 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003253 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3254 name="counterIncrement-" + str( i ),
3255 args=[ pCounterName ],
3256 kwargs={ "delta": 5 } )
3257 addedPValues.append( pCounterValue )
3258 pCounterValue += 5
3259 threads.append( t )
3260 t.start()
3261
3262 for t in threads:
3263 t.join()
3264 pCounters.append( t.result )
3265 # Check that counter incremented numController times
3266 pCounterResults = True
3267 for i in addedPValues:
3268 tmpResult = i in pCounters
3269 pCounterResults = pCounterResults and tmpResult
3270 if not tmpResult:
3271 main.log.error( str( i ) + " is not in partitioned "
3272 "counter incremented results" )
3273 utilities.assert_equals( expect=True,
3274 actual=pCounterResults,
3275 onpass="Default counter incremented",
3276 onfail="Error incrementing default" +
3277 " counter" )
3278
3279 main.step( "Counters we added have the correct values" )
3280 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3281 utilities.assert_equals( expect=main.TRUE,
3282 actual=incrementCheck,
3283 onpass="Added counters are correct",
3284 onfail="Added counters are incorrect" )
3285
3286 # In-Memory counters
3287 main.step( "Increment and get an in-memory counter on each node" )
Jon Hall5cf14d52015-07-16 12:15:19 -07003288 iCounters = []
3289 addedIValues = []
3290 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003291 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003292 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003293 name="icounterIncrement-" + str( i ),
3294 args=[ iCounterName ],
3295 kwargs={ "inMemory": True } )
3296 iCounterValue += 1
3297 addedIValues.append( iCounterValue )
3298 threads.append( t )
3299 t.start()
3300
3301 for t in threads:
3302 t.join()
3303 iCounters.append( t.result )
3304 # Check that counter incremented numController times
3305 iCounterResults = True
3306 for i in addedIValues:
3307 tmpResult = i in iCounters
3308 iCounterResults = iCounterResults and tmpResult
3309 if not tmpResult:
3310 main.log.error( str( i ) + " is not in the in-memory "
3311 "counter incremented results" )
3312 utilities.assert_equals( expect=True,
3313 actual=iCounterResults,
Jon Halle1a3b752015-07-22 13:02:46 -07003314 onpass="In-memory counter incremented",
3315 onfail="Error incrementing in-memory" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003316 " counter" )
3317
Jon Halle1a3b752015-07-22 13:02:46 -07003318 main.step( "Get then Increment a in-memory counter on each node" )
3319 iCounters = []
3320 threads = []
3321 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003322 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003323 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3324 name="counterGetAndAdd-" + str( i ),
3325 args=[ iCounterName ],
3326 kwargs={ "inMemory": True } )
3327 addedIValues.append( iCounterValue )
3328 iCounterValue += 1
3329 threads.append( t )
3330 t.start()
3331
3332 for t in threads:
3333 t.join()
3334 iCounters.append( t.result )
3335 # Check that counter incremented numController times
3336 iCounterResults = True
3337 for i in addedIValues:
3338 tmpResult = i in iCounters
3339 iCounterResults = iCounterResults and tmpResult
3340 if not tmpResult:
3341 main.log.error( str( i ) + " is not in in-memory "
3342 "counter incremented results" )
3343 utilities.assert_equals( expect=True,
3344 actual=iCounterResults,
3345 onpass="In-memory counter incremented",
3346 onfail="Error incrementing in-memory" +
3347 " counter" )
3348
3349 main.step( "Counters we added have the correct values" )
3350 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3351 utilities.assert_equals( expect=main.TRUE,
3352 actual=incrementCheck,
3353 onpass="Added counters are correct",
3354 onfail="Added counters are incorrect" )
3355
3356 main.step( "Add -8 to then get a in-memory counter on each node" )
3357 iCounters = []
3358 threads = []
3359 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003360 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003361 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3362 name="counterIncrement-" + str( i ),
3363 args=[ iCounterName ],
3364 kwargs={ "delta": -8, "inMemory": True } )
3365 iCounterValue += -8
3366 addedIValues.append( iCounterValue )
3367 threads.append( t )
3368 t.start()
3369
3370 for t in threads:
3371 t.join()
3372 iCounters.append( t.result )
3373 # Check that counter incremented numController times
3374 iCounterResults = True
3375 for i in addedIValues:
3376 tmpResult = i in iCounters
3377 iCounterResults = iCounterResults and tmpResult
3378 if not tmpResult:
3379 main.log.error( str( i ) + " is not in in-memory "
3380 "counter incremented results" )
3381 utilities.assert_equals( expect=True,
3382 actual=pCounterResults,
3383 onpass="In-memory counter incremented",
3384 onfail="Error incrementing in-memory" +
3385 " counter" )
3386
3387 main.step( "Add 5 to then get a in-memory counter on each node" )
3388 iCounters = []
3389 threads = []
3390 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003391 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003392 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3393 name="counterIncrement-" + str( i ),
3394 args=[ iCounterName ],
3395 kwargs={ "delta": 5, "inMemory": True } )
3396 iCounterValue += 5
3397 addedIValues.append( iCounterValue )
3398 threads.append( t )
3399 t.start()
3400
3401 for t in threads:
3402 t.join()
3403 iCounters.append( t.result )
3404 # Check that counter incremented numController times
3405 iCounterResults = True
3406 for i in addedIValues:
3407 tmpResult = i in iCounters
3408 iCounterResults = iCounterResults and tmpResult
3409 if not tmpResult:
3410 main.log.error( str( i ) + " is not in in-memory "
3411 "counter incremented results" )
3412 utilities.assert_equals( expect=True,
3413 actual=pCounterResults,
3414 onpass="In-memory counter incremented",
3415 onfail="Error incrementing in-memory" +
3416 " counter" )
3417
3418 main.step( "Get then add 5 to a in-memory counter on each node" )
3419 iCounters = []
3420 threads = []
3421 addedIValues = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003422 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003423 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3424 name="counterIncrement-" + str( i ),
3425 args=[ iCounterName ],
3426 kwargs={ "delta": 5, "inMemory": True } )
3427 addedIValues.append( iCounterValue )
3428 iCounterValue += 5
3429 threads.append( t )
3430 t.start()
3431
3432 for t in threads:
3433 t.join()
3434 iCounters.append( t.result )
3435 # Check that counter incremented numController times
3436 iCounterResults = True
3437 for i in addedIValues:
3438 tmpResult = i in iCounters
3439 iCounterResults = iCounterResults and tmpResult
3440 if not tmpResult:
3441 main.log.error( str( i ) + " is not in in-memory "
3442 "counter incremented results" )
3443 utilities.assert_equals( expect=True,
3444 actual=iCounterResults,
3445 onpass="In-memory counter incremented",
3446 onfail="Error incrementing in-memory" +
3447 " counter" )
3448
3449 main.step( "Counters we added have the correct values" )
3450 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3451 utilities.assert_equals( expect=main.TRUE,
3452 actual=incrementCheck,
3453 onpass="Added counters are correct",
3454 onfail="Added counters are incorrect" )
3455
Jon Hall5cf14d52015-07-16 12:15:19 -07003456 main.step( "Check counters are consistant across nodes" )
Jon Hall3b489db2015-10-05 14:38:37 -07003457 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
Jon Hall5cf14d52015-07-16 12:15:19 -07003458 utilities.assert_equals( expect=main.TRUE,
3459 actual=consistentCounterResults,
3460 onpass="ONOS counters are consistent " +
3461 "across nodes",
3462 onfail="ONOS Counters are inconsistent " +
3463 "across nodes" )
3464
3465 main.step( "Counters we added have the correct values" )
Jon Halle1a3b752015-07-22 13:02:46 -07003466 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3467 incrementCheck = incrementCheck and \
3468 main.Counters.counterCheck( iCounterName, iCounterValue )
Jon Hall5cf14d52015-07-16 12:15:19 -07003469 utilities.assert_equals( expect=main.TRUE,
Jon Halle1a3b752015-07-22 13:02:46 -07003470 actual=incrementCheck,
Jon Hall5cf14d52015-07-16 12:15:19 -07003471 onpass="Added counters are correct",
3472 onfail="Added counters are incorrect" )
3473 # DISTRIBUTED SETS
3474 main.step( "Distributed Set get" )
3475 size = len( onosSet )
3476 getResponses = []
3477 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003478 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003479 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003480 name="setTestGet-" + str( i ),
3481 args=[ onosSetName ] )
3482 threads.append( t )
3483 t.start()
3484 for t in threads:
3485 t.join()
3486 getResponses.append( t.result )
3487
3488 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003489 for i in range( len( main.activeNodes ) ):
3490 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003491 if isinstance( getResponses[ i ], list):
3492 current = set( getResponses[ i ] )
3493 if len( current ) == len( getResponses[ i ] ):
3494 # no repeats
3495 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003496 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003497 " has incorrect view" +
3498 " of set " + onosSetName + ":\n" +
3499 str( getResponses[ i ] ) )
3500 main.log.debug( "Expected: " + str( onosSet ) )
3501 main.log.debug( "Actual: " + str( current ) )
3502 getResults = main.FALSE
3503 else:
3504 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003505 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003506 " has repeat elements in" +
3507 " set " + onosSetName + ":\n" +
3508 str( getResponses[ i ] ) )
3509 getResults = main.FALSE
3510 elif getResponses[ i ] == main.ERROR:
3511 getResults = main.FALSE
3512 utilities.assert_equals( expect=main.TRUE,
3513 actual=getResults,
3514 onpass="Set elements are correct",
3515 onfail="Set elements are incorrect" )
3516
3517 main.step( "Distributed Set size" )
3518 sizeResponses = []
3519 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003520 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003521 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003522 name="setTestSize-" + str( i ),
3523 args=[ onosSetName ] )
3524 threads.append( t )
3525 t.start()
3526 for t in threads:
3527 t.join()
3528 sizeResponses.append( t.result )
3529
3530 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003531 for i in range( len( main.activeNodes ) ):
3532 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003533 if size != sizeResponses[ i ]:
3534 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003535 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003536 " expected a size of " + str( size ) +
3537 " for set " + onosSetName +
3538 " but got " + str( sizeResponses[ i ] ) )
3539 utilities.assert_equals( expect=main.TRUE,
3540 actual=sizeResults,
3541 onpass="Set sizes are correct",
3542 onfail="Set sizes are incorrect" )
3543
3544 main.step( "Distributed Set add()" )
3545 onosSet.add( addValue )
3546 addResponses = []
3547 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003548 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003549 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003550 name="setTestAdd-" + str( i ),
3551 args=[ onosSetName, addValue ] )
3552 threads.append( t )
3553 t.start()
3554 for t in threads:
3555 t.join()
3556 addResponses.append( t.result )
3557
3558 # main.TRUE = successfully changed the set
3559 # main.FALSE = action resulted in no change in set
3560 # main.ERROR - Some error in executing the function
3561 addResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003562 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003563 if addResponses[ i ] == main.TRUE:
3564 # All is well
3565 pass
3566 elif addResponses[ i ] == main.FALSE:
3567 # Already in set, probably fine
3568 pass
3569 elif addResponses[ i ] == main.ERROR:
3570 # Error in execution
3571 addResults = main.FALSE
3572 else:
3573 # unexpected result
3574 addResults = main.FALSE
3575 if addResults != main.TRUE:
3576 main.log.error( "Error executing set add" )
3577
3578 # Check if set is still correct
3579 size = len( onosSet )
3580 getResponses = []
3581 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003582 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003583 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003584 name="setTestGet-" + str( i ),
3585 args=[ onosSetName ] )
3586 threads.append( t )
3587 t.start()
3588 for t in threads:
3589 t.join()
3590 getResponses.append( t.result )
3591 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003592 for i in range( len( main.activeNodes ) ):
3593 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003594 if isinstance( getResponses[ i ], list):
3595 current = set( getResponses[ i ] )
3596 if len( current ) == len( getResponses[ i ] ):
3597 # no repeats
3598 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003599 main.log.error( "ONOS" + node + " has incorrect view" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003600 " of set " + onosSetName + ":\n" +
3601 str( getResponses[ i ] ) )
3602 main.log.debug( "Expected: " + str( onosSet ) )
3603 main.log.debug( "Actual: " + str( current ) )
3604 getResults = main.FALSE
3605 else:
3606 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003607 main.log.error( "ONOS" + node + " has repeat elements in" +
Jon Hall5cf14d52015-07-16 12:15:19 -07003608 " set " + onosSetName + ":\n" +
3609 str( getResponses[ i ] ) )
3610 getResults = main.FALSE
3611 elif getResponses[ i ] == main.ERROR:
3612 getResults = main.FALSE
3613 sizeResponses = []
3614 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003615 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003616 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003617 name="setTestSize-" + str( i ),
3618 args=[ onosSetName ] )
3619 threads.append( t )
3620 t.start()
3621 for t in threads:
3622 t.join()
3623 sizeResponses.append( t.result )
3624 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003625 for i in range( len( main.activeNodes ) ):
3626 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003627 if size != sizeResponses[ i ]:
3628 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003629 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003630 " expected a size of " + str( size ) +
3631 " for set " + onosSetName +
3632 " but got " + str( sizeResponses[ i ] ) )
3633 addResults = addResults and getResults and sizeResults
3634 utilities.assert_equals( expect=main.TRUE,
3635 actual=addResults,
3636 onpass="Set add correct",
3637 onfail="Set add was incorrect" )
3638
3639 main.step( "Distributed Set addAll()" )
3640 onosSet.update( addAllValue.split() )
3641 addResponses = []
3642 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003643 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003644 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003645 name="setTestAddAll-" + str( i ),
3646 args=[ onosSetName, addAllValue ] )
3647 threads.append( t )
3648 t.start()
3649 for t in threads:
3650 t.join()
3651 addResponses.append( t.result )
3652
3653 # main.TRUE = successfully changed the set
3654 # main.FALSE = action resulted in no change in set
3655 # main.ERROR - Some error in executing the function
3656 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003657 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003658 if addResponses[ i ] == main.TRUE:
3659 # All is well
3660 pass
3661 elif addResponses[ i ] == main.FALSE:
3662 # Already in set, probably fine
3663 pass
3664 elif addResponses[ i ] == main.ERROR:
3665 # Error in execution
3666 addAllResults = main.FALSE
3667 else:
3668 # unexpected result
3669 addAllResults = main.FALSE
3670 if addAllResults != main.TRUE:
3671 main.log.error( "Error executing set addAll" )
3672
3673 # Check if set is still correct
3674 size = len( onosSet )
3675 getResponses = []
3676 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003677 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003678 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003679 name="setTestGet-" + str( i ),
3680 args=[ onosSetName ] )
3681 threads.append( t )
3682 t.start()
3683 for t in threads:
3684 t.join()
3685 getResponses.append( t.result )
3686 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003687 for i in range( len( main.activeNodes ) ):
3688 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003689 if isinstance( getResponses[ i ], list):
3690 current = set( getResponses[ i ] )
3691 if len( current ) == len( getResponses[ i ] ):
3692 # no repeats
3693 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003694 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003695 " has incorrect view" +
3696 " of set " + onosSetName + ":\n" +
3697 str( getResponses[ i ] ) )
3698 main.log.debug( "Expected: " + str( onosSet ) )
3699 main.log.debug( "Actual: " + str( current ) )
3700 getResults = main.FALSE
3701 else:
3702 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003703 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003704 " has repeat elements in" +
3705 " set " + onosSetName + ":\n" +
3706 str( getResponses[ i ] ) )
3707 getResults = main.FALSE
3708 elif getResponses[ i ] == main.ERROR:
3709 getResults = main.FALSE
3710 sizeResponses = []
3711 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003712 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003713 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003714 name="setTestSize-" + str( i ),
3715 args=[ onosSetName ] )
3716 threads.append( t )
3717 t.start()
3718 for t in threads:
3719 t.join()
3720 sizeResponses.append( t.result )
3721 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003722 for i in range( len( main.activeNodes ) ):
3723 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003724 if size != sizeResponses[ i ]:
3725 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003726 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003727 " expected a size of " + str( size ) +
3728 " for set " + onosSetName +
3729 " but got " + str( sizeResponses[ i ] ) )
3730 addAllResults = addAllResults and getResults and sizeResults
3731 utilities.assert_equals( expect=main.TRUE,
3732 actual=addAllResults,
3733 onpass="Set addAll correct",
3734 onfail="Set addAll was incorrect" )
3735
3736 main.step( "Distributed Set contains()" )
3737 containsResponses = []
3738 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003739 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003740 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003741 name="setContains-" + str( i ),
3742 args=[ onosSetName ],
3743 kwargs={ "values": addValue } )
3744 threads.append( t )
3745 t.start()
3746 for t in threads:
3747 t.join()
3748 # NOTE: This is the tuple
3749 containsResponses.append( t.result )
3750
3751 containsResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003752 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003753 if containsResponses[ i ] == main.ERROR:
3754 containsResults = main.FALSE
3755 else:
3756 containsResults = containsResults and\
3757 containsResponses[ i ][ 1 ]
3758 utilities.assert_equals( expect=main.TRUE,
3759 actual=containsResults,
3760 onpass="Set contains is functional",
3761 onfail="Set contains failed" )
3762
3763 main.step( "Distributed Set containsAll()" )
3764 containsAllResponses = []
3765 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003766 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003767 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003768 name="setContainsAll-" + str( i ),
3769 args=[ onosSetName ],
3770 kwargs={ "values": addAllValue } )
3771 threads.append( t )
3772 t.start()
3773 for t in threads:
3774 t.join()
3775 # NOTE: This is the tuple
3776 containsAllResponses.append( t.result )
3777
3778 containsAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003779 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003780 if containsResponses[ i ] == main.ERROR:
3781 containsResults = main.FALSE
3782 else:
3783 containsResults = containsResults and\
3784 containsResponses[ i ][ 1 ]
3785 utilities.assert_equals( expect=main.TRUE,
3786 actual=containsAllResults,
3787 onpass="Set containsAll is functional",
3788 onfail="Set containsAll failed" )
3789
3790 main.step( "Distributed Set remove()" )
3791 onosSet.remove( addValue )
3792 removeResponses = []
3793 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003794 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003795 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003796 name="setTestRemove-" + str( i ),
3797 args=[ onosSetName, addValue ] )
3798 threads.append( t )
3799 t.start()
3800 for t in threads:
3801 t.join()
3802 removeResponses.append( t.result )
3803
3804 # main.TRUE = successfully changed the set
3805 # main.FALSE = action resulted in no change in set
3806 # main.ERROR - Some error in executing the function
3807 removeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003808 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003809 if removeResponses[ i ] == main.TRUE:
3810 # All is well
3811 pass
3812 elif removeResponses[ i ] == main.FALSE:
3813 # not in set, probably fine
3814 pass
3815 elif removeResponses[ i ] == main.ERROR:
3816 # Error in execution
3817 removeResults = main.FALSE
3818 else:
3819 # unexpected result
3820 removeResults = main.FALSE
3821 if removeResults != main.TRUE:
3822 main.log.error( "Error executing set remove" )
3823
3824 # Check if set is still correct
3825 size = len( onosSet )
3826 getResponses = []
3827 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003828 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003829 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003830 name="setTestGet-" + str( i ),
3831 args=[ onosSetName ] )
3832 threads.append( t )
3833 t.start()
3834 for t in threads:
3835 t.join()
3836 getResponses.append( t.result )
3837 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003838 for i in range( len( main.activeNodes ) ):
3839 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003840 if isinstance( getResponses[ i ], list):
3841 current = set( getResponses[ i ] )
3842 if len( current ) == len( getResponses[ i ] ):
3843 # no repeats
3844 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003845 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003846 " has incorrect view" +
3847 " of set " + onosSetName + ":\n" +
3848 str( getResponses[ i ] ) )
3849 main.log.debug( "Expected: " + str( onosSet ) )
3850 main.log.debug( "Actual: " + str( current ) )
3851 getResults = main.FALSE
3852 else:
3853 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003854 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003855 " has repeat elements in" +
3856 " set " + onosSetName + ":\n" +
3857 str( getResponses[ i ] ) )
3858 getResults = main.FALSE
3859 elif getResponses[ i ] == main.ERROR:
3860 getResults = main.FALSE
3861 sizeResponses = []
3862 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003863 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003864 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003865 name="setTestSize-" + str( i ),
3866 args=[ onosSetName ] )
3867 threads.append( t )
3868 t.start()
3869 for t in threads:
3870 t.join()
3871 sizeResponses.append( t.result )
3872 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003873 for i in range( len( main.activeNodes ) ):
3874 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003875 if size != sizeResponses[ i ]:
3876 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003877 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003878 " expected a size of " + str( size ) +
3879 " for set " + onosSetName +
3880 " but got " + str( sizeResponses[ i ] ) )
3881 removeResults = removeResults and getResults and sizeResults
3882 utilities.assert_equals( expect=main.TRUE,
3883 actual=removeResults,
3884 onpass="Set remove correct",
3885 onfail="Set remove was incorrect" )
3886
3887 main.step( "Distributed Set removeAll()" )
3888 onosSet.difference_update( addAllValue.split() )
3889 removeAllResponses = []
3890 threads = []
3891 try:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003892 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003893 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07003894 name="setTestRemoveAll-" + str( i ),
3895 args=[ onosSetName, addAllValue ] )
3896 threads.append( t )
3897 t.start()
3898 for t in threads:
3899 t.join()
3900 removeAllResponses.append( t.result )
3901 except Exception, e:
3902 main.log.exception(e)
3903
3904 # main.TRUE = successfully changed the set
3905 # main.FALSE = action resulted in no change in set
3906 # main.ERROR - Some error in executing the function
3907 removeAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003908 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07003909 if removeAllResponses[ i ] == main.TRUE:
3910 # All is well
3911 pass
3912 elif removeAllResponses[ i ] == main.FALSE:
3913 # not in set, probably fine
3914 pass
3915 elif removeAllResponses[ i ] == main.ERROR:
3916 # Error in execution
3917 removeAllResults = main.FALSE
3918 else:
3919 # unexpected result
3920 removeAllResults = main.FALSE
3921 if removeAllResults != main.TRUE:
3922 main.log.error( "Error executing set removeAll" )
3923
3924 # Check if set is still correct
3925 size = len( onosSet )
3926 getResponses = []
3927 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003928 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003929 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07003930 name="setTestGet-" + str( i ),
3931 args=[ onosSetName ] )
3932 threads.append( t )
3933 t.start()
3934 for t in threads:
3935 t.join()
3936 getResponses.append( t.result )
3937 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003938 for i in range( len( main.activeNodes ) ):
3939 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003940 if isinstance( getResponses[ i ], list):
3941 current = set( getResponses[ i ] )
3942 if len( current ) == len( getResponses[ i ] ):
3943 # no repeats
3944 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003945 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003946 " has incorrect view" +
3947 " of set " + onosSetName + ":\n" +
3948 str( getResponses[ i ] ) )
3949 main.log.debug( "Expected: " + str( onosSet ) )
3950 main.log.debug( "Actual: " + str( current ) )
3951 getResults = main.FALSE
3952 else:
3953 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003954 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003955 " has repeat elements in" +
3956 " set " + onosSetName + ":\n" +
3957 str( getResponses[ i ] ) )
3958 getResults = main.FALSE
3959 elif getResponses[ i ] == main.ERROR:
3960 getResults = main.FALSE
3961 sizeResponses = []
3962 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003963 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003964 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07003965 name="setTestSize-" + str( i ),
3966 args=[ onosSetName ] )
3967 threads.append( t )
3968 t.start()
3969 for t in threads:
3970 t.join()
3971 sizeResponses.append( t.result )
3972 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003973 for i in range( len( main.activeNodes ) ):
3974 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07003975 if size != sizeResponses[ i ]:
3976 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003977 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07003978 " expected a size of " + str( size ) +
3979 " for set " + onosSetName +
3980 " but got " + str( sizeResponses[ i ] ) )
3981 removeAllResults = removeAllResults and getResults and sizeResults
3982 utilities.assert_equals( expect=main.TRUE,
3983 actual=removeAllResults,
3984 onpass="Set removeAll correct",
3985 onfail="Set removeAll was incorrect" )
3986
3987 main.step( "Distributed Set addAll()" )
3988 onosSet.update( addAllValue.split() )
3989 addResponses = []
3990 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07003991 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07003992 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07003993 name="setTestAddAll-" + str( i ),
3994 args=[ onosSetName, addAllValue ] )
3995 threads.append( t )
3996 t.start()
3997 for t in threads:
3998 t.join()
3999 addResponses.append( t.result )
4000
4001 # main.TRUE = successfully changed the set
4002 # main.FALSE = action resulted in no change in set
4003 # main.ERROR - Some error in executing the function
4004 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004005 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004006 if addResponses[ i ] == main.TRUE:
4007 # All is well
4008 pass
4009 elif addResponses[ i ] == main.FALSE:
4010 # Already in set, probably fine
4011 pass
4012 elif addResponses[ i ] == main.ERROR:
4013 # Error in execution
4014 addAllResults = main.FALSE
4015 else:
4016 # unexpected result
4017 addAllResults = main.FALSE
4018 if addAllResults != main.TRUE:
4019 main.log.error( "Error executing set addAll" )
4020
4021 # Check if set is still correct
4022 size = len( onosSet )
4023 getResponses = []
4024 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004025 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004026 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004027 name="setTestGet-" + str( i ),
4028 args=[ onosSetName ] )
4029 threads.append( t )
4030 t.start()
4031 for t in threads:
4032 t.join()
4033 getResponses.append( t.result )
4034 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004035 for i in range( len( main.activeNodes ) ):
4036 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004037 if isinstance( getResponses[ i ], list):
4038 current = set( getResponses[ i ] )
4039 if len( current ) == len( getResponses[ i ] ):
4040 # no repeats
4041 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004042 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004043 " has incorrect view" +
4044 " of set " + onosSetName + ":\n" +
4045 str( getResponses[ i ] ) )
4046 main.log.debug( "Expected: " + str( onosSet ) )
4047 main.log.debug( "Actual: " + str( current ) )
4048 getResults = main.FALSE
4049 else:
4050 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004051 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004052 " has repeat elements in" +
4053 " set " + onosSetName + ":\n" +
4054 str( getResponses[ i ] ) )
4055 getResults = main.FALSE
4056 elif getResponses[ i ] == main.ERROR:
4057 getResults = main.FALSE
4058 sizeResponses = []
4059 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004060 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004061 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004062 name="setTestSize-" + str( i ),
4063 args=[ onosSetName ] )
4064 threads.append( t )
4065 t.start()
4066 for t in threads:
4067 t.join()
4068 sizeResponses.append( t.result )
4069 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004070 for i in range( len( main.activeNodes ) ):
4071 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004072 if size != sizeResponses[ i ]:
4073 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004074 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004075 " expected a size of " + str( size ) +
4076 " for set " + onosSetName +
4077 " but got " + str( sizeResponses[ i ] ) )
4078 addAllResults = addAllResults and getResults and sizeResults
4079 utilities.assert_equals( expect=main.TRUE,
4080 actual=addAllResults,
4081 onpass="Set addAll correct",
4082 onfail="Set addAll was incorrect" )
4083
4084 main.step( "Distributed Set clear()" )
4085 onosSet.clear()
4086 clearResponses = []
4087 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004088 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004089 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004090 name="setTestClear-" + str( i ),
4091 args=[ onosSetName, " "], # Values doesn't matter
4092 kwargs={ "clear": True } )
4093 threads.append( t )
4094 t.start()
4095 for t in threads:
4096 t.join()
4097 clearResponses.append( t.result )
4098
4099 # main.TRUE = successfully changed the set
4100 # main.FALSE = action resulted in no change in set
4101 # main.ERROR - Some error in executing the function
4102 clearResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004103 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004104 if clearResponses[ i ] == main.TRUE:
4105 # All is well
4106 pass
4107 elif clearResponses[ i ] == main.FALSE:
4108 # Nothing set, probably fine
4109 pass
4110 elif clearResponses[ i ] == main.ERROR:
4111 # Error in execution
4112 clearResults = main.FALSE
4113 else:
4114 # unexpected result
4115 clearResults = main.FALSE
4116 if clearResults != main.TRUE:
4117 main.log.error( "Error executing set clear" )
4118
4119 # Check if set is still correct
4120 size = len( onosSet )
4121 getResponses = []
4122 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004123 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004124 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004125 name="setTestGet-" + str( i ),
4126 args=[ onosSetName ] )
4127 threads.append( t )
4128 t.start()
4129 for t in threads:
4130 t.join()
4131 getResponses.append( t.result )
4132 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004133 for i in range( len( main.activeNodes ) ):
4134 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004135 if isinstance( getResponses[ i ], list):
4136 current = set( getResponses[ i ] )
4137 if len( current ) == len( getResponses[ i ] ):
4138 # no repeats
4139 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004140 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004141 " has incorrect view" +
4142 " of set " + onosSetName + ":\n" +
4143 str( getResponses[ i ] ) )
4144 main.log.debug( "Expected: " + str( onosSet ) )
4145 main.log.debug( "Actual: " + str( current ) )
4146 getResults = main.FALSE
4147 else:
4148 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004149 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004150 " has repeat elements in" +
4151 " set " + onosSetName + ":\n" +
4152 str( getResponses[ i ] ) )
4153 getResults = main.FALSE
4154 elif getResponses[ i ] == main.ERROR:
4155 getResults = main.FALSE
4156 sizeResponses = []
4157 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004158 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004159 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004160 name="setTestSize-" + str( i ),
4161 args=[ onosSetName ] )
4162 threads.append( t )
4163 t.start()
4164 for t in threads:
4165 t.join()
4166 sizeResponses.append( t.result )
4167 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004168 for i in range( len( main.activeNodes ) ):
4169 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004170 if size != sizeResponses[ i ]:
4171 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004172 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004173 " expected a size of " + str( size ) +
4174 " for set " + onosSetName +
4175 " but got " + str( sizeResponses[ i ] ) )
4176 clearResults = clearResults and getResults and sizeResults
4177 utilities.assert_equals( expect=main.TRUE,
4178 actual=clearResults,
4179 onpass="Set clear correct",
4180 onfail="Set clear was incorrect" )
4181
4182 main.step( "Distributed Set addAll()" )
4183 onosSet.update( addAllValue.split() )
4184 addResponses = []
4185 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004186 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004187 t = main.Thread( target=main.CLIs[i].setTestAdd,
Jon Hall5cf14d52015-07-16 12:15:19 -07004188 name="setTestAddAll-" + str( i ),
4189 args=[ onosSetName, addAllValue ] )
4190 threads.append( t )
4191 t.start()
4192 for t in threads:
4193 t.join()
4194 addResponses.append( t.result )
4195
4196 # main.TRUE = successfully changed the set
4197 # main.FALSE = action resulted in no change in set
4198 # main.ERROR - Some error in executing the function
4199 addAllResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004200 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004201 if addResponses[ i ] == main.TRUE:
4202 # All is well
4203 pass
4204 elif addResponses[ i ] == main.FALSE:
4205 # Already in set, probably fine
4206 pass
4207 elif addResponses[ i ] == main.ERROR:
4208 # Error in execution
4209 addAllResults = main.FALSE
4210 else:
4211 # unexpected result
4212 addAllResults = main.FALSE
4213 if addAllResults != main.TRUE:
4214 main.log.error( "Error executing set addAll" )
4215
4216 # Check if set is still correct
4217 size = len( onosSet )
4218 getResponses = []
4219 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004220 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004221 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004222 name="setTestGet-" + str( i ),
4223 args=[ onosSetName ] )
4224 threads.append( t )
4225 t.start()
4226 for t in threads:
4227 t.join()
4228 getResponses.append( t.result )
4229 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004230 for i in range( len( main.activeNodes ) ):
4231 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004232 if isinstance( getResponses[ i ], list):
4233 current = set( getResponses[ i ] )
4234 if len( current ) == len( getResponses[ i ] ):
4235 # no repeats
4236 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004237 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004238 " has incorrect view" +
4239 " of set " + onosSetName + ":\n" +
4240 str( getResponses[ i ] ) )
4241 main.log.debug( "Expected: " + str( onosSet ) )
4242 main.log.debug( "Actual: " + str( current ) )
4243 getResults = main.FALSE
4244 else:
4245 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004246 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004247 " has repeat elements in" +
4248 " set " + onosSetName + ":\n" +
4249 str( getResponses[ i ] ) )
4250 getResults = main.FALSE
4251 elif getResponses[ i ] == main.ERROR:
4252 getResults = main.FALSE
4253 sizeResponses = []
4254 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004255 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004256 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004257 name="setTestSize-" + str( i ),
4258 args=[ onosSetName ] )
4259 threads.append( t )
4260 t.start()
4261 for t in threads:
4262 t.join()
4263 sizeResponses.append( t.result )
4264 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004265 for i in range( len( main.activeNodes ) ):
4266 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004267 if size != sizeResponses[ i ]:
4268 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004269 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004270 " expected a size of " + str( size ) +
4271 " for set " + onosSetName +
4272 " but got " + str( sizeResponses[ i ] ) )
4273 addAllResults = addAllResults and getResults and sizeResults
4274 utilities.assert_equals( expect=main.TRUE,
4275 actual=addAllResults,
4276 onpass="Set addAll correct",
4277 onfail="Set addAll was incorrect" )
4278
4279 main.step( "Distributed Set retain()" )
4280 onosSet.intersection_update( retainValue.split() )
4281 retainResponses = []
4282 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004283 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004284 t = main.Thread( target=main.CLIs[i].setTestRemove,
Jon Hall5cf14d52015-07-16 12:15:19 -07004285 name="setTestRetain-" + str( i ),
4286 args=[ onosSetName, retainValue ],
4287 kwargs={ "retain": True } )
4288 threads.append( t )
4289 t.start()
4290 for t in threads:
4291 t.join()
4292 retainResponses.append( t.result )
4293
4294 # main.TRUE = successfully changed the set
4295 # main.FALSE = action resulted in no change in set
4296 # main.ERROR - Some error in executing the function
4297 retainResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004298 for i in range( len( main.activeNodes ) ):
Jon Hall5cf14d52015-07-16 12:15:19 -07004299 if retainResponses[ i ] == main.TRUE:
4300 # All is well
4301 pass
4302 elif retainResponses[ i ] == main.FALSE:
4303 # Already in set, probably fine
4304 pass
4305 elif retainResponses[ i ] == main.ERROR:
4306 # Error in execution
4307 retainResults = main.FALSE
4308 else:
4309 # unexpected result
4310 retainResults = main.FALSE
4311 if retainResults != main.TRUE:
4312 main.log.error( "Error executing set retain" )
4313
4314 # Check if set is still correct
4315 size = len( onosSet )
4316 getResponses = []
4317 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004318 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004319 t = main.Thread( target=main.CLIs[i].setTestGet,
Jon Hall5cf14d52015-07-16 12:15:19 -07004320 name="setTestGet-" + str( i ),
4321 args=[ onosSetName ] )
4322 threads.append( t )
4323 t.start()
4324 for t in threads:
4325 t.join()
4326 getResponses.append( t.result )
4327 getResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004328 for i in range( len( main.activeNodes ) ):
4329 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004330 if isinstance( getResponses[ i ], list):
4331 current = set( getResponses[ i ] )
4332 if len( current ) == len( getResponses[ i ] ):
4333 # no repeats
4334 if onosSet != current:
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004335 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004336 " has incorrect view" +
4337 " of set " + onosSetName + ":\n" +
4338 str( getResponses[ i ] ) )
4339 main.log.debug( "Expected: " + str( onosSet ) )
4340 main.log.debug( "Actual: " + str( current ) )
4341 getResults = main.FALSE
4342 else:
4343 # error, set is not a set
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004344 main.log.error( "ONOS" + node +
Jon Hall5cf14d52015-07-16 12:15:19 -07004345 " has repeat elements in" +
4346 " set " + onosSetName + ":\n" +
4347 str( getResponses[ i ] ) )
4348 getResults = main.FALSE
4349 elif getResponses[ i ] == main.ERROR:
4350 getResults = main.FALSE
4351 sizeResponses = []
4352 threads = []
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004353 for i in main.activeNodes:
Jon Halle1a3b752015-07-22 13:02:46 -07004354 t = main.Thread( target=main.CLIs[i].setTestSize,
Jon Hall5cf14d52015-07-16 12:15:19 -07004355 name="setTestSize-" + str( i ),
4356 args=[ onosSetName ] )
4357 threads.append( t )
4358 t.start()
4359 for t in threads:
4360 t.join()
4361 sizeResponses.append( t.result )
4362 sizeResults = main.TRUE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004363 for i in range( len( main.activeNodes ) ):
4364 node = str( main.activeNodes[i] + 1 )
Jon Hall5cf14d52015-07-16 12:15:19 -07004365 if size != sizeResponses[ i ]:
4366 sizeResults = main.FALSE
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004367 main.log.error( "ONOS" + node + " expected a size of " +
Jon Hall5cf14d52015-07-16 12:15:19 -07004368 str( size ) + " for set " + onosSetName +
4369 " but got " + str( sizeResponses[ i ] ) )
4370 retainResults = retainResults and getResults and sizeResults
4371 utilities.assert_equals( expect=main.TRUE,
4372 actual=retainResults,
4373 onpass="Set retain correct",
4374 onfail="Set retain was incorrect" )
4375
Jon Hall2a5002c2015-08-21 16:49:11 -07004376 # Transactional maps
4377 main.step( "Partitioned Transactional maps put" )
4378 tMapValue = "Testing"
4379 numKeys = 100
4380 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004381 node = main.activeNodes[0]
4382 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
Jon Hall2a5002c2015-08-21 16:49:11 -07004383 if len( putResponses ) == 100:
4384 for i in putResponses:
4385 if putResponses[ i ][ 'value' ] != tMapValue:
4386 putResult = False
4387 else:
4388 putResult = False
4389 if not putResult:
4390 main.log.debug( "Put response values: " + str( putResponses ) )
4391 utilities.assert_equals( expect=True,
4392 actual=putResult,
4393 onpass="Partitioned Transactional Map put successful",
4394 onfail="Partitioned Transactional Map put values are incorrect" )
4395
4396 main.step( "Partitioned Transactional maps get" )
4397 getCheck = True
4398 for n in range( 1, numKeys + 1 ):
4399 getResponses = []
4400 threads = []
4401 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004402 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004403 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4404 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004405 args=[ "Key" + str( n ) ] )
Jon Hall2a5002c2015-08-21 16:49:11 -07004406 threads.append( t )
4407 t.start()
4408 for t in threads:
4409 t.join()
4410 getResponses.append( t.result )
4411 for node in getResponses:
4412 if node != tMapValue:
4413 valueCheck = False
4414 if not valueCheck:
4415 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4416 main.log.warn( getResponses )
4417 getCheck = getCheck and valueCheck
4418 utilities.assert_equals( expect=True,
4419 actual=getCheck,
4420 onpass="Partitioned Transactional Map get values were correct",
4421 onfail="Partitioned Transactional Map values incorrect" )
4422
4423 main.step( "In-memory Transactional maps put" )
4424 tMapValue = "Testing"
4425 numKeys = 100
4426 putResult = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004427 node = main.activeNodes[0]
4428 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
Jon Hall2a5002c2015-08-21 16:49:11 -07004429 if len( putResponses ) == 100:
4430 for i in putResponses:
4431 if putResponses[ i ][ 'value' ] != tMapValue:
4432 putResult = False
4433 else:
4434 putResult = False
4435 if not putResult:
4436 main.log.debug( "Put response values: " + str( putResponses ) )
4437 utilities.assert_equals( expect=True,
4438 actual=putResult,
4439 onpass="In-Memory Transactional Map put successful",
4440 onfail="In-Memory Transactional Map put values are incorrect" )
4441
4442 main.step( "In-Memory Transactional maps get" )
4443 getCheck = True
4444 for n in range( 1, numKeys + 1 ):
4445 getResponses = []
4446 threads = []
4447 valueCheck = True
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004448 for i in main.activeNodes:
Jon Hall2a5002c2015-08-21 16:49:11 -07004449 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4450 name="TMap-get-" + str( i ),
Jon Hallb3ed8ed2015-10-28 16:43:55 -07004451 args=[ "Key" + str( n ) ],
Jon Hall2a5002c2015-08-21 16:49:11 -07004452 kwargs={ "inMemory": True } )
4453 threads.append( t )
4454 t.start()
4455 for t in threads:
4456 t.join()
4457 getResponses.append( t.result )
4458 for node in getResponses:
4459 if node != tMapValue:
4460 valueCheck = False
4461 if not valueCheck:
4462 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4463 main.log.warn( getResponses )
4464 getCheck = getCheck and valueCheck
4465 utilities.assert_equals( expect=True,
4466 actual=getCheck,
4467 onpass="In-Memory Transactional Map get values were correct",
4468 onfail="In-Memory Transactional Map values incorrect" )