blob: f7aec3bbfc53dd0014342f4f1d93f0e227cd5d5d [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
    a full network partition

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
26
27
class HAfullNetPartition:
    """TestON test class for ONOS HA behavior under a full network partition.

    Each CASE* method is a TestON test case; the framework passes in the
    shared `main` harness object.
    """

    def __init__( self ):
        # TestON requires a default attribute on test classes
        self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
51 import imp
52 import pexpect
53 import time
Jon Halla440e872016-03-31 15:15:50 -070054 import json
Jon Hall6e709752016-02-01 13:38:46 -080055 main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
56 "initialization" )
57 main.case( "Setting up test environment" )
58 main.caseExplanation = "Setup the test environment including " +\
59 "installing ONOS, starting Mininet and ONOS" +\
60 "cli sessions."
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
69 main.numCtrls = int( main.params[ 'num_controllers' ] )
70 if main.ONOSbench.maxNodes:
71 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
74 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070081 # These are for csv plotting in jenkins
82 global labels
83 global data
84 labels = []
85 data = []
Jon Hall6e709752016-02-01 13:38:46 -080086
87 # FIXME: just get controller port from params?
88 # TODO: do we really need all these?
89 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
90 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
91 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
92 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
93 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
94 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
95 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
96
97 try:
Jon Hall53c5e662016-04-13 16:06:56 -070098 from tests.HA.dependencies.HA import HA
Jon Hall41d39f12016-04-11 22:54:35 -070099 main.HA = HA()
Jon Hall6e709752016-02-01 13:38:46 -0800100 except Exception as e:
101 main.log.exception( e )
102 main.cleanup()
103 main.exit()
104
105 main.CLIs = []
106 main.nodes = []
107 ipList = []
108 for i in range( 1, main.numCtrls + 1 ):
109 try:
110 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
111 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
112 ipList.append( main.nodes[ -1 ].ip_address )
113 except AttributeError:
114 break
115
116 main.step( "Create cell file" )
117 cellAppString = main.params[ 'ENV' ][ 'appString' ]
118 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
119 main.Mininet1.ip_address,
120 cellAppString, ipList )
121 main.step( "Applying cell variable to environment" )
122 cellResult = main.ONOSbench.setCell( cellName )
123 verifyResult = main.ONOSbench.verifyCell()
124
125 # FIXME:this is short term fix
126 main.log.info( "Removing raft logs" )
127 main.ONOSbench.onosRemoveRaftLogs()
128
129 main.log.info( "Uninstalling ONOS" )
130 for node in main.nodes:
131 main.ONOSbench.onosUninstall( node.ip_address )
132
133 # Make sure ONOS is DEAD
134 main.log.info( "Killing any ONOS processes" )
135 killResults = main.TRUE
136 for node in main.nodes:
137 killed = main.ONOSbench.onosKill( node.ip_address )
138 killResults = killResults and killed
139
140 cleanInstallResult = main.TRUE
141 gitPullResult = main.TRUE
142
143 main.step( "Starting Mininet" )
144 # scp topo file to mininet
145 # TODO: move to params?
146 topoName = "obelisk.py"
147 filePath = main.ONOSbench.home + "/tools/test/topos/"
148 main.ONOSbench.scp( main.Mininet1,
149 filePath + topoName,
150 main.Mininet1.home,
151 direction="to" )
152 mnResult = main.Mininet1.startNet( )
153 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
154 onpass="Mininet Started",
155 onfail="Error starting Mininet" )
156
157 main.step( "Git checkout and pull " + gitBranch )
158 if PULLCODE:
159 main.ONOSbench.gitCheckout( gitBranch )
160 gitPullResult = main.ONOSbench.gitPull()
161 # values of 1 or 3 are good
162 utilities.assert_lesser( expect=0, actual=gitPullResult,
163 onpass="Git pull successful",
164 onfail="Git pull failed" )
165 main.ONOSbench.getVersion( report=True )
166
167 main.step( "Using mvn clean install" )
168 cleanInstallResult = main.TRUE
169 if PULLCODE and gitPullResult == main.TRUE:
170 cleanInstallResult = main.ONOSbench.cleanInstall()
171 else:
172 main.log.warn( "Did not pull new code so skipping mvn " +
173 "clean install" )
174 utilities.assert_equals( expect=main.TRUE,
175 actual=cleanInstallResult,
176 onpass="MCI successful",
177 onfail="MCI failed" )
178 # GRAPHS
179 # NOTE: important params here:
180 # job = name of Jenkins job
181 # Plot Name = Plot-HA, only can be used if multiple plots
182 # index = The number of the graph under plot name
183 job = "HAfullNetPartition"
184 plotName = "Plot-HA"
Jon Hall676e5432016-09-26 11:32:50 -0700185 index = "2"
Jon Hall6e709752016-02-01 13:38:46 -0800186 graphs = '<ac:structured-macro ac:name="html">\n'
187 graphs += '<ac:plain-text-body><![CDATA[\n'
188 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
189 '/plot/' + plotName + '/getPlot?index=' + index +\
190 '&width=500&height=300"' +\
191 'noborder="0" width="500" height="300" scrolling="yes" ' +\
192 'seamless="seamless"></iframe>\n'
193 graphs += ']]></ac:plain-text-body>\n'
194 graphs += '</ac:structured-macro>\n'
195 main.log.wiki(graphs)
196
197 main.step( "Creating ONOS package" )
198 # copy gen-partions file to ONOS
199 # NOTE: this assumes TestON and ONOS are on the same machine
Jon Hall53c5e662016-04-13 16:06:56 -0700200 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
Jon Hall6e709752016-02-01 13:38:46 -0800201 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
202 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
203 main.ONOSbench.ip_address,
204 srcFile,
205 dstDir,
206 pwd=main.ONOSbench.pwd,
207 direction="from" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700208 packageResult = main.ONOSbench.buckBuild()
Jon Hall6e709752016-02-01 13:38:46 -0800209 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
210 onpass="ONOS package successful",
211 onfail="ONOS package failed" )
212
213 main.step( "Installing ONOS package" )
214 onosInstallResult = main.TRUE
215 for node in main.nodes:
216 tmpResult = main.ONOSbench.onosInstall( options="-f",
217 node=node.ip_address )
218 onosInstallResult = onosInstallResult and tmpResult
219 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
220 onpass="ONOS install successful",
221 onfail="ONOS install failed" )
222 # clean up gen-partitions file
223 try:
224 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
225 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
226 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
227 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
228 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
229 str( main.ONOSbench.handle.before ) )
230 except ( pexpect.TIMEOUT, pexpect.EOF ):
231 main.log.exception( "ONOSbench: pexpect exception found:" +
232 main.ONOSbench.handle.before )
233 main.cleanup()
234 main.exit()
235
You Wangf5de25b2017-01-06 15:13:01 -0800236 main.step( "Set up ONOS secure SSH" )
237 secureSshResult = main.TRUE
238 for node in main.nodes:
239 secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
240 utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
241 onpass="Test step PASS",
242 onfail="Test step FAIL" )
243
Jon Hall6e709752016-02-01 13:38:46 -0800244 main.step( "Checking if ONOS is up yet" )
245 for i in range( 2 ):
246 onosIsupResult = main.TRUE
247 for node in main.nodes:
248 started = main.ONOSbench.isup( node.ip_address )
249 if not started:
250 main.log.error( node.name + " hasn't started" )
251 onosIsupResult = onosIsupResult and started
252 if onosIsupResult == main.TRUE:
253 break
254 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
255 onpass="ONOS startup successful",
256 onfail="ONOS startup failed" )
257
Jon Hall6509dbf2016-06-21 17:01:17 -0700258 main.step( "Starting ONOS CLI sessions" )
Jon Hall6e709752016-02-01 13:38:46 -0800259 cliResults = main.TRUE
260 threads = []
261 for i in range( main.numCtrls ):
262 t = main.Thread( target=main.CLIs[i].startOnosCli,
263 name="startOnosCli-" + str( i ),
264 args=[main.nodes[i].ip_address] )
265 threads.append( t )
266 t.start()
267
268 for t in threads:
269 t.join()
270 cliResults = cliResults and t.result
271 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
272 onpass="ONOS cli startup successful",
273 onfail="ONOS cli startup failed" )
274
275 # Create a list of active nodes for use when some nodes are stopped
276 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
277
278 if main.params[ 'tcpdump' ].lower() == "true":
279 main.step( "Start Packet Capture MN" )
280 main.Mininet2.startTcpdump(
281 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
282 + "-MN.pcap",
283 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
284 port=main.params[ 'MNtcpdump' ][ 'port' ] )
285
Jon Halla440e872016-03-31 15:15:50 -0700286 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -0700287 nodeResults = utilities.retry( main.HA.nodesCheck,
288 False,
289 args=[main.activeNodes],
290 attempts=5 )
Jon Halla440e872016-03-31 15:15:50 -0700291
Jon Hall41d39f12016-04-11 22:54:35 -0700292 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Halla440e872016-03-31 15:15:50 -0700293 onpass="Nodes check successful",
294 onfail="Nodes check NOT successful" )
295
296 if not nodeResults:
Jon Hall7ac7bc32016-05-05 10:57:02 -0700297 for i in main.activeNodes:
298 cli = main.CLIs[i]
Jon Halla440e872016-03-31 15:15:50 -0700299 main.log.debug( "{} components not ACTIVE: \n{}".format(
300 cli.name,
301 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -0800302 main.log.error( "Failed to start ONOS, stopping test" )
303 main.cleanup()
304 main.exit()
305
Jon Hall172b7ba2016-04-07 18:12:20 -0700306 main.step( "Activate apps defined in the params file" )
307 # get data from the params
308 apps = main.params.get( 'apps' )
309 if apps:
310 apps = apps.split(',')
311 main.log.warn( apps )
312 activateResult = True
313 for app in apps:
314 main.CLIs[ 0 ].app( app, "Activate" )
315 # TODO: check this worked
316 time.sleep( 10 ) # wait for apps to activate
317 for app in apps:
318 state = main.CLIs[ 0 ].appStatus( app )
319 if state == "ACTIVE":
Jon Hall937bc812017-01-31 16:44:10 -0800320 activateResult = activateResult and True
Jon Hall172b7ba2016-04-07 18:12:20 -0700321 else:
322 main.log.error( "{} is in {} state".format( app, state ) )
Jon Hall937bc812017-01-31 16:44:10 -0800323 activateResult = False
Jon Hall172b7ba2016-04-07 18:12:20 -0700324 utilities.assert_equals( expect=True,
325 actual=activateResult,
326 onpass="Successfully activated apps",
327 onfail="Failed to activate apps" )
328 else:
329 main.log.warn( "No apps were specified to be loaded after startup" )
330
331 main.step( "Set ONOS configurations" )
332 config = main.params.get( 'ONOS_Configuration' )
333 if config:
334 main.log.debug( config )
335 checkResult = main.TRUE
336 for component in config:
337 for setting in config[component]:
338 value = config[component][setting]
339 check = main.CLIs[ 0 ].setCfg( component, setting, value )
340 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
341 checkResult = check and checkResult
342 utilities.assert_equals( expect=main.TRUE,
343 actual=checkResult,
344 onpass="Successfully set config",
345 onfail="Failed to set config" )
346 else:
347 main.log.warn( "No configurations were specified to be changed after startup" )
348
Jon Hall9d2dcad2016-04-08 10:15:20 -0700349 main.step( "App Ids check" )
350 appCheck = main.TRUE
351 threads = []
352 for i in main.activeNodes:
353 t = main.Thread( target=main.CLIs[i].appToIDCheck,
354 name="appToIDCheck-" + str( i ),
355 args=[] )
356 threads.append( t )
357 t.start()
358
359 for t in threads:
360 t.join()
361 appCheck = appCheck and t.result
362 if appCheck != main.TRUE:
363 node = main.activeNodes[0]
364 main.log.warn( main.CLIs[node].apps() )
365 main.log.warn( main.CLIs[node].appIDs() )
366 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
367 onpass="App Ids seem to be correct",
368 onfail="Something is wrong with app Ids" )
369
Jon Hall6e709752016-02-01 13:38:46 -0800370 def CASE2( self, main ):
371 """
372 Assign devices to controllers
373 """
374 import re
375 assert main.numCtrls, "main.numCtrls not defined"
376 assert main, "main not defined"
377 assert utilities.assert_equals, "utilities.assert_equals not defined"
378 assert main.CLIs, "main.CLIs not defined"
379 assert main.nodes, "main.nodes not defined"
380 assert ONOS1Port, "ONOS1Port not defined"
381 assert ONOS2Port, "ONOS2Port not defined"
382 assert ONOS3Port, "ONOS3Port not defined"
383 assert ONOS4Port, "ONOS4Port not defined"
384 assert ONOS5Port, "ONOS5Port not defined"
385 assert ONOS6Port, "ONOS6Port not defined"
386 assert ONOS7Port, "ONOS7Port not defined"
387
388 main.case( "Assigning devices to controllers" )
389 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
390 "and check that an ONOS node becomes the " +\
391 "master of the device."
392 main.step( "Assign switches to controllers" )
393
394 ipList = []
395 for i in range( main.numCtrls ):
396 ipList.append( main.nodes[ i ].ip_address )
397 swList = []
398 for i in range( 1, 29 ):
399 swList.append( "s" + str( i ) )
400 main.Mininet1.assignSwController( sw=swList, ip=ipList )
401
402 mastershipCheck = main.TRUE
403 for i in range( 1, 29 ):
404 response = main.Mininet1.getSwController( "s" + str( i ) )
405 try:
406 main.log.info( str( response ) )
407 except Exception:
408 main.log.info( repr( response ) )
409 for node in main.nodes:
410 if re.search( "tcp:" + node.ip_address, response ):
411 mastershipCheck = mastershipCheck and main.TRUE
412 else:
413 main.log.error( "Error, node " + node.ip_address + " is " +
414 "not in the list of controllers s" +
415 str( i ) + " is connecting to." )
416 mastershipCheck = main.FALSE
417 utilities.assert_equals(
418 expect=main.TRUE,
419 actual=mastershipCheck,
420 onpass="Switch mastership assigned correctly",
421 onfail="Switches not assigned correctly to controllers" )
422
423 def CASE21( self, main ):
424 """
425 Assign mastership to controllers
426 """
427 import time
428 assert main.numCtrls, "main.numCtrls not defined"
429 assert main, "main not defined"
430 assert utilities.assert_equals, "utilities.assert_equals not defined"
431 assert main.CLIs, "main.CLIs not defined"
432 assert main.nodes, "main.nodes not defined"
433 assert ONOS1Port, "ONOS1Port not defined"
434 assert ONOS2Port, "ONOS2Port not defined"
435 assert ONOS3Port, "ONOS3Port not defined"
436 assert ONOS4Port, "ONOS4Port not defined"
437 assert ONOS5Port, "ONOS5Port not defined"
438 assert ONOS6Port, "ONOS6Port not defined"
439 assert ONOS7Port, "ONOS7Port not defined"
440
441 main.case( "Assigning Controller roles for switches" )
442 main.caseExplanation = "Check that ONOS is connected to each " +\
443 "device. Then manually assign" +\
444 " mastership to specific ONOS nodes using" +\
445 " 'device-role'"
446 main.step( "Assign mastership of switches to specific controllers" )
447 # Manually assign mastership to the controller we want
448 roleCall = main.TRUE
449
450 ipList = [ ]
451 deviceList = []
452 onosCli = main.CLIs[ main.activeNodes[0] ]
453 try:
454 # Assign mastership to specific controllers. This assignment was
455 # determined for a 7 node cluser, but will work with any sized
456 # cluster
457 for i in range( 1, 29 ): # switches 1 through 28
458 # set up correct variables:
459 if i == 1:
460 c = 0
461 ip = main.nodes[ c ].ip_address # ONOS1
462 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
463 elif i == 2:
464 c = 1 % main.numCtrls
465 ip = main.nodes[ c ].ip_address # ONOS2
466 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
467 elif i == 3:
468 c = 1 % main.numCtrls
469 ip = main.nodes[ c ].ip_address # ONOS2
470 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
471 elif i == 4:
472 c = 3 % main.numCtrls
473 ip = main.nodes[ c ].ip_address # ONOS4
474 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
475 elif i == 5:
476 c = 2 % main.numCtrls
477 ip = main.nodes[ c ].ip_address # ONOS3
478 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
479 elif i == 6:
480 c = 2 % main.numCtrls
481 ip = main.nodes[ c ].ip_address # ONOS3
482 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
483 elif i == 7:
484 c = 5 % main.numCtrls
485 ip = main.nodes[ c ].ip_address # ONOS6
486 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
487 elif i >= 8 and i <= 17:
488 c = 4 % main.numCtrls
489 ip = main.nodes[ c ].ip_address # ONOS5
490 dpid = '3' + str( i ).zfill( 3 )
491 deviceId = onosCli.getDevice( dpid ).get( 'id' )
492 elif i >= 18 and i <= 27:
493 c = 6 % main.numCtrls
494 ip = main.nodes[ c ].ip_address # ONOS7
495 dpid = '6' + str( i ).zfill( 3 )
496 deviceId = onosCli.getDevice( dpid ).get( 'id' )
497 elif i == 28:
498 c = 0
499 ip = main.nodes[ c ].ip_address # ONOS1
500 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
501 else:
502 main.log.error( "You didn't write an else statement for " +
503 "switch s" + str( i ) )
504 roleCall = main.FALSE
505 # Assign switch
506 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
507 # TODO: make this controller dynamic
508 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
509 ipList.append( ip )
510 deviceList.append( deviceId )
511 except ( AttributeError, AssertionError ):
512 main.log.exception( "Something is wrong with ONOS device view" )
513 main.log.info( onosCli.devices() )
514 utilities.assert_equals(
515 expect=main.TRUE,
516 actual=roleCall,
517 onpass="Re-assigned switch mastership to designated controller",
518 onfail="Something wrong with deviceRole calls" )
519
520 main.step( "Check mastership was correctly assigned" )
521 roleCheck = main.TRUE
522 # NOTE: This is due to the fact that device mastership change is not
523 # atomic and is actually a multi step process
524 time.sleep( 5 )
525 for i in range( len( ipList ) ):
526 ip = ipList[i]
527 deviceId = deviceList[i]
528 # Check assignment
529 master = onosCli.getRole( deviceId ).get( 'master' )
530 if ip in master:
531 roleCheck = roleCheck and main.TRUE
532 else:
533 roleCheck = roleCheck and main.FALSE
534 main.log.error( "Error, controller " + ip + " is not" +
535 " master " + "of device " +
536 str( deviceId ) + ". Master is " +
537 repr( master ) + "." )
538 utilities.assert_equals(
539 expect=main.TRUE,
540 actual=roleCheck,
541 onpass="Switches were successfully reassigned to designated " +
542 "controller",
543 onfail="Switches were not successfully reassigned" )
544
545 def CASE3( self, main ):
546 """
547 Assign intents
548 """
549 import time
550 import json
551 assert main.numCtrls, "main.numCtrls not defined"
552 assert main, "main not defined"
553 assert utilities.assert_equals, "utilities.assert_equals not defined"
554 assert main.CLIs, "main.CLIs not defined"
555 assert main.nodes, "main.nodes not defined"
556 main.case( "Adding host Intents" )
557 main.caseExplanation = "Discover hosts by using pingall then " +\
558 "assign predetermined host-to-host intents." +\
559 " After installation, check that the intent" +\
560 " is distributed to all nodes and the state" +\
561 " is INSTALLED"
562
563 # install onos-app-fwd
564 main.step( "Install reactive forwarding app" )
565 onosCli = main.CLIs[ main.activeNodes[0] ]
566 installResults = onosCli.activateApp( "org.onosproject.fwd" )
567 utilities.assert_equals( expect=main.TRUE, actual=installResults,
568 onpass="Install fwd successful",
569 onfail="Install fwd failed" )
570
571 main.step( "Check app ids" )
572 appCheck = main.TRUE
573 threads = []
574 for i in main.activeNodes:
575 t = main.Thread( target=main.CLIs[i].appToIDCheck,
576 name="appToIDCheck-" + str( i ),
577 args=[] )
578 threads.append( t )
579 t.start()
580
581 for t in threads:
582 t.join()
583 appCheck = appCheck and t.result
584 if appCheck != main.TRUE:
585 main.log.warn( onosCli.apps() )
586 main.log.warn( onosCli.appIDs() )
587 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
588 onpass="App Ids seem to be correct",
589 onfail="Something is wrong with app Ids" )
590
591 main.step( "Discovering Hosts( Via pingall for now )" )
592 # FIXME: Once we have a host discovery mechanism, use that instead
593 # REACTIVE FWD test
594 pingResult = main.FALSE
595 passMsg = "Reactive Pingall test passed"
596 time1 = time.time()
597 pingResult = main.Mininet1.pingall()
598 time2 = time.time()
599 if not pingResult:
600 main.log.warn("First pingall failed. Trying again...")
601 pingResult = main.Mininet1.pingall()
602 passMsg += " on the second try"
603 utilities.assert_equals(
604 expect=main.TRUE,
605 actual=pingResult,
606 onpass= passMsg,
607 onfail="Reactive Pingall failed, " +
608 "one or more ping pairs failed" )
609 main.log.info( "Time for pingall: %2f seconds" %
610 ( time2 - time1 ) )
611 # timeout for fwd flows
612 time.sleep( 11 )
613 # uninstall onos-app-fwd
614 main.step( "Uninstall reactive forwarding app" )
615 node = main.activeNodes[0]
616 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
617 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
618 onpass="Uninstall fwd successful",
619 onfail="Uninstall fwd failed" )
620
621 main.step( "Check app ids" )
622 threads = []
623 appCheck2 = main.TRUE
624 for i in main.activeNodes:
625 t = main.Thread( target=main.CLIs[i].appToIDCheck,
626 name="appToIDCheck-" + str( i ),
627 args=[] )
628 threads.append( t )
629 t.start()
630
631 for t in threads:
632 t.join()
633 appCheck2 = appCheck2 and t.result
634 if appCheck2 != main.TRUE:
635 node = main.activeNodes[0]
636 main.log.warn( main.CLIs[node].apps() )
637 main.log.warn( main.CLIs[node].appIDs() )
638 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
639 onpass="App Ids seem to be correct",
640 onfail="Something is wrong with app Ids" )
641
642 main.step( "Add host intents via cli" )
643 intentIds = []
644 # TODO: move the host numbers to params
645 # Maybe look at all the paths we ping?
646 intentAddResult = True
647 hostResult = main.TRUE
648 for i in range( 8, 18 ):
649 main.log.info( "Adding host intent between h" + str( i ) +
650 " and h" + str( i + 10 ) )
651 host1 = "00:00:00:00:00:" + \
652 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
653 host2 = "00:00:00:00:00:" + \
654 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
655 # NOTE: getHost can return None
656 host1Dict = onosCli.getHost( host1 )
657 host2Dict = onosCli.getHost( host2 )
658 host1Id = None
659 host2Id = None
660 if host1Dict and host2Dict:
661 host1Id = host1Dict.get( 'id', None )
662 host2Id = host2Dict.get( 'id', None )
663 if host1Id and host2Id:
664 nodeNum = ( i % len( main.activeNodes ) )
665 node = main.activeNodes[nodeNum]
666 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
667 if tmpId:
668 main.log.info( "Added intent with id: " + tmpId )
669 intentIds.append( tmpId )
670 else:
671 main.log.error( "addHostIntent returned: " +
672 repr( tmpId ) )
673 else:
674 main.log.error( "Error, getHost() failed for h" + str( i ) +
675 " and/or h" + str( i + 10 ) )
676 node = main.activeNodes[0]
677 hosts = main.CLIs[node].hosts()
678 main.log.warn( "Hosts output: " )
679 try:
680 main.log.warn( json.dumps( json.loads( hosts ),
681 sort_keys=True,
682 indent=4,
683 separators=( ',', ': ' ) ) )
684 except ( ValueError, TypeError ):
685 main.log.warn( repr( hosts ) )
686 hostResult = main.FALSE
687 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
688 onpass="Found a host id for each host",
689 onfail="Error looking up host ids" )
690
691 intentStart = time.time()
692 onosIds = onosCli.getAllIntentsId()
693 main.log.info( "Submitted intents: " + str( intentIds ) )
694 main.log.info( "Intents in ONOS: " + str( onosIds ) )
695 for intent in intentIds:
696 if intent in onosIds:
697 pass # intent submitted is in onos
698 else:
699 intentAddResult = False
700 if intentAddResult:
701 intentStop = time.time()
702 else:
703 intentStop = None
704 # Print the intent states
705 intents = onosCli.intents()
706 intentStates = []
707 installedCheck = True
708 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
709 count = 0
710 try:
711 for intent in json.loads( intents ):
712 state = intent.get( 'state', None )
713 if "INSTALLED" not in state:
714 installedCheck = False
715 intentId = intent.get( 'id', None )
716 intentStates.append( ( intentId, state ) )
717 except ( ValueError, TypeError ):
718 main.log.exception( "Error parsing intents" )
719 # add submitted intents not in the store
720 tmplist = [ i for i, s in intentStates ]
721 missingIntents = False
722 for i in intentIds:
723 if i not in tmplist:
724 intentStates.append( ( i, " - " ) )
725 missingIntents = True
726 intentStates.sort()
727 for i, s in intentStates:
728 count += 1
729 main.log.info( "%-6s%-15s%-15s" %
730 ( str( count ), str( i ), str( s ) ) )
731 leaders = onosCli.leaders()
732 try:
733 missing = False
734 if leaders:
735 parsedLeaders = json.loads( leaders )
736 main.log.warn( json.dumps( parsedLeaders,
737 sort_keys=True,
738 indent=4,
739 separators=( ',', ': ' ) ) )
740 # check for all intent partitions
741 topics = []
742 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700743 topics.append( "work-partition-" + str( i ) )
Jon Hall6e709752016-02-01 13:38:46 -0800744 main.log.debug( topics )
745 ONOStopics = [ j['topic'] for j in parsedLeaders ]
746 for topic in topics:
747 if topic not in ONOStopics:
748 main.log.error( "Error: " + topic +
749 " not in leaders" )
750 missing = True
751 else:
752 main.log.error( "leaders() returned None" )
753 except ( ValueError, TypeError ):
754 main.log.exception( "Error parsing leaders" )
755 main.log.error( repr( leaders ) )
756 # Check all nodes
757 if missing:
758 for i in main.activeNodes:
759 response = main.CLIs[i].leaders( jsonFormat=False)
760 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
761 str( response ) )
762
763 partitions = onosCli.partitions()
764 try:
765 if partitions :
766 parsedPartitions = json.loads( partitions )
767 main.log.warn( json.dumps( parsedPartitions,
768 sort_keys=True,
769 indent=4,
770 separators=( ',', ': ' ) ) )
771 # TODO check for a leader in all paritions
772 # TODO check for consistency among nodes
773 else:
774 main.log.error( "partitions() returned None" )
775 except ( ValueError, TypeError ):
776 main.log.exception( "Error parsing partitions" )
777 main.log.error( repr( partitions ) )
778 pendingMap = onosCli.pendingMap()
779 try:
780 if pendingMap :
781 parsedPending = json.loads( pendingMap )
782 main.log.warn( json.dumps( parsedPending,
783 sort_keys=True,
784 indent=4,
785 separators=( ',', ': ' ) ) )
786 # TODO check something here?
787 else:
788 main.log.error( "pendingMap() returned None" )
789 except ( ValueError, TypeError ):
790 main.log.exception( "Error parsing pending map" )
791 main.log.error( repr( pendingMap ) )
792
793 intentAddResult = bool( intentAddResult and not missingIntents and
794 installedCheck )
795 if not intentAddResult:
796 main.log.error( "Error in pushing host intents to ONOS" )
797
798 main.step( "Intent Anti-Entropy dispersion" )
799 for j in range(100):
800 correct = True
801 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
802 for i in main.activeNodes:
803 onosIds = []
804 ids = main.CLIs[i].getAllIntentsId()
805 onosIds.append( ids )
806 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
807 str( sorted( onosIds ) ) )
808 if sorted( ids ) != sorted( intentIds ):
809 main.log.warn( "Set of intent IDs doesn't match" )
810 correct = False
811 break
812 else:
813 intents = json.loads( main.CLIs[i].intents() )
814 for intent in intents:
815 if intent[ 'state' ] != "INSTALLED":
816 main.log.warn( "Intent " + intent[ 'id' ] +
817 " is " + intent[ 'state' ] )
818 correct = False
819 break
820 if correct:
821 break
822 else:
823 time.sleep(1)
824 if not intentStop:
825 intentStop = time.time()
826 global gossipTime
827 gossipTime = intentStop - intentStart
828 main.log.info( "It took about " + str( gossipTime ) +
829 " seconds for all intents to appear in each node" )
830 gossipPeriod = int( main.params['timers']['gossip'] )
831 maxGossipTime = gossipPeriod * len( main.activeNodes )
832 utilities.assert_greater_equals(
833 expect=maxGossipTime, actual=gossipTime,
834 onpass="ECM anti-entropy for intents worked within " +
835 "expected time",
836 onfail="Intent ECM anti-entropy took too long. " +
837 "Expected time:{}, Actual time:{}".format( maxGossipTime,
838 gossipTime ) )
839 if gossipTime <= maxGossipTime:
840 intentAddResult = True
841
842 if not intentAddResult or "key" in pendingMap:
843 import time
844 installedCheck = True
845 main.log.info( "Sleeping 60 seconds to see if intents are found" )
846 time.sleep( 60 )
847 onosIds = onosCli.getAllIntentsId()
848 main.log.info( "Submitted intents: " + str( intentIds ) )
849 main.log.info( "Intents in ONOS: " + str( onosIds ) )
850 # Print the intent states
851 intents = onosCli.intents()
852 intentStates = []
853 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
854 count = 0
855 try:
856 for intent in json.loads( intents ):
857 # Iter through intents of a node
858 state = intent.get( 'state', None )
859 if "INSTALLED" not in state:
860 installedCheck = False
861 intentId = intent.get( 'id', None )
862 intentStates.append( ( intentId, state ) )
863 except ( ValueError, TypeError ):
864 main.log.exception( "Error parsing intents" )
865 # add submitted intents not in the store
866 tmplist = [ i for i, s in intentStates ]
867 for i in intentIds:
868 if i not in tmplist:
869 intentStates.append( ( i, " - " ) )
870 intentStates.sort()
871 for i, s in intentStates:
872 count += 1
873 main.log.info( "%-6s%-15s%-15s" %
874 ( str( count ), str( i ), str( s ) ) )
875 leaders = onosCli.leaders()
876 try:
877 missing = False
878 if leaders:
879 parsedLeaders = json.loads( leaders )
880 main.log.warn( json.dumps( parsedLeaders,
881 sort_keys=True,
882 indent=4,
883 separators=( ',', ': ' ) ) )
884 # check for all intent partitions
885 # check for election
886 topics = []
887 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700888 topics.append( "work-partition-" + str( i ) )
Jon Hall6e709752016-02-01 13:38:46 -0800889 # FIXME: this should only be after we start the app
890 topics.append( "org.onosproject.election" )
891 main.log.debug( topics )
892 ONOStopics = [ j['topic'] for j in parsedLeaders ]
893 for topic in topics:
894 if topic not in ONOStopics:
895 main.log.error( "Error: " + topic +
896 " not in leaders" )
897 missing = True
898 else:
899 main.log.error( "leaders() returned None" )
900 except ( ValueError, TypeError ):
901 main.log.exception( "Error parsing leaders" )
902 main.log.error( repr( leaders ) )
903 # Check all nodes
904 if missing:
905 for i in main.activeNodes:
906 node = main.CLIs[i]
907 response = node.leaders( jsonFormat=False)
908 main.log.warn( str( node.name ) + " leaders output: \n" +
909 str( response ) )
910
911 partitions = onosCli.partitions()
912 try:
913 if partitions :
914 parsedPartitions = json.loads( partitions )
915 main.log.warn( json.dumps( parsedPartitions,
916 sort_keys=True,
917 indent=4,
918 separators=( ',', ': ' ) ) )
919 # TODO check for a leader in all paritions
920 # TODO check for consistency among nodes
921 else:
922 main.log.error( "partitions() returned None" )
923 except ( ValueError, TypeError ):
924 main.log.exception( "Error parsing partitions" )
925 main.log.error( repr( partitions ) )
926 pendingMap = onosCli.pendingMap()
927 try:
928 if pendingMap :
929 parsedPending = json.loads( pendingMap )
930 main.log.warn( json.dumps( parsedPending,
931 sort_keys=True,
932 indent=4,
933 separators=( ',', ': ' ) ) )
934 # TODO check something here?
935 else:
936 main.log.error( "pendingMap() returned None" )
937 except ( ValueError, TypeError ):
938 main.log.exception( "Error parsing pending map" )
939 main.log.error( repr( pendingMap ) )
940
    def CASE4( self, main ):
        """
        Ping across added host intents

        Waits (up to ~40s) for all intents to reach INSTALLED, pings across
        each host-intent pair ( h8..h17 <-> h18..h27 ), and dumps leaders /
        partitions / pendingMap state for debugging. If intents were not all
        INSTALLED, waits another 60 seconds, re-dumps state, and pings again.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All CLI queries in this case go through one active node's CLI
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll intent states once per second, up to 40 tries, until every
        # intent reports an INSTALLED state
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        # h{i} is expected to reach h{i+10} through the intents added in CASE3
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        # NOTE(review): this dumps per-node leaders output when the check
        #               PASSED; other cases in this file gate the same dump
        #               on failure ( "if missing:" ) — confirm whether
        #               "if not topicCheck" was intended here
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # Second chance: if intents never all reached INSTALLED, wait a
        # minute, dump state again, and retry the pings once
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1222
    def CASE5( self, main ):
        """
        Reading state of ONOS

        Snapshot cluster state from every active node — switch mastership,
        intents, flows, OF tables, and topology ( devices / hosts / ports /
        links / clusters ) — check that the views are consistent across
        nodes and match Mininet, and stash the agreed state in the globals
        mastershipState, intentState, flowState and flows so later cases can
        compare against it after the failure. Also kicks off long-running
        background pings via Mininet2.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents,flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        # One thread per active node; result is ANDed across all nodes
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output is empty or contains "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Consistency == every node's raw roles output equals node 0's
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's roles for debugging the inconsistency
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            # Save the agreed-upon mastership for later comparison
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE: sorted() here sorts the characters of the raw JSON strings,
        #       making the comparison order-insensitive at the text level
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent        ONOS1        ONOS2    ...
            # 0x01     INSTALLED    INSTALLING
            # ...        ...            ...
            # ...        ...            ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            # Save the agreed-upon intents for later comparison
            intentCheck = main.TRUE
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep(30)
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[i] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        # Only the flow COUNT is compared, not flow contents
        # NOTE(review): if any entry of ONOSFlowsJson is None ( a failed
        #               node above ), len( None ) raises TypeError here and
        #               is not caught — confirm this path is acceptable
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            # Save the agreed-upon flows for later comparison
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        global flows
        flows = []
        # Topology has switches s1..s28; collect each switch's OF1.3 table
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        # Fire-and-forget background pings between the PING source/target
        # pairs from the params file; these run for 500 seconds
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source1' ],
            target=main.params[ 'PING' ][ 'target1' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source2' ],
            target=main.params[ 'PING' ][ 'target2' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source3' ],
            target=main.params[ 'PING' ][ 'target3' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source4' ],
            target=main.params[ 'PING' ][ 'target4' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source5' ],
            target=main.params[ 'PING' ][ 'target5' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source6' ],
            target=main.params[ 'PING' ][ 'target6' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source7' ],
            target=main.params[ 'PING' ][ 'target7' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source8' ],
            target=main.params[ 'PING' ][ 'target8' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source9' ],
            target=main.params[ 'PING' ][ 'target9' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source10' ],
            target=main.params[ 'PING' ][ 'target10' ],
            pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
        # Gather devices, hosts, ports, links and clusters from every
        # active node, one threaded CLI call per node per query
        devices = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].devices,
                             name="devices-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            devices.append( t.result )
        hosts = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].hosts,
                             name="hosts-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            try:
                hosts.append( json.loads( t.result ) )
            except ( ValueError, TypeError ):
                # FIXME: better handling of this, print which node
                # Maybe use thread name?
                main.log.exception( "Error parsing json output of hosts" )
                main.log.warn( repr( t.result ) )
                hosts.append( None )

        ports = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].ports,
                             name="ports-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ports.append( t.result )
        links = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].links,
                             name="links-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            links.append( t.result )
        clusters = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].clusters,
                             name="clusters-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            clusters.append( t.result )
        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Host view is consistent across ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Each host has an IP address" )
        ipResult = main.TRUE
        for controller in range( 0, len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ]:
                for host in hosts[ controller ]:
                    if not host.get( 'ipAddresses', [ ] ):
                        main.log.error( "Error with host ips on controller" +
                                        controllerStr + ": " + str( host ) )
                        ipResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=ipResult,
            onpass="The ips of the hosts aren't empty",
            onfail="The ip of at least one host is missing" )

        # Strongly connected clusters of devices
        main.step( "Cluster view is consistent across ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE

            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        # there should always only be one cluster
        main.step( "Cluster view correct across ONOS nodes" )
        # NOTE(review): clusterResults is assigned below but never read —
        #               the assert compares numClusters directly; looks
        #               vestigial, confirm before removing
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        main.step( "Comparing ONOS topology to MN" )
        devicesResults = main.TRUE
        linksResults = main.TRUE
        hostsResults = main.TRUE
        mnSwitches = main.Mininet1.getSwitches()
        mnLinks = main.Mininet1.getLinks()
        mnHosts = main.Mininet1.getHosts()
        # NOTE(review): "controller" here is a node id from main.activeNodes
        #               but is then used to index main.activeNodes AND the
        #               result lists ( which are indexed by list position ) —
        #               this only lines up when activeNodes == range( n );
        #               confirm against the sibling loops above which use
        #               range( len( ... ) )
        for controller in main.activeNodes:
            controllerStr = str( main.activeNodes[controller] + 1 )
            if devices[ controller ] and ports[ controller ] and\
                    "Error" not in devices[ controller ] and\
                    "Error" not in ports[ controller ]:
                currentDevicesResult = main.Mininet1.compareSwitches(
                    mnSwitches,
                    json.loads( devices[ controller ] ),
                    json.loads( ports[ controller ] ) )
            else:
                currentDevicesResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentDevicesResult,
                                     onpass="ONOS" + controllerStr +
                                            " Switches view is correct",
                                     onfail="ONOS" + controllerStr +
                                            " Switches view is incorrect" )
            if links[ controller ] and "Error" not in links[ controller ]:
                currentLinksResult = main.Mininet1.compareLinks(
                    mnSwitches, mnLinks,
                    json.loads( links[ controller ] ) )
            else:
                currentLinksResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentLinksResult,
                                     onpass="ONOS" + controllerStr +
                                            " links view is correct",
                                     onfail="ONOS" + controllerStr +
                                            " links view is incorrect" )

            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                currentHostsResult = main.Mininet1.compareHosts(
                    mnHosts,
                    hosts[ controller ] )
            else:
                currentHostsResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentHostsResult,
                                     onpass="ONOS" + controllerStr +
                                            " hosts exist in Mininet",
                                     onfail="ONOS" + controllerStr +
                                            " hosts don't match Mininet" )

            devicesResults = devicesResults and currentDevicesResult
            linksResults = linksResults and currentLinksResult
            hostsResults = hostsResults and currentHostsResult

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )
1786
1787 def CASE61( self, main ):
1788 """
1789 The Failure case.
1790 """
1791 import math
1792 assert main.numCtrls, "main.numCtrls not defined"
1793 assert main, "main not defined"
1794 assert utilities.assert_equals, "utilities.assert_equals not defined"
1795 assert main.CLIs, "main.CLIs not defined"
1796 assert main.nodes, "main.nodes not defined"
1797 main.case( "Partition ONOS nodes into two distinct partitions" )
1798
1799 main.step( "Checking ONOS Logs for errors" )
1800 for node in main.nodes:
1801 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1802 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1803
Jon Halld2871c22016-07-26 11:01:14 -07001804 main.log.debug( main.CLIs[0].roles( jsonFormat=False ) )
1805
Jon Hall6e709752016-02-01 13:38:46 -08001806 n = len( main.nodes ) # Number of nodes
1807 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1808 main.partition = [ 0 ] # ONOS node to partition, listed by index in main.nodes
1809 if n > 3:
1810 main.partition.append( p - 1 )
1811 # NOTE: This only works for cluster sizes of 3,5, or 7.
1812
1813 main.step( "Partitioning ONOS nodes" )
1814 nodeList = [ str( i + 1 ) for i in main.partition ]
1815 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1816 partitionResults = main.TRUE
1817 for i in range( 0, n ):
1818 this = main.nodes[i]
1819 if i not in main.partition:
1820 for j in main.partition:
1821 foe = main.nodes[j]
1822 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1823 #CMD HERE
1824 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1825 this.handle.sendline( cmdStr )
1826 this.handle.expect( "\$" )
1827 main.log.debug( this.handle.before )
1828 else:
1829 for j in range( 0, n ):
1830 if j not in main.partition:
1831 foe = main.nodes[j]
1832 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1833 #CMD HERE
1834 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1835 this.handle.sendline( cmdStr )
1836 this.handle.expect( "\$" )
1837 main.log.debug( this.handle.before )
1838 main.activeNodes.remove( i )
1839 # NOTE: When dynamic clustering is finished, we need to start checking
1840 # main.partion nodes still work when partitioned
1841 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1842 onpass="Firewall rules set successfully",
1843 onfail="Error setting firewall rules" )
1844
Jon Hall6509dbf2016-06-21 17:01:17 -07001845 main.step( "Sleeping 60 seconds" )
Jon Hall6e709752016-02-01 13:38:46 -08001846 time.sleep( 60 )
1847
1848 def CASE62( self, main ):
1849 """
1850 Healing Partition
1851 """
1852 import time
1853 assert main.numCtrls, "main.numCtrls not defined"
1854 assert main, "main not defined"
1855 assert utilities.assert_equals, "utilities.assert_equals not defined"
1856 assert main.CLIs, "main.CLIs not defined"
1857 assert main.nodes, "main.nodes not defined"
1858 assert main.partition, "main.partition not defined"
1859 main.case( "Healing Partition" )
1860
1861 main.step( "Deleteing firewall rules" )
1862 healResults = main.TRUE
1863 for node in main.nodes:
1864 cmdStr = "sudo iptables -F"
1865 node.handle.sendline( cmdStr )
1866 node.handle.expect( "\$" )
1867 main.log.debug( node.handle.before )
1868 utilities.assert_equals( expect=main.TRUE, actual=healResults,
1869 onpass="Firewall rules removed",
1870 onfail="Error removing firewall rules" )
1871
1872 for node in main.partition:
1873 main.activeNodes.append( node )
1874 main.activeNodes.sort()
1875 try:
1876 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1877 "List of active nodes has duplicates, this likely indicates something was run out of order"
1878 except AssertionError:
1879 main.log.exception( "" )
1880 main.cleanup()
1881 main.exit()
1882
Jon Halld2871c22016-07-26 11:01:14 -07001883 main.step( "Checking ONOS nodes" )
1884 nodeResults = utilities.retry( main.HA.nodesCheck,
1885 False,
1886 args=[main.activeNodes],
1887 sleep=15,
1888 attempts=5 )
1889
1890 utilities.assert_equals( expect=True, actual=nodeResults,
1891 onpass="Nodes check successful",
1892 onfail="Nodes check NOT successful" )
1893
1894 if not nodeResults:
1895 for i in main.activeNodes:
1896 cli = main.CLIs[i]
1897 main.log.debug( "{} components not ACTIVE: \n{}".format(
1898 cli.name,
1899 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
1900 main.log.error( "Failed to start ONOS, stopping test" )
1901 main.cleanup()
1902 main.exit()
1903
Jon Hall6e709752016-02-01 13:38:46 -08001904 def CASE7( self, main ):
1905 """
1906 Check state after ONOS failure
1907 """
1908 import json
1909 assert main.numCtrls, "main.numCtrls not defined"
1910 assert main, "main not defined"
1911 assert utilities.assert_equals, "utilities.assert_equals not defined"
1912 assert main.CLIs, "main.CLIs not defined"
1913 assert main.nodes, "main.nodes not defined"
1914 try:
1915 main.partition
1916 except AttributeError:
1917 main.partition = []
1918
1919 main.case( "Running ONOS Constant State Tests" )
1920
1921 main.step( "Check that each switch has a master" )
1922 # Assert that each device has a master
1923 rolesNotNull = main.TRUE
1924 threads = []
1925 for i in main.activeNodes:
1926 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1927 name="rolesNotNull-" + str( i ),
1928 args=[ ] )
1929 threads.append( t )
1930 t.start()
1931
1932 for t in threads:
1933 t.join()
1934 rolesNotNull = rolesNotNull and t.result
1935 utilities.assert_equals(
1936 expect=main.TRUE,
1937 actual=rolesNotNull,
1938 onpass="Each device has a master",
1939 onfail="Some devices don't have a master assigned" )
1940
1941 main.step( "Read device roles from ONOS" )
1942 ONOSMastership = []
1943 mastershipCheck = main.FALSE
1944 consistentMastership = True
1945 rolesResults = True
1946 threads = []
1947 for i in main.activeNodes:
1948 t = main.Thread( target=main.CLIs[i].roles,
1949 name="roles-" + str( i ),
1950 args=[] )
1951 threads.append( t )
1952 t.start()
1953
1954 for t in threads:
1955 t.join()
1956 ONOSMastership.append( t.result )
1957
1958 for i in range( len( ONOSMastership ) ):
1959 node = str( main.activeNodes[i] + 1 )
1960 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1961 main.log.error( "Error in getting ONOS" + node + " roles" )
1962 main.log.warn( "ONOS" + node + " mastership response: " +
1963 repr( ONOSMastership[i] ) )
1964 rolesResults = False
1965 utilities.assert_equals(
1966 expect=True,
1967 actual=rolesResults,
1968 onpass="No error in reading roles output",
1969 onfail="Error in reading roles from ONOS" )
1970
1971 main.step( "Check for consistency in roles from each controller" )
1972 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1973 main.log.info(
1974 "Switch roles are consistent across all ONOS nodes" )
1975 else:
1976 consistentMastership = False
1977 utilities.assert_equals(
1978 expect=True,
1979 actual=consistentMastership,
1980 onpass="Switch roles are consistent across all ONOS nodes",
1981 onfail="ONOS nodes have different views of switch roles" )
1982
1983 if rolesResults and not consistentMastership:
1984 for i in range( len( ONOSMastership ) ):
1985 node = str( main.activeNodes[i] + 1 )
1986 main.log.warn( "ONOS" + node + " roles: ",
1987 json.dumps( json.loads( ONOSMastership[ i ] ),
1988 sort_keys=True,
1989 indent=4,
1990 separators=( ',', ': ' ) ) )
1991
1992 # NOTE: we expect mastership to change on controller failure
1993
1994 main.step( "Get the intents and compare across all nodes" )
1995 ONOSIntents = []
1996 intentCheck = main.FALSE
1997 consistentIntents = True
1998 intentsResults = True
1999 threads = []
2000 for i in main.activeNodes:
2001 t = main.Thread( target=main.CLIs[i].intents,
2002 name="intents-" + str( i ),
2003 args=[],
2004 kwargs={ 'jsonFormat': True } )
2005 threads.append( t )
2006 t.start()
2007
2008 for t in threads:
2009 t.join()
2010 ONOSIntents.append( t.result )
2011
2012 for i in range( len( ONOSIntents) ):
2013 node = str( main.activeNodes[i] + 1 )
2014 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
2015 main.log.error( "Error in getting ONOS" + node + " intents" )
2016 main.log.warn( "ONOS" + node + " intents response: " +
2017 repr( ONOSIntents[ i ] ) )
2018 intentsResults = False
2019 utilities.assert_equals(
2020 expect=True,
2021 actual=intentsResults,
2022 onpass="No error in reading intents output",
2023 onfail="Error in reading intents from ONOS" )
2024
2025 main.step( "Check for consistency in Intents from each controller" )
2026 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2027 main.log.info( "Intents are consistent across all ONOS " +
2028 "nodes" )
2029 else:
2030 consistentIntents = False
2031
2032 # Try to make it easy to figure out what is happening
2033 #
2034 # Intent ONOS1 ONOS2 ...
2035 # 0x01 INSTALLED INSTALLING
2036 # ... ... ...
2037 # ... ... ...
2038 title = " ID"
2039 for n in main.activeNodes:
2040 title += " " * 10 + "ONOS" + str( n + 1 )
2041 main.log.warn( title )
2042 # get all intent keys in the cluster
2043 keys = []
2044 for nodeStr in ONOSIntents:
2045 node = json.loads( nodeStr )
2046 for intent in node:
2047 keys.append( intent.get( 'id' ) )
2048 keys = set( keys )
2049 for key in keys:
2050 row = "%-13s" % key
2051 for nodeStr in ONOSIntents:
2052 node = json.loads( nodeStr )
2053 for intent in node:
2054 if intent.get( 'id' ) == key:
2055 row += "%-15s" % intent.get( 'state' )
2056 main.log.warn( row )
2057 # End table view
2058
2059 utilities.assert_equals(
2060 expect=True,
2061 actual=consistentIntents,
2062 onpass="Intents are consistent across all ONOS nodes",
2063 onfail="ONOS nodes have different views of intents" )
2064 intentStates = []
2065 for node in ONOSIntents: # Iter through ONOS nodes
2066 nodeStates = []
2067 # Iter through intents of a node
2068 try:
2069 for intent in json.loads( node ):
2070 nodeStates.append( intent[ 'state' ] )
2071 except ( ValueError, TypeError ):
2072 main.log.exception( "Error in parsing intents" )
2073 main.log.error( repr( node ) )
2074 intentStates.append( nodeStates )
2075 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2076 main.log.info( dict( out ) )
2077
2078 if intentsResults and not consistentIntents:
2079 for i in range( len( main.activeNodes ) ):
2080 node = str( main.activeNodes[i] + 1 )
2081 main.log.warn( "ONOS" + node + " intents: " )
2082 main.log.warn( json.dumps(
2083 json.loads( ONOSIntents[ i ] ),
2084 sort_keys=True,
2085 indent=4,
2086 separators=( ',', ': ' ) ) )
2087 elif intentsResults and consistentIntents:
2088 intentCheck = main.TRUE
2089
2090 # NOTE: Store has no durability, so intents are lost across system
2091 # restarts
2092 main.step( "Compare current intents with intents before the failure" )
2093 # NOTE: this requires case 5 to pass for intentState to be set.
2094 # maybe we should stop the test if that fails?
2095 sameIntents = main.FALSE
2096 try:
2097 intentState
2098 except NameError:
2099 main.log.warn( "No previous intent state was saved" )
2100 else:
2101 if intentState and intentState == ONOSIntents[ 0 ]:
2102 sameIntents = main.TRUE
2103 main.log.info( "Intents are consistent with before failure" )
2104 # TODO: possibly the states have changed? we may need to figure out
2105 # what the acceptable states are
2106 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2107 sameIntents = main.TRUE
2108 try:
2109 before = json.loads( intentState )
2110 after = json.loads( ONOSIntents[ 0 ] )
2111 for intent in before:
2112 if intent not in after:
2113 sameIntents = main.FALSE
2114 main.log.debug( "Intent is not currently in ONOS " +
2115 "(at least in the same form):" )
2116 main.log.debug( json.dumps( intent ) )
2117 except ( ValueError, TypeError ):
2118 main.log.exception( "Exception printing intents" )
2119 main.log.debug( repr( ONOSIntents[0] ) )
2120 main.log.debug( repr( intentState ) )
2121 if sameIntents == main.FALSE:
2122 try:
2123 main.log.debug( "ONOS intents before: " )
2124 main.log.debug( json.dumps( json.loads( intentState ),
2125 sort_keys=True, indent=4,
2126 separators=( ',', ': ' ) ) )
2127 main.log.debug( "Current ONOS intents: " )
2128 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2129 sort_keys=True, indent=4,
2130 separators=( ',', ': ' ) ) )
2131 except ( ValueError, TypeError ):
2132 main.log.exception( "Exception printing intents" )
2133 main.log.debug( repr( ONOSIntents[0] ) )
2134 main.log.debug( repr( intentState ) )
2135 utilities.assert_equals(
2136 expect=main.TRUE,
2137 actual=sameIntents,
2138 onpass="Intents are consistent with before failure",
2139 onfail="The Intents changed during failure" )
2140 intentCheck = intentCheck and sameIntents
2141
2142 main.step( "Get the OF Table entries and compare to before " +
2143 "component failure" )
2144 FlowTables = main.TRUE
2145 for i in range( 28 ):
2146 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2147 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
Jon Hall41d39f12016-04-11 22:54:35 -07002148 curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
2149 FlowTables = FlowTables and curSwitch
2150 if curSwitch == main.FALSE:
Jon Hall6e709752016-02-01 13:38:46 -08002151 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2152 utilities.assert_equals(
2153 expect=main.TRUE,
2154 actual=FlowTables,
2155 onpass="No changes were found in the flow tables",
2156 onfail="Changes were found in the flow tables" )
2157
2158 main.Mininet2.pingLongKill()
2159 '''
2160 main.step( "Check the continuous pings to ensure that no packets " +
2161 "were dropped during component failure" )
2162 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2163 main.params[ 'TESTONIP' ] )
2164 LossInPings = main.FALSE
2165 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2166 for i in range( 8, 18 ):
2167 main.log.info(
2168 "Checking for a loss in pings along flow from s" +
2169 str( i ) )
2170 LossInPings = main.Mininet2.checkForLoss(
2171 "/tmp/ping.h" +
2172 str( i ) ) or LossInPings
2173 if LossInPings == main.TRUE:
2174 main.log.info( "Loss in ping detected" )
2175 elif LossInPings == main.ERROR:
2176 main.log.info( "There are multiple mininet process running" )
2177 elif LossInPings == main.FALSE:
2178 main.log.info( "No Loss in the pings" )
2179 main.log.info( "No loss of dataplane connectivity" )
2180 utilities.assert_equals(
2181 expect=main.FALSE,
2182 actual=LossInPings,
2183 onpass="No Loss of connectivity",
2184 onfail="Loss of dataplane connectivity detected" )
2185 '''
2186
2187 main.step( "Leadership Election is still functional" )
2188 # Test of LeadershipElection
2189 leaderList = []
2190
2191 partitioned = []
2192 for i in main.partition:
2193 partitioned.append( main.nodes[i].ip_address )
2194 leaderResult = main.TRUE
2195
2196 for i in main.activeNodes:
2197 cli = main.CLIs[i]
2198 leaderN = cli.electionTestLeader()
2199 leaderList.append( leaderN )
2200 if leaderN == main.FALSE:
2201 # error in response
2202 main.log.error( "Something is wrong with " +
2203 "electionTestLeader function, check the" +
2204 " error logs" )
2205 leaderResult = main.FALSE
2206 elif leaderN is None:
2207 main.log.error( cli.name +
2208 " shows no leader for the election-app was" +
2209 " elected after the old one died" )
2210 leaderResult = main.FALSE
2211 elif leaderN in partitioned:
2212 main.log.error( cli.name + " shows " + str( leaderN ) +
2213 " as leader for the election-app, but it " +
2214 "was partitioned" )
2215 leaderResult = main.FALSE
2216 if len( set( leaderList ) ) != 1:
2217 leaderResult = main.FALSE
2218 main.log.error(
2219 "Inconsistent view of leader for the election test app" )
2220 # TODO: print the list
2221 utilities.assert_equals(
2222 expect=main.TRUE,
2223 actual=leaderResult,
2224 onpass="Leadership election passed",
2225 onfail="Something went wrong with Leadership election" )
2226
    def CASE8( self, main ):
        """
        Compare topo

        Repeatedly pulls devices, hosts, ports, links and clusters from
        every active ONOS node (up to ~60s / 3 tries) and compares them to
        the Mininet topology, including host attachment points for the
        obelisk topology. Then checks cross-node consistency of hosts and
        clusters, expects exactly one strongly-connected component, and
        finally verifies node health. Aborts the test if the topologies
        never converge.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # --- Pull devices from every active node in parallel, with retries ---
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            # --- Pull hosts (parsed to JSON here; None on parse failure) ---
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host must have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            # --- Pull ports ---
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            # --- Pull links ---
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            # --- Pull clusters (strongly connected components) ---
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query failed on every node there is nothing to compare
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            # Ground truth from Mininet
            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                   "Error" not in devices[ controller ] and\
                   "Error" not in ports[ controller ]:

                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ) as e:
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        # Each host must be attached on port 1 of its expected device
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                # NOTE(review): zero discovered hosts is treated as a pass
                # here — the hosts comparison above already covers that case
                if zeroHosts is False:
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
            topoResult = ( devicesResults and linksResults
                           and hostsResults and ipResult and
                           hostAttachmentResults )
        utilities.assert_equals( expect=True,
                                 actual=topoResult,
                                 onpass="ONOS topology matches Mininet",
                                 onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                 controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        # Overall verdict for the case; count <= 2 means convergence was fast
        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
            " seconds for the test to make all the cli calls to fetch " +\
            "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )

        if not topoResult:
            main.cleanup()
            main.exit()
2648
Jon Hall6e709752016-02-01 13:38:46 -08002649 def CASE9( self, main ):
2650 """
2651 Link s3-s28 down
2652 """
2653 import time
2654 assert main.numCtrls, "main.numCtrls not defined"
2655 assert main, "main not defined"
2656 assert utilities.assert_equals, "utilities.assert_equals not defined"
2657 assert main.CLIs, "main.CLIs not defined"
2658 assert main.nodes, "main.nodes not defined"
2659 # NOTE: You should probably run a topology check after this
2660
2661 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2662
2663 description = "Turn off a link to ensure that Link Discovery " +\
2664 "is working properly"
2665 main.case( description )
2666
2667 main.step( "Kill Link between s3 and s28" )
2668 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2669 main.log.info( "Waiting " + str( linkSleep ) +
2670 " seconds for link down to be discovered" )
2671 time.sleep( linkSleep )
2672 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2673 onpass="Link down successful",
2674 onfail="Failed to bring link down" )
2675 # TODO do some sort of check here
2676
2677 def CASE10( self, main ):
2678 """
2679 Link s3-s28 up
2680 """
2681 import time
2682 assert main.numCtrls, "main.numCtrls not defined"
2683 assert main, "main not defined"
2684 assert utilities.assert_equals, "utilities.assert_equals not defined"
2685 assert main.CLIs, "main.CLIs not defined"
2686 assert main.nodes, "main.nodes not defined"
2687 # NOTE: You should probably run a topology check after this
2688
2689 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2690
2691 description = "Restore a link to ensure that Link Discovery is " + \
2692 "working properly"
2693 main.case( description )
2694
2695 main.step( "Bring link between s3 and s28 back up" )
2696 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2697 main.log.info( "Waiting " + str( linkSleep ) +
2698 " seconds for link up to be discovered" )
2699 time.sleep( linkSleep )
2700 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2701 onpass="Link up successful",
2702 onfail="Failed to bring link up" )
2703 # TODO do some sort of check here
2704
2705 def CASE11( self, main ):
2706 """
2707 Switch Down
2708 """
2709 # NOTE: You should probably run a topology check after this
2710 import time
2711 assert main.numCtrls, "main.numCtrls not defined"
2712 assert main, "main not defined"
2713 assert utilities.assert_equals, "utilities.assert_equals not defined"
2714 assert main.CLIs, "main.CLIs not defined"
2715 assert main.nodes, "main.nodes not defined"
2716
2717 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2718
2719 description = "Killing a switch to ensure it is discovered correctly"
2720 onosCli = main.CLIs[ main.activeNodes[0] ]
2721 main.case( description )
2722 switch = main.params[ 'kill' ][ 'switch' ]
2723 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2724
2725 # TODO: Make this switch parameterizable
2726 main.step( "Kill " + switch )
2727 main.log.info( "Deleting " + switch )
2728 main.Mininet1.delSwitch( switch )
2729 main.log.info( "Waiting " + str( switchSleep ) +
2730 " seconds for switch down to be discovered" )
2731 time.sleep( switchSleep )
2732 device = onosCli.getDevice( dpid=switchDPID )
2733 # Peek at the deleted switch
2734 main.log.warn( str( device ) )
2735 result = main.FALSE
2736 if device and device[ 'available' ] is False:
2737 result = main.TRUE
2738 utilities.assert_equals( expect=main.TRUE, actual=result,
2739 onpass="Kill switch successful",
2740 onfail="Failed to kill switch?" )
2741
2742 def CASE12( self, main ):
2743 """
2744 Switch Up
2745 """
2746 # NOTE: You should probably run a topology check after this
2747 import time
2748 assert main.numCtrls, "main.numCtrls not defined"
2749 assert main, "main not defined"
2750 assert utilities.assert_equals, "utilities.assert_equals not defined"
2751 assert main.CLIs, "main.CLIs not defined"
2752 assert main.nodes, "main.nodes not defined"
2753 assert ONOS1Port, "ONOS1Port not defined"
2754 assert ONOS2Port, "ONOS2Port not defined"
2755 assert ONOS3Port, "ONOS3Port not defined"
2756 assert ONOS4Port, "ONOS4Port not defined"
2757 assert ONOS5Port, "ONOS5Port not defined"
2758 assert ONOS6Port, "ONOS6Port not defined"
2759 assert ONOS7Port, "ONOS7Port not defined"
2760
2761 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2762 switch = main.params[ 'kill' ][ 'switch' ]
2763 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2764 links = main.params[ 'kill' ][ 'links' ].split()
2765 onosCli = main.CLIs[ main.activeNodes[0] ]
2766 description = "Adding a switch to ensure it is discovered correctly"
2767 main.case( description )
2768
2769 main.step( "Add back " + switch )
2770 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2771 for peer in links:
2772 main.Mininet1.addLink( switch, peer )
2773 ipList = [ node.ip_address for node in main.nodes ]
2774 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2775 main.log.info( "Waiting " + str( switchSleep ) +
2776 " seconds for switch up to be discovered" )
2777 time.sleep( switchSleep )
2778 device = onosCli.getDevice( dpid=switchDPID )
2779 # Peek at the deleted switch
2780 main.log.warn( str( device ) )
2781 result = main.FALSE
2782 if device and device[ 'available' ]:
2783 result = main.TRUE
2784 utilities.assert_equals( expect=main.TRUE, actual=result,
2785 onpass="add switch successful",
2786 onfail="Failed to add switch?" )
2787
2788 def CASE13( self, main ):
2789 """
2790 Clean up
2791 """
2792 import os
2793 import time
2794 assert main.numCtrls, "main.numCtrls not defined"
2795 assert main, "main not defined"
2796 assert utilities.assert_equals, "utilities.assert_equals not defined"
2797 assert main.CLIs, "main.CLIs not defined"
2798 assert main.nodes, "main.nodes not defined"
2799
2800 # printing colors to terminal
2801 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2802 'blue': '\033[94m', 'green': '\033[92m',
2803 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2804 main.case( "Test Cleanup" )
2805 main.step( "Killing tcpdumps" )
2806 main.Mininet2.stopTcpdump()
2807
2808 testname = main.TEST
2809 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2810 main.step( "Copying MN pcap and ONOS log files to test station" )
2811 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2812 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2813 # NOTE: MN Pcap file is being saved to logdir.
2814 # We scp this file as MN and TestON aren't necessarily the same vm
2815
2816 # FIXME: To be replaced with a Jenkin's post script
2817 # TODO: Load these from params
2818 # NOTE: must end in /
2819 logFolder = "/opt/onos/log/"
2820 logFiles = [ "karaf.log", "karaf.log.1" ]
2821 # NOTE: must end in /
2822 for f in logFiles:
2823 for node in main.nodes:
2824 dstName = main.logdir + "/" + node.name + "-" + f
2825 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2826 logFolder + f, dstName )
2827 # std*.log's
2828 # NOTE: must end in /
2829 logFolder = "/opt/onos/var/"
2830 logFiles = [ "stderr.log", "stdout.log" ]
2831 # NOTE: must end in /
2832 for f in logFiles:
2833 for node in main.nodes:
2834 dstName = main.logdir + "/" + node.name + "-" + f
2835 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2836 logFolder + f, dstName )
2837 else:
2838 main.log.debug( "skipping saving log files" )
2839
2840 main.step( "Stopping Mininet" )
2841 mnResult = main.Mininet1.stopNet()
2842 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2843 onpass="Mininet stopped",
2844 onfail="MN cleanup NOT successful" )
2845
2846 main.step( "Checking ONOS Logs for errors" )
2847 for node in main.nodes:
2848 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2849 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2850
2851 try:
2852 timerLog = open( main.logdir + "/Timers.csv", 'w')
2853 # Overwrite with empty line and close
2854 labels = "Gossip Intents"
2855 data = str( gossipTime )
2856 timerLog.write( labels + "\n" + data )
2857 timerLog.close()
2858 except NameError, e:
2859 main.log.exception(e)
2860
2861 def CASE14( self, main ):
2862 """
2863 start election app on all onos nodes
2864 """
2865 assert main.numCtrls, "main.numCtrls not defined"
2866 assert main, "main not defined"
2867 assert utilities.assert_equals, "utilities.assert_equals not defined"
2868 assert main.CLIs, "main.CLIs not defined"
2869 assert main.nodes, "main.nodes not defined"
2870
2871 main.case("Start Leadership Election app")
2872 main.step( "Install leadership election app" )
2873 onosCli = main.CLIs[ main.activeNodes[0] ]
2874 appResult = onosCli.activateApp( "org.onosproject.election" )
2875 utilities.assert_equals(
2876 expect=main.TRUE,
2877 actual=appResult,
2878 onpass="Election app installed",
2879 onfail="Something went wrong with installing Leadership election" )
2880
2881 main.step( "Run for election on each node" )
Jon Hall6e709752016-02-01 13:38:46 -08002882 for i in main.activeNodes:
2883 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002884 time.sleep(5)
2885 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2886 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall6e709752016-02-01 13:38:46 -08002887 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002888 expect=True,
2889 actual=sameResult,
2890 onpass="All nodes see the same leaderboards",
2891 onfail="Inconsistent leaderboards" )
Jon Hall6e709752016-02-01 13:38:46 -08002892
Jon Hall25463a82016-04-13 14:03:52 -07002893 if sameResult:
2894 leader = leaders[ 0 ][ 0 ]
2895 if main.nodes[main.activeNodes[0]].ip_address in leader:
2896 correctLeader = True
2897 else:
2898 correctLeader = False
2899 main.step( "First node was elected leader" )
2900 utilities.assert_equals(
2901 expect=True,
2902 actual=correctLeader,
2903 onpass="Correct leader was elected",
2904 onfail="Incorrect leader" )
Jon Hall6e709752016-02-01 13:38:46 -08002905
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
            15.1 Run election on each node
            15.2 Check that each node has the same leaders and candidates
            15.3 Find current leader and withdraw
            15.4 Check that a new node was elected leader
            15.5 Check that that new leader was the candidate of old leader
            15.6 Run for election on old leader
            15.7 Check that oldLeader is a candidate, and leader if only 1 node
            15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
            withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app loaded the rest of this case is moot
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All boards agree; first entry of the first board is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means a node reported no leader at all; that is only
            # acceptable in the single-controller case (expectNoLeader)
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 candidates: cannot predict the successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Parameterize
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3079
3080 def CASE16( self, main ):
3081 """
3082 Install Distributed Primitives app
3083 """
3084 import time
3085 assert main.numCtrls, "main.numCtrls not defined"
3086 assert main, "main not defined"
3087 assert utilities.assert_equals, "utilities.assert_equals not defined"
3088 assert main.CLIs, "main.CLIs not defined"
3089 assert main.nodes, "main.nodes not defined"
3090
3091 # Variables for the distributed primitives tests
3092 global pCounterName
Jon Hall6e709752016-02-01 13:38:46 -08003093 global pCounterValue
Jon Hall6e709752016-02-01 13:38:46 -08003094 global onosSet
3095 global onosSetName
3096 pCounterName = "TestON-Partitions"
Jon Hall6e709752016-02-01 13:38:46 -08003097 pCounterValue = 0
Jon Hall6e709752016-02-01 13:38:46 -08003098 onosSet = set([])
3099 onosSetName = "TestON-set"
3100
3101 description = "Install Primitives app"
3102 main.case( description )
3103 main.step( "Install Primitives app" )
3104 appName = "org.onosproject.distributedprimitives"
3105 node = main.activeNodes[0]
3106 appResults = main.CLIs[node].activateApp( appName )
3107 utilities.assert_equals( expect=main.TRUE,
3108 actual=appResults,
3109 onpass="Primitives app activated",
3110 onfail="Primitives app not activated" )
3111 time.sleep( 5 ) # To allow all nodes to activate
3112
3113 def CASE17( self, main ):
3114 """
3115 Check for basic functionality with distributed primitives
3116 """
3117 # Make sure variables are defined/set
3118 assert main.numCtrls, "main.numCtrls not defined"
3119 assert main, "main not defined"
3120 assert utilities.assert_equals, "utilities.assert_equals not defined"
3121 assert main.CLIs, "main.CLIs not defined"
3122 assert main.nodes, "main.nodes not defined"
3123 assert pCounterName, "pCounterName not defined"
Jon Hall6e709752016-02-01 13:38:46 -08003124 assert onosSetName, "onosSetName not defined"
3125 # NOTE: assert fails if value is 0/None/Empty/False
3126 try:
3127 pCounterValue
3128 except NameError:
3129 main.log.error( "pCounterValue not defined, setting to 0" )
3130 pCounterValue = 0
3131 try:
Jon Hall6e709752016-02-01 13:38:46 -08003132 onosSet
3133 except NameError:
3134 main.log.error( "onosSet not defined, setting to empty Set" )
3135 onosSet = set([])
3136 # Variables for the distributed primitives tests. These are local only
3137 addValue = "a"
3138 addAllValue = "a b c d e f"
3139 retainValue = "c d e f"
3140
3141 description = "Check for basic functionality with distributed " +\
3142 "primitives"
3143 main.case( description )
3144 main.caseExplanation = "Test the methods of the distributed " +\
3145 "primitives (counters and sets) throught the cli"
3146 # DISTRIBUTED ATOMIC COUNTERS
3147 # Partitioned counters
3148 main.step( "Increment then get a default counter on each node" )
3149 pCounters = []
3150 threads = []
3151 addedPValues = []
3152 for i in main.activeNodes:
3153 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3154 name="counterAddAndGet-" + str( i ),
3155 args=[ pCounterName ] )
3156 pCounterValue += 1
3157 addedPValues.append( pCounterValue )
3158 threads.append( t )
3159 t.start()
3160
3161 for t in threads:
3162 t.join()
3163 pCounters.append( t.result )
3164 # Check that counter incremented numController times
3165 pCounterResults = True
3166 for i in addedPValues:
3167 tmpResult = i in pCounters
3168 pCounterResults = pCounterResults and tmpResult
3169 if not tmpResult:
3170 main.log.error( str( i ) + " is not in partitioned "
3171 "counter incremented results" )
3172 utilities.assert_equals( expect=True,
3173 actual=pCounterResults,
3174 onpass="Default counter incremented",
3175 onfail="Error incrementing default" +
3176 " counter" )
3177
3178 main.step( "Get then Increment a default counter on each node" )
3179 pCounters = []
3180 threads = []
3181 addedPValues = []
3182 for i in main.activeNodes:
3183 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3184 name="counterGetAndAdd-" + str( i ),
3185 args=[ pCounterName ] )
3186 addedPValues.append( pCounterValue )
3187 pCounterValue += 1
3188 threads.append( t )
3189 t.start()
3190
3191 for t in threads:
3192 t.join()
3193 pCounters.append( t.result )
3194 # Check that counter incremented numController times
3195 pCounterResults = True
3196 for i in addedPValues:
3197 tmpResult = i in pCounters
3198 pCounterResults = pCounterResults and tmpResult
3199 if not tmpResult:
3200 main.log.error( str( i ) + " is not in partitioned "
3201 "counter incremented results" )
3202 utilities.assert_equals( expect=True,
3203 actual=pCounterResults,
3204 onpass="Default counter incremented",
3205 onfail="Error incrementing default" +
3206 " counter" )
3207
3208 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003209 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003210 utilities.assert_equals( expect=main.TRUE,
3211 actual=incrementCheck,
3212 onpass="Added counters are correct",
3213 onfail="Added counters are incorrect" )
3214
3215 main.step( "Add -8 to then get a default counter on each node" )
3216 pCounters = []
3217 threads = []
3218 addedPValues = []
3219 for i in main.activeNodes:
3220 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3221 name="counterIncrement-" + str( i ),
3222 args=[ pCounterName ],
3223 kwargs={ "delta": -8 } )
3224 pCounterValue += -8
3225 addedPValues.append( pCounterValue )
3226 threads.append( t )
3227 t.start()
3228
3229 for t in threads:
3230 t.join()
3231 pCounters.append( t.result )
3232 # Check that counter incremented numController times
3233 pCounterResults = True
3234 for i in addedPValues:
3235 tmpResult = i in pCounters
3236 pCounterResults = pCounterResults and tmpResult
3237 if not tmpResult:
3238 main.log.error( str( i ) + " is not in partitioned "
3239 "counter incremented results" )
3240 utilities.assert_equals( expect=True,
3241 actual=pCounterResults,
3242 onpass="Default counter incremented",
3243 onfail="Error incrementing default" +
3244 " counter" )
3245
3246 main.step( "Add 5 to then get a default counter on each node" )
3247 pCounters = []
3248 threads = []
3249 addedPValues = []
3250 for i in main.activeNodes:
3251 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3252 name="counterIncrement-" + str( i ),
3253 args=[ pCounterName ],
3254 kwargs={ "delta": 5 } )
3255 pCounterValue += 5
3256 addedPValues.append( pCounterValue )
3257 threads.append( t )
3258 t.start()
3259
3260 for t in threads:
3261 t.join()
3262 pCounters.append( t.result )
3263 # Check that counter incremented numController times
3264 pCounterResults = True
3265 for i in addedPValues:
3266 tmpResult = i in pCounters
3267 pCounterResults = pCounterResults and tmpResult
3268 if not tmpResult:
3269 main.log.error( str( i ) + " is not in partitioned "
3270 "counter incremented results" )
3271 utilities.assert_equals( expect=True,
3272 actual=pCounterResults,
3273 onpass="Default counter incremented",
3274 onfail="Error incrementing default" +
3275 " counter" )
3276
3277 main.step( "Get then add 5 to a default counter on each node" )
3278 pCounters = []
3279 threads = []
3280 addedPValues = []
3281 for i in main.activeNodes:
3282 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3283 name="counterIncrement-" + str( i ),
3284 args=[ pCounterName ],
3285 kwargs={ "delta": 5 } )
3286 addedPValues.append( pCounterValue )
3287 pCounterValue += 5
3288 threads.append( t )
3289 t.start()
3290
3291 for t in threads:
3292 t.join()
3293 pCounters.append( t.result )
3294 # Check that counter incremented numController times
3295 pCounterResults = True
3296 for i in addedPValues:
3297 tmpResult = i in pCounters
3298 pCounterResults = pCounterResults and tmpResult
3299 if not tmpResult:
3300 main.log.error( str( i ) + " is not in partitioned "
3301 "counter incremented results" )
3302 utilities.assert_equals( expect=True,
3303 actual=pCounterResults,
3304 onpass="Default counter incremented",
3305 onfail="Error incrementing default" +
3306 " counter" )
3307
3308 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003309 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003310 utilities.assert_equals( expect=main.TRUE,
3311 actual=incrementCheck,
3312 onpass="Added counters are correct",
3313 onfail="Added counters are incorrect" )
3314
Jon Hall6e709752016-02-01 13:38:46 -08003315 # DISTRIBUTED SETS
3316 main.step( "Distributed Set get" )
3317 size = len( onosSet )
3318 getResponses = []
3319 threads = []
3320 for i in main.activeNodes:
3321 t = main.Thread( target=main.CLIs[i].setTestGet,
3322 name="setTestGet-" + str( i ),
3323 args=[ onosSetName ] )
3324 threads.append( t )
3325 t.start()
3326 for t in threads:
3327 t.join()
3328 getResponses.append( t.result )
3329
3330 getResults = main.TRUE
3331 for i in range( len( main.activeNodes ) ):
3332 node = str( main.activeNodes[i] + 1 )
3333 if isinstance( getResponses[ i ], list):
3334 current = set( getResponses[ i ] )
3335 if len( current ) == len( getResponses[ i ] ):
3336 # no repeats
3337 if onosSet != current:
3338 main.log.error( "ONOS" + node +
3339 " has incorrect view" +
3340 " of set " + onosSetName + ":\n" +
3341 str( getResponses[ i ] ) )
3342 main.log.debug( "Expected: " + str( onosSet ) )
3343 main.log.debug( "Actual: " + str( current ) )
3344 getResults = main.FALSE
3345 else:
3346 # error, set is not a set
3347 main.log.error( "ONOS" + node +
3348 " has repeat elements in" +
3349 " set " + onosSetName + ":\n" +
3350 str( getResponses[ i ] ) )
3351 getResults = main.FALSE
3352 elif getResponses[ i ] == main.ERROR:
3353 getResults = main.FALSE
3354 utilities.assert_equals( expect=main.TRUE,
3355 actual=getResults,
3356 onpass="Set elements are correct",
3357 onfail="Set elements are incorrect" )
3358
3359 main.step( "Distributed Set size" )
3360 sizeResponses = []
3361 threads = []
3362 for i in main.activeNodes:
3363 t = main.Thread( target=main.CLIs[i].setTestSize,
3364 name="setTestSize-" + str( i ),
3365 args=[ onosSetName ] )
3366 threads.append( t )
3367 t.start()
3368 for t in threads:
3369 t.join()
3370 sizeResponses.append( t.result )
3371
3372 sizeResults = main.TRUE
3373 for i in range( len( main.activeNodes ) ):
3374 node = str( main.activeNodes[i] + 1 )
3375 if size != sizeResponses[ i ]:
3376 sizeResults = main.FALSE
3377 main.log.error( "ONOS" + node +
3378 " expected a size of " + str( size ) +
3379 " for set " + onosSetName +
3380 " but got " + str( sizeResponses[ i ] ) )
3381 utilities.assert_equals( expect=main.TRUE,
3382 actual=sizeResults,
3383 onpass="Set sizes are correct",
3384 onfail="Set sizes are incorrect" )
3385
3386 main.step( "Distributed Set add()" )
3387 onosSet.add( addValue )
3388 addResponses = []
3389 threads = []
3390 for i in main.activeNodes:
3391 t = main.Thread( target=main.CLIs[i].setTestAdd,
3392 name="setTestAdd-" + str( i ),
3393 args=[ onosSetName, addValue ] )
3394 threads.append( t )
3395 t.start()
3396 for t in threads:
3397 t.join()
3398 addResponses.append( t.result )
3399
3400 # main.TRUE = successfully changed the set
3401 # main.FALSE = action resulted in no change in set
3402 # main.ERROR - Some error in executing the function
3403 addResults = main.TRUE
3404 for i in range( len( main.activeNodes ) ):
3405 if addResponses[ i ] == main.TRUE:
3406 # All is well
3407 pass
3408 elif addResponses[ i ] == main.FALSE:
3409 # Already in set, probably fine
3410 pass
3411 elif addResponses[ i ] == main.ERROR:
3412 # Error in execution
3413 addResults = main.FALSE
3414 else:
3415 # unexpected result
3416 addResults = main.FALSE
3417 if addResults != main.TRUE:
3418 main.log.error( "Error executing set add" )
3419
3420 # Check if set is still correct
3421 size = len( onosSet )
3422 getResponses = []
3423 threads = []
3424 for i in main.activeNodes:
3425 t = main.Thread( target=main.CLIs[i].setTestGet,
3426 name="setTestGet-" + str( i ),
3427 args=[ onosSetName ] )
3428 threads.append( t )
3429 t.start()
3430 for t in threads:
3431 t.join()
3432 getResponses.append( t.result )
3433 getResults = main.TRUE
3434 for i in range( len( main.activeNodes ) ):
3435 node = str( main.activeNodes[i] + 1 )
3436 if isinstance( getResponses[ i ], list):
3437 current = set( getResponses[ i ] )
3438 if len( current ) == len( getResponses[ i ] ):
3439 # no repeats
3440 if onosSet != current:
3441 main.log.error( "ONOS" + node + " has incorrect view" +
3442 " of set " + onosSetName + ":\n" +
3443 str( getResponses[ i ] ) )
3444 main.log.debug( "Expected: " + str( onosSet ) )
3445 main.log.debug( "Actual: " + str( current ) )
3446 getResults = main.FALSE
3447 else:
3448 # error, set is not a set
3449 main.log.error( "ONOS" + node + " has repeat elements in" +
3450 " set " + onosSetName + ":\n" +
3451 str( getResponses[ i ] ) )
3452 getResults = main.FALSE
3453 elif getResponses[ i ] == main.ERROR:
3454 getResults = main.FALSE
        # Verify set size on every active node matches the local mirror's size.
        sizeResponses = []
        threads = []
        # Fan out one CLI query per active node in parallel, then join.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based ONOS node label for log messages
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addResults = addResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addResults,
                                 onpass="Set add correct",
                                 onfail="Set add was incorrect" )

        main.step( "Distributed Set addAll()" )
        # Update the local mirror first, then apply the same change to ONOS.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            # A list response is the set's contents; anything else is an error.
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

        main.step( "Distributed Set contains()" )
        containsResponses = []
        threads = []
        for i in main.activeNodes:
            # setTestGet with "values" checks membership rather than dumping the set
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setContains-" + str( i ),
                             args=[ onosSetName ],
                             kwargs={ "values": addValue } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            # NOTE: This is the tuple
            containsResponses.append( t.result )

        containsResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if containsResponses[ i ] == main.ERROR:
                containsResults = main.FALSE
            else:
                # tuple element [1] is the boolean membership result
                containsResults = containsResults and\
                                  containsResponses[ i ][ 1 ]
        utilities.assert_equals( expect=main.TRUE,
                                 actual=containsResults,
                                 onpass="Set contains is functional",
                                 onfail="Set contains failed" )
3604
3605 main.step( "Distributed Set containsAll()" )
3606 containsAllResponses = []
3607 threads = []
3608 for i in main.activeNodes:
3609 t = main.Thread( target=main.CLIs[i].setTestGet,
3610 name="setContainsAll-" + str( i ),
3611 args=[ onosSetName ],
3612 kwargs={ "values": addAllValue } )
3613 threads.append( t )
3614 t.start()
3615 for t in threads:
3616 t.join()
3617 # NOTE: This is the tuple
3618 containsAllResponses.append( t.result )
3619
3620 containsAllResults = main.TRUE
3621 for i in range( len( main.activeNodes ) ):
3622 if containsResponses[ i ] == main.ERROR:
3623 containsResults = main.FALSE
3624 else:
3625 containsResults = containsResults and\
3626 containsResponses[ i ][ 1 ]
3627 utilities.assert_equals( expect=main.TRUE,
3628 actual=containsAllResults,
3629 onpass="Set containsAll is functional",
3630 onfail="Set containsAll failed" )
3631
        main.step( "Distributed Set remove()" )
        # Update the local mirror first, then apply the same change to ONOS.
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )

        main.step( "Distributed Set removeAll()" )
        # Remove every element of addAllValue from the local mirror and ONOS.
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )

        main.step( "Distributed Set addAll()" )
        # Re-populate the set so the following steps have elements to work with.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3925
        main.step( "Distributed Set clear()" )
        # Empty the local mirror, then clear the set on every node.
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )

        main.step( "Distributed Set addAll()" )
        # Re-populate the set after clear() so retain() below has elements.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

        main.step( "Distributed Set retain()" )
        # retain keeps only the elements also present in retainValue
        # (set intersection); mirror that locally first.
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4217
4218 # Transactional maps
4219 main.step( "Partitioned Transactional maps put" )
4220 tMapValue = "Testing"
4221 numKeys = 100
4222 putResult = True
4223 node = main.activeNodes[0]
4224 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4225 if putResponses and len( putResponses ) == 100:
4226 for i in putResponses:
4227 if putResponses[ i ][ 'value' ] != tMapValue:
4228 putResult = False
4229 else:
4230 putResult = False
4231 if not putResult:
4232 main.log.debug( "Put response values: " + str( putResponses ) )
4233 utilities.assert_equals( expect=True,
4234 actual=putResult,
4235 onpass="Partitioned Transactional Map put successful",
4236 onfail="Partitioned Transactional Map put values are incorrect" )
4237
        main.step( "Partitioned Transactional maps get" )
        # FIXME: is this sleep needed?
        time.sleep( 5 )

        # For every key written above, read it back from every active node in
        # parallel and verify all nodes agree on the stored value.
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE(review): this loop variable shadows the earlier "node"
            # index; harmless here but worth renaming if this code is touched.
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )