blob: 361da8dfef171cdd0b8be048c125ba9bd936fefb [file] [log] [blame]
Jon Hall6e709752016-02-01 13:38:46 -08001"""
2Description: This test is to determine if ONOS can handle
3 a full network partition
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
14CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
class HAfullNetPartition:

    def __init__( self ):
        """Set up the test class; TestON expects a default attribute."""
        self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
51 import imp
52 import pexpect
53 import time
Jon Halla440e872016-03-31 15:15:50 -070054 import json
Jon Hall6e709752016-02-01 13:38:46 -080055 main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
56 "initialization" )
57 main.case( "Setting up test environment" )
58 main.caseExplanation = "Setup the test environment including " +\
59 "installing ONOS, starting Mininet and ONOS" +\
60 "cli sessions."
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
69 main.numCtrls = int( main.params[ 'num_controllers' ] )
70 if main.ONOSbench.maxNodes:
71 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
74 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070081 # These are for csv plotting in jenkins
82 global labels
83 global data
84 labels = []
85 data = []
Jon Hall6e709752016-02-01 13:38:46 -080086
87 # FIXME: just get controller port from params?
88 # TODO: do we really need all these?
89 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
90 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
91 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
92 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
93 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
94 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
95 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
96
97 try:
Jon Hall41d39f12016-04-11 22:54:35 -070098 from tests.HAsanity.dependencies.HA import HA
99 main.HA = HA()
Jon Hall6e709752016-02-01 13:38:46 -0800100 except Exception as e:
101 main.log.exception( e )
102 main.cleanup()
103 main.exit()
104
105 main.CLIs = []
106 main.nodes = []
107 ipList = []
108 for i in range( 1, main.numCtrls + 1 ):
109 try:
110 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
111 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
112 ipList.append( main.nodes[ -1 ].ip_address )
113 except AttributeError:
114 break
115
116 main.step( "Create cell file" )
117 cellAppString = main.params[ 'ENV' ][ 'appString' ]
118 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
119 main.Mininet1.ip_address,
120 cellAppString, ipList )
121 main.step( "Applying cell variable to environment" )
122 cellResult = main.ONOSbench.setCell( cellName )
123 verifyResult = main.ONOSbench.verifyCell()
124
125 # FIXME:this is short term fix
126 main.log.info( "Removing raft logs" )
127 main.ONOSbench.onosRemoveRaftLogs()
128
129 main.log.info( "Uninstalling ONOS" )
130 for node in main.nodes:
131 main.ONOSbench.onosUninstall( node.ip_address )
132
133 # Make sure ONOS is DEAD
134 main.log.info( "Killing any ONOS processes" )
135 killResults = main.TRUE
136 for node in main.nodes:
137 killed = main.ONOSbench.onosKill( node.ip_address )
138 killResults = killResults and killed
139
140 cleanInstallResult = main.TRUE
141 gitPullResult = main.TRUE
142
143 main.step( "Starting Mininet" )
144 # scp topo file to mininet
145 # TODO: move to params?
146 topoName = "obelisk.py"
147 filePath = main.ONOSbench.home + "/tools/test/topos/"
148 main.ONOSbench.scp( main.Mininet1,
149 filePath + topoName,
150 main.Mininet1.home,
151 direction="to" )
152 mnResult = main.Mininet1.startNet( )
153 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
154 onpass="Mininet Started",
155 onfail="Error starting Mininet" )
156
157 main.step( "Git checkout and pull " + gitBranch )
158 if PULLCODE:
159 main.ONOSbench.gitCheckout( gitBranch )
160 gitPullResult = main.ONOSbench.gitPull()
161 # values of 1 or 3 are good
162 utilities.assert_lesser( expect=0, actual=gitPullResult,
163 onpass="Git pull successful",
164 onfail="Git pull failed" )
165 main.ONOSbench.getVersion( report=True )
166
167 main.step( "Using mvn clean install" )
168 cleanInstallResult = main.TRUE
169 if PULLCODE and gitPullResult == main.TRUE:
170 cleanInstallResult = main.ONOSbench.cleanInstall()
171 else:
172 main.log.warn( "Did not pull new code so skipping mvn " +
173 "clean install" )
174 utilities.assert_equals( expect=main.TRUE,
175 actual=cleanInstallResult,
176 onpass="MCI successful",
177 onfail="MCI failed" )
178 # GRAPHS
179 # NOTE: important params here:
180 # job = name of Jenkins job
181 # Plot Name = Plot-HA, only can be used if multiple plots
182 # index = The number of the graph under plot name
183 job = "HAfullNetPartition"
184 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700185 index = "1"
Jon Hall6e709752016-02-01 13:38:46 -0800186 graphs = '<ac:structured-macro ac:name="html">\n'
187 graphs += '<ac:plain-text-body><![CDATA[\n'
188 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
189 '/plot/' + plotName + '/getPlot?index=' + index +\
190 '&width=500&height=300"' +\
191 'noborder="0" width="500" height="300" scrolling="yes" ' +\
192 'seamless="seamless"></iframe>\n'
193 graphs += ']]></ac:plain-text-body>\n'
194 graphs += '</ac:structured-macro>\n'
195 main.log.wiki(graphs)
196
197 main.step( "Creating ONOS package" )
198 # copy gen-partions file to ONOS
199 # NOTE: this assumes TestON and ONOS are on the same machine
200 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
201 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
202 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
203 main.ONOSbench.ip_address,
204 srcFile,
205 dstDir,
206 pwd=main.ONOSbench.pwd,
207 direction="from" )
208 packageResult = main.ONOSbench.onosPackage()
209 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
210 onpass="ONOS package successful",
211 onfail="ONOS package failed" )
212
213 main.step( "Installing ONOS package" )
214 onosInstallResult = main.TRUE
215 for node in main.nodes:
216 tmpResult = main.ONOSbench.onosInstall( options="-f",
217 node=node.ip_address )
218 onosInstallResult = onosInstallResult and tmpResult
219 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
220 onpass="ONOS install successful",
221 onfail="ONOS install failed" )
222 # clean up gen-partitions file
223 try:
224 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
225 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
226 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
227 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
228 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
229 str( main.ONOSbench.handle.before ) )
230 except ( pexpect.TIMEOUT, pexpect.EOF ):
231 main.log.exception( "ONOSbench: pexpect exception found:" +
232 main.ONOSbench.handle.before )
233 main.cleanup()
234 main.exit()
235
236 main.step( "Checking if ONOS is up yet" )
237 for i in range( 2 ):
238 onosIsupResult = main.TRUE
239 for node in main.nodes:
240 started = main.ONOSbench.isup( node.ip_address )
241 if not started:
242 main.log.error( node.name + " hasn't started" )
243 onosIsupResult = onosIsupResult and started
244 if onosIsupResult == main.TRUE:
245 break
246 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
247 onpass="ONOS startup successful",
248 onfail="ONOS startup failed" )
249
250 main.log.step( "Starting ONOS CLI sessions" )
251 cliResults = main.TRUE
252 threads = []
253 for i in range( main.numCtrls ):
254 t = main.Thread( target=main.CLIs[i].startOnosCli,
255 name="startOnosCli-" + str( i ),
256 args=[main.nodes[i].ip_address] )
257 threads.append( t )
258 t.start()
259
260 for t in threads:
261 t.join()
262 cliResults = cliResults and t.result
263 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
264 onpass="ONOS cli startup successful",
265 onfail="ONOS cli startup failed" )
266
267 # Create a list of active nodes for use when some nodes are stopped
268 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
269
270 if main.params[ 'tcpdump' ].lower() == "true":
271 main.step( "Start Packet Capture MN" )
272 main.Mininet2.startTcpdump(
273 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
274 + "-MN.pcap",
275 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
276 port=main.params[ 'MNtcpdump' ][ 'port' ] )
277
Jon Halla440e872016-03-31 15:15:50 -0700278 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -0700279 nodeResults = utilities.retry( main.HA.nodesCheck,
280 False,
281 args=[main.activeNodes],
282 attempts=5 )
Jon Halla440e872016-03-31 15:15:50 -0700283
Jon Hall41d39f12016-04-11 22:54:35 -0700284 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Halla440e872016-03-31 15:15:50 -0700285 onpass="Nodes check successful",
286 onfail="Nodes check NOT successful" )
287
288 if not nodeResults:
289 for cli in main.CLIs:
290 main.log.debug( "{} components not ACTIVE: \n{}".format(
291 cli.name,
292 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
293
Jon Hall6e709752016-02-01 13:38:46 -0800294 if cliResults == main.FALSE:
295 main.log.error( "Failed to start ONOS, stopping test" )
296 main.cleanup()
297 main.exit()
298
Jon Hall172b7ba2016-04-07 18:12:20 -0700299 main.step( "Activate apps defined in the params file" )
300 # get data from the params
301 apps = main.params.get( 'apps' )
302 if apps:
303 apps = apps.split(',')
304 main.log.warn( apps )
305 activateResult = True
306 for app in apps:
307 main.CLIs[ 0 ].app( app, "Activate" )
308 # TODO: check this worked
309 time.sleep( 10 ) # wait for apps to activate
310 for app in apps:
311 state = main.CLIs[ 0 ].appStatus( app )
312 if state == "ACTIVE":
313 activateResult = activeResult and True
314 else:
315 main.log.error( "{} is in {} state".format( app, state ) )
316 activeResult = False
317 utilities.assert_equals( expect=True,
318 actual=activateResult,
319 onpass="Successfully activated apps",
320 onfail="Failed to activate apps" )
321 else:
322 main.log.warn( "No apps were specified to be loaded after startup" )
323
324 main.step( "Set ONOS configurations" )
325 config = main.params.get( 'ONOS_Configuration' )
326 if config:
327 main.log.debug( config )
328 checkResult = main.TRUE
329 for component in config:
330 for setting in config[component]:
331 value = config[component][setting]
332 check = main.CLIs[ 0 ].setCfg( component, setting, value )
333 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
334 checkResult = check and checkResult
335 utilities.assert_equals( expect=main.TRUE,
336 actual=checkResult,
337 onpass="Successfully set config",
338 onfail="Failed to set config" )
339 else:
340 main.log.warn( "No configurations were specified to be changed after startup" )
341
Jon Hall9d2dcad2016-04-08 10:15:20 -0700342 main.step( "App Ids check" )
343 appCheck = main.TRUE
344 threads = []
345 for i in main.activeNodes:
346 t = main.Thread( target=main.CLIs[i].appToIDCheck,
347 name="appToIDCheck-" + str( i ),
348 args=[] )
349 threads.append( t )
350 t.start()
351
352 for t in threads:
353 t.join()
354 appCheck = appCheck and t.result
355 if appCheck != main.TRUE:
356 node = main.activeNodes[0]
357 main.log.warn( main.CLIs[node].apps() )
358 main.log.warn( main.CLIs[node].appIDs() )
359 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
360 onpass="App Ids seem to be correct",
361 onfail="Something is wrong with app Ids" )
362
Jon Hall6e709752016-02-01 13:38:46 -0800363 def CASE2( self, main ):
364 """
365 Assign devices to controllers
366 """
367 import re
368 assert main.numCtrls, "main.numCtrls not defined"
369 assert main, "main not defined"
370 assert utilities.assert_equals, "utilities.assert_equals not defined"
371 assert main.CLIs, "main.CLIs not defined"
372 assert main.nodes, "main.nodes not defined"
373 assert ONOS1Port, "ONOS1Port not defined"
374 assert ONOS2Port, "ONOS2Port not defined"
375 assert ONOS3Port, "ONOS3Port not defined"
376 assert ONOS4Port, "ONOS4Port not defined"
377 assert ONOS5Port, "ONOS5Port not defined"
378 assert ONOS6Port, "ONOS6Port not defined"
379 assert ONOS7Port, "ONOS7Port not defined"
380
381 main.case( "Assigning devices to controllers" )
382 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
383 "and check that an ONOS node becomes the " +\
384 "master of the device."
385 main.step( "Assign switches to controllers" )
386
387 ipList = []
388 for i in range( main.numCtrls ):
389 ipList.append( main.nodes[ i ].ip_address )
390 swList = []
391 for i in range( 1, 29 ):
392 swList.append( "s" + str( i ) )
393 main.Mininet1.assignSwController( sw=swList, ip=ipList )
394
395 mastershipCheck = main.TRUE
396 for i in range( 1, 29 ):
397 response = main.Mininet1.getSwController( "s" + str( i ) )
398 try:
399 main.log.info( str( response ) )
400 except Exception:
401 main.log.info( repr( response ) )
402 for node in main.nodes:
403 if re.search( "tcp:" + node.ip_address, response ):
404 mastershipCheck = mastershipCheck and main.TRUE
405 else:
406 main.log.error( "Error, node " + node.ip_address + " is " +
407 "not in the list of controllers s" +
408 str( i ) + " is connecting to." )
409 mastershipCheck = main.FALSE
410 utilities.assert_equals(
411 expect=main.TRUE,
412 actual=mastershipCheck,
413 onpass="Switch mastership assigned correctly",
414 onfail="Switches not assigned correctly to controllers" )
415
416 def CASE21( self, main ):
417 """
418 Assign mastership to controllers
419 """
420 import time
421 assert main.numCtrls, "main.numCtrls not defined"
422 assert main, "main not defined"
423 assert utilities.assert_equals, "utilities.assert_equals not defined"
424 assert main.CLIs, "main.CLIs not defined"
425 assert main.nodes, "main.nodes not defined"
426 assert ONOS1Port, "ONOS1Port not defined"
427 assert ONOS2Port, "ONOS2Port not defined"
428 assert ONOS3Port, "ONOS3Port not defined"
429 assert ONOS4Port, "ONOS4Port not defined"
430 assert ONOS5Port, "ONOS5Port not defined"
431 assert ONOS6Port, "ONOS6Port not defined"
432 assert ONOS7Port, "ONOS7Port not defined"
433
434 main.case( "Assigning Controller roles for switches" )
435 main.caseExplanation = "Check that ONOS is connected to each " +\
436 "device. Then manually assign" +\
437 " mastership to specific ONOS nodes using" +\
438 " 'device-role'"
439 main.step( "Assign mastership of switches to specific controllers" )
440 # Manually assign mastership to the controller we want
441 roleCall = main.TRUE
442
443 ipList = [ ]
444 deviceList = []
445 onosCli = main.CLIs[ main.activeNodes[0] ]
446 try:
447 # Assign mastership to specific controllers. This assignment was
448 # determined for a 7 node cluser, but will work with any sized
449 # cluster
450 for i in range( 1, 29 ): # switches 1 through 28
451 # set up correct variables:
452 if i == 1:
453 c = 0
454 ip = main.nodes[ c ].ip_address # ONOS1
455 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
456 elif i == 2:
457 c = 1 % main.numCtrls
458 ip = main.nodes[ c ].ip_address # ONOS2
459 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
460 elif i == 3:
461 c = 1 % main.numCtrls
462 ip = main.nodes[ c ].ip_address # ONOS2
463 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
464 elif i == 4:
465 c = 3 % main.numCtrls
466 ip = main.nodes[ c ].ip_address # ONOS4
467 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
468 elif i == 5:
469 c = 2 % main.numCtrls
470 ip = main.nodes[ c ].ip_address # ONOS3
471 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
472 elif i == 6:
473 c = 2 % main.numCtrls
474 ip = main.nodes[ c ].ip_address # ONOS3
475 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
476 elif i == 7:
477 c = 5 % main.numCtrls
478 ip = main.nodes[ c ].ip_address # ONOS6
479 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
480 elif i >= 8 and i <= 17:
481 c = 4 % main.numCtrls
482 ip = main.nodes[ c ].ip_address # ONOS5
483 dpid = '3' + str( i ).zfill( 3 )
484 deviceId = onosCli.getDevice( dpid ).get( 'id' )
485 elif i >= 18 and i <= 27:
486 c = 6 % main.numCtrls
487 ip = main.nodes[ c ].ip_address # ONOS7
488 dpid = '6' + str( i ).zfill( 3 )
489 deviceId = onosCli.getDevice( dpid ).get( 'id' )
490 elif i == 28:
491 c = 0
492 ip = main.nodes[ c ].ip_address # ONOS1
493 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
494 else:
495 main.log.error( "You didn't write an else statement for " +
496 "switch s" + str( i ) )
497 roleCall = main.FALSE
498 # Assign switch
499 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
500 # TODO: make this controller dynamic
501 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
502 ipList.append( ip )
503 deviceList.append( deviceId )
504 except ( AttributeError, AssertionError ):
505 main.log.exception( "Something is wrong with ONOS device view" )
506 main.log.info( onosCli.devices() )
507 utilities.assert_equals(
508 expect=main.TRUE,
509 actual=roleCall,
510 onpass="Re-assigned switch mastership to designated controller",
511 onfail="Something wrong with deviceRole calls" )
512
513 main.step( "Check mastership was correctly assigned" )
514 roleCheck = main.TRUE
515 # NOTE: This is due to the fact that device mastership change is not
516 # atomic and is actually a multi step process
517 time.sleep( 5 )
518 for i in range( len( ipList ) ):
519 ip = ipList[i]
520 deviceId = deviceList[i]
521 # Check assignment
522 master = onosCli.getRole( deviceId ).get( 'master' )
523 if ip in master:
524 roleCheck = roleCheck and main.TRUE
525 else:
526 roleCheck = roleCheck and main.FALSE
527 main.log.error( "Error, controller " + ip + " is not" +
528 " master " + "of device " +
529 str( deviceId ) + ". Master is " +
530 repr( master ) + "." )
531 utilities.assert_equals(
532 expect=main.TRUE,
533 actual=roleCheck,
534 onpass="Switches were successfully reassigned to designated " +
535 "controller",
536 onfail="Switches were not successfully reassigned" )
537
538 def CASE3( self, main ):
539 """
540 Assign intents
541 """
542 import time
543 import json
544 assert main.numCtrls, "main.numCtrls not defined"
545 assert main, "main not defined"
546 assert utilities.assert_equals, "utilities.assert_equals not defined"
547 assert main.CLIs, "main.CLIs not defined"
548 assert main.nodes, "main.nodes not defined"
549 main.case( "Adding host Intents" )
550 main.caseExplanation = "Discover hosts by using pingall then " +\
551 "assign predetermined host-to-host intents." +\
552 " After installation, check that the intent" +\
553 " is distributed to all nodes and the state" +\
554 " is INSTALLED"
555
556 # install onos-app-fwd
557 main.step( "Install reactive forwarding app" )
558 onosCli = main.CLIs[ main.activeNodes[0] ]
559 installResults = onosCli.activateApp( "org.onosproject.fwd" )
560 utilities.assert_equals( expect=main.TRUE, actual=installResults,
561 onpass="Install fwd successful",
562 onfail="Install fwd failed" )
563
564 main.step( "Check app ids" )
565 appCheck = main.TRUE
566 threads = []
567 for i in main.activeNodes:
568 t = main.Thread( target=main.CLIs[i].appToIDCheck,
569 name="appToIDCheck-" + str( i ),
570 args=[] )
571 threads.append( t )
572 t.start()
573
574 for t in threads:
575 t.join()
576 appCheck = appCheck and t.result
577 if appCheck != main.TRUE:
578 main.log.warn( onosCli.apps() )
579 main.log.warn( onosCli.appIDs() )
580 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
581 onpass="App Ids seem to be correct",
582 onfail="Something is wrong with app Ids" )
583
584 main.step( "Discovering Hosts( Via pingall for now )" )
585 # FIXME: Once we have a host discovery mechanism, use that instead
586 # REACTIVE FWD test
587 pingResult = main.FALSE
588 passMsg = "Reactive Pingall test passed"
589 time1 = time.time()
590 pingResult = main.Mininet1.pingall()
591 time2 = time.time()
592 if not pingResult:
593 main.log.warn("First pingall failed. Trying again...")
594 pingResult = main.Mininet1.pingall()
595 passMsg += " on the second try"
596 utilities.assert_equals(
597 expect=main.TRUE,
598 actual=pingResult,
599 onpass= passMsg,
600 onfail="Reactive Pingall failed, " +
601 "one or more ping pairs failed" )
602 main.log.info( "Time for pingall: %2f seconds" %
603 ( time2 - time1 ) )
604 # timeout for fwd flows
605 time.sleep( 11 )
606 # uninstall onos-app-fwd
607 main.step( "Uninstall reactive forwarding app" )
608 node = main.activeNodes[0]
609 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
610 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
611 onpass="Uninstall fwd successful",
612 onfail="Uninstall fwd failed" )
613
614 main.step( "Check app ids" )
615 threads = []
616 appCheck2 = main.TRUE
617 for i in main.activeNodes:
618 t = main.Thread( target=main.CLIs[i].appToIDCheck,
619 name="appToIDCheck-" + str( i ),
620 args=[] )
621 threads.append( t )
622 t.start()
623
624 for t in threads:
625 t.join()
626 appCheck2 = appCheck2 and t.result
627 if appCheck2 != main.TRUE:
628 node = main.activeNodes[0]
629 main.log.warn( main.CLIs[node].apps() )
630 main.log.warn( main.CLIs[node].appIDs() )
631 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
632 onpass="App Ids seem to be correct",
633 onfail="Something is wrong with app Ids" )
634
635 main.step( "Add host intents via cli" )
636 intentIds = []
637 # TODO: move the host numbers to params
638 # Maybe look at all the paths we ping?
639 intentAddResult = True
640 hostResult = main.TRUE
641 for i in range( 8, 18 ):
642 main.log.info( "Adding host intent between h" + str( i ) +
643 " and h" + str( i + 10 ) )
644 host1 = "00:00:00:00:00:" + \
645 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
646 host2 = "00:00:00:00:00:" + \
647 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
648 # NOTE: getHost can return None
649 host1Dict = onosCli.getHost( host1 )
650 host2Dict = onosCli.getHost( host2 )
651 host1Id = None
652 host2Id = None
653 if host1Dict and host2Dict:
654 host1Id = host1Dict.get( 'id', None )
655 host2Id = host2Dict.get( 'id', None )
656 if host1Id and host2Id:
657 nodeNum = ( i % len( main.activeNodes ) )
658 node = main.activeNodes[nodeNum]
659 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
660 if tmpId:
661 main.log.info( "Added intent with id: " + tmpId )
662 intentIds.append( tmpId )
663 else:
664 main.log.error( "addHostIntent returned: " +
665 repr( tmpId ) )
666 else:
667 main.log.error( "Error, getHost() failed for h" + str( i ) +
668 " and/or h" + str( i + 10 ) )
669 node = main.activeNodes[0]
670 hosts = main.CLIs[node].hosts()
671 main.log.warn( "Hosts output: " )
672 try:
673 main.log.warn( json.dumps( json.loads( hosts ),
674 sort_keys=True,
675 indent=4,
676 separators=( ',', ': ' ) ) )
677 except ( ValueError, TypeError ):
678 main.log.warn( repr( hosts ) )
679 hostResult = main.FALSE
680 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
681 onpass="Found a host id for each host",
682 onfail="Error looking up host ids" )
683
684 intentStart = time.time()
685 onosIds = onosCli.getAllIntentsId()
686 main.log.info( "Submitted intents: " + str( intentIds ) )
687 main.log.info( "Intents in ONOS: " + str( onosIds ) )
688 for intent in intentIds:
689 if intent in onosIds:
690 pass # intent submitted is in onos
691 else:
692 intentAddResult = False
693 if intentAddResult:
694 intentStop = time.time()
695 else:
696 intentStop = None
697 # Print the intent states
698 intents = onosCli.intents()
699 intentStates = []
700 installedCheck = True
701 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
702 count = 0
703 try:
704 for intent in json.loads( intents ):
705 state = intent.get( 'state', None )
706 if "INSTALLED" not in state:
707 installedCheck = False
708 intentId = intent.get( 'id', None )
709 intentStates.append( ( intentId, state ) )
710 except ( ValueError, TypeError ):
711 main.log.exception( "Error parsing intents" )
712 # add submitted intents not in the store
713 tmplist = [ i for i, s in intentStates ]
714 missingIntents = False
715 for i in intentIds:
716 if i not in tmplist:
717 intentStates.append( ( i, " - " ) )
718 missingIntents = True
719 intentStates.sort()
720 for i, s in intentStates:
721 count += 1
722 main.log.info( "%-6s%-15s%-15s" %
723 ( str( count ), str( i ), str( s ) ) )
724 leaders = onosCli.leaders()
725 try:
726 missing = False
727 if leaders:
728 parsedLeaders = json.loads( leaders )
729 main.log.warn( json.dumps( parsedLeaders,
730 sort_keys=True,
731 indent=4,
732 separators=( ',', ': ' ) ) )
733 # check for all intent partitions
734 topics = []
735 for i in range( 14 ):
736 topics.append( "intent-partition-" + str( i ) )
737 main.log.debug( topics )
738 ONOStopics = [ j['topic'] for j in parsedLeaders ]
739 for topic in topics:
740 if topic not in ONOStopics:
741 main.log.error( "Error: " + topic +
742 " not in leaders" )
743 missing = True
744 else:
745 main.log.error( "leaders() returned None" )
746 except ( ValueError, TypeError ):
747 main.log.exception( "Error parsing leaders" )
748 main.log.error( repr( leaders ) )
749 # Check all nodes
750 if missing:
751 for i in main.activeNodes:
752 response = main.CLIs[i].leaders( jsonFormat=False)
753 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
754 str( response ) )
755
756 partitions = onosCli.partitions()
757 try:
758 if partitions :
759 parsedPartitions = json.loads( partitions )
760 main.log.warn( json.dumps( parsedPartitions,
761 sort_keys=True,
762 indent=4,
763 separators=( ',', ': ' ) ) )
764 # TODO check for a leader in all paritions
765 # TODO check for consistency among nodes
766 else:
767 main.log.error( "partitions() returned None" )
768 except ( ValueError, TypeError ):
769 main.log.exception( "Error parsing partitions" )
770 main.log.error( repr( partitions ) )
771 pendingMap = onosCli.pendingMap()
772 try:
773 if pendingMap :
774 parsedPending = json.loads( pendingMap )
775 main.log.warn( json.dumps( parsedPending,
776 sort_keys=True,
777 indent=4,
778 separators=( ',', ': ' ) ) )
779 # TODO check something here?
780 else:
781 main.log.error( "pendingMap() returned None" )
782 except ( ValueError, TypeError ):
783 main.log.exception( "Error parsing pending map" )
784 main.log.error( repr( pendingMap ) )
785
786 intentAddResult = bool( intentAddResult and not missingIntents and
787 installedCheck )
788 if not intentAddResult:
789 main.log.error( "Error in pushing host intents to ONOS" )
790
791 main.step( "Intent Anti-Entropy dispersion" )
792 for j in range(100):
793 correct = True
794 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
795 for i in main.activeNodes:
796 onosIds = []
797 ids = main.CLIs[i].getAllIntentsId()
798 onosIds.append( ids )
799 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
800 str( sorted( onosIds ) ) )
801 if sorted( ids ) != sorted( intentIds ):
802 main.log.warn( "Set of intent IDs doesn't match" )
803 correct = False
804 break
805 else:
806 intents = json.loads( main.CLIs[i].intents() )
807 for intent in intents:
808 if intent[ 'state' ] != "INSTALLED":
809 main.log.warn( "Intent " + intent[ 'id' ] +
810 " is " + intent[ 'state' ] )
811 correct = False
812 break
813 if correct:
814 break
815 else:
816 time.sleep(1)
817 if not intentStop:
818 intentStop = time.time()
819 global gossipTime
820 gossipTime = intentStop - intentStart
821 main.log.info( "It took about " + str( gossipTime ) +
822 " seconds for all intents to appear in each node" )
823 gossipPeriod = int( main.params['timers']['gossip'] )
824 maxGossipTime = gossipPeriod * len( main.activeNodes )
825 utilities.assert_greater_equals(
826 expect=maxGossipTime, actual=gossipTime,
827 onpass="ECM anti-entropy for intents worked within " +
828 "expected time",
829 onfail="Intent ECM anti-entropy took too long. " +
830 "Expected time:{}, Actual time:{}".format( maxGossipTime,
831 gossipTime ) )
832 if gossipTime <= maxGossipTime:
833 intentAddResult = True
834
835 if not intentAddResult or "key" in pendingMap:
836 import time
837 installedCheck = True
838 main.log.info( "Sleeping 60 seconds to see if intents are found" )
839 time.sleep( 60 )
840 onosIds = onosCli.getAllIntentsId()
841 main.log.info( "Submitted intents: " + str( intentIds ) )
842 main.log.info( "Intents in ONOS: " + str( onosIds ) )
843 # Print the intent states
844 intents = onosCli.intents()
845 intentStates = []
846 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
847 count = 0
848 try:
849 for intent in json.loads( intents ):
850 # Iter through intents of a node
851 state = intent.get( 'state', None )
852 if "INSTALLED" not in state:
853 installedCheck = False
854 intentId = intent.get( 'id', None )
855 intentStates.append( ( intentId, state ) )
856 except ( ValueError, TypeError ):
857 main.log.exception( "Error parsing intents" )
858 # add submitted intents not in the store
859 tmplist = [ i for i, s in intentStates ]
860 for i in intentIds:
861 if i not in tmplist:
862 intentStates.append( ( i, " - " ) )
863 intentStates.sort()
864 for i, s in intentStates:
865 count += 1
866 main.log.info( "%-6s%-15s%-15s" %
867 ( str( count ), str( i ), str( s ) ) )
868 leaders = onosCli.leaders()
869 try:
870 missing = False
871 if leaders:
872 parsedLeaders = json.loads( leaders )
873 main.log.warn( json.dumps( parsedLeaders,
874 sort_keys=True,
875 indent=4,
876 separators=( ',', ': ' ) ) )
877 # check for all intent partitions
878 # check for election
879 topics = []
880 for i in range( 14 ):
881 topics.append( "intent-partition-" + str( i ) )
882 # FIXME: this should only be after we start the app
883 topics.append( "org.onosproject.election" )
884 main.log.debug( topics )
885 ONOStopics = [ j['topic'] for j in parsedLeaders ]
886 for topic in topics:
887 if topic not in ONOStopics:
888 main.log.error( "Error: " + topic +
889 " not in leaders" )
890 missing = True
891 else:
892 main.log.error( "leaders() returned None" )
893 except ( ValueError, TypeError ):
894 main.log.exception( "Error parsing leaders" )
895 main.log.error( repr( leaders ) )
896 # Check all nodes
897 if missing:
898 for i in main.activeNodes:
899 node = main.CLIs[i]
900 response = node.leaders( jsonFormat=False)
901 main.log.warn( str( node.name ) + " leaders output: \n" +
902 str( response ) )
903
904 partitions = onosCli.partitions()
905 try:
906 if partitions :
907 parsedPartitions = json.loads( partitions )
908 main.log.warn( json.dumps( parsedPartitions,
909 sort_keys=True,
910 indent=4,
911 separators=( ',', ': ' ) ) )
912 # TODO check for a leader in all paritions
913 # TODO check for consistency among nodes
914 else:
915 main.log.error( "partitions() returned None" )
916 except ( ValueError, TypeError ):
917 main.log.exception( "Error parsing partitions" )
918 main.log.error( repr( partitions ) )
919 pendingMap = onosCli.pendingMap()
920 try:
921 if pendingMap :
922 parsedPending = json.loads( pendingMap )
923 main.log.warn( json.dumps( parsedPending,
924 sort_keys=True,
925 indent=4,
926 separators=( ',', ': ' ) ) )
927 # TODO check something here?
928 else:
929 main.log.error( "pendingMap() returned None" )
930 except ( ValueError, TypeError ):
931 main.log.exception( "Error parsing pending map" )
932 main.log.error( repr( pendingMap ) )
933
934 def CASE4( self, main ):
935 """
936 Ping across added host intents
937 """
938 import json
939 import time
940 assert main.numCtrls, "main.numCtrls not defined"
941 assert main, "main not defined"
942 assert utilities.assert_equals, "utilities.assert_equals not defined"
943 assert main.CLIs, "main.CLIs not defined"
944 assert main.nodes, "main.nodes not defined"
945 main.case( "Verify connectivity by sending traffic across Intents" )
946 main.caseExplanation = "Ping across added host intents to check " +\
947 "functionality and check the state of " +\
948 "the intent"
Jon Hall6e709752016-02-01 13:38:46 -0800949
Jon Hall41d39f12016-04-11 22:54:35 -0700950 onosCli = main.CLIs[ main.activeNodes[0] ]
Jon Hall6e709752016-02-01 13:38:46 -0800951 main.step( "Check Intent state" )
952 installedCheck = False
953 loopCount = 0
954 while not installedCheck and loopCount < 40:
955 installedCheck = True
956 # Print the intent states
957 intents = onosCli.intents()
958 intentStates = []
959 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
960 count = 0
961 # Iter through intents of a node
962 try:
963 for intent in json.loads( intents ):
964 state = intent.get( 'state', None )
965 if "INSTALLED" not in state:
966 installedCheck = False
967 intentId = intent.get( 'id', None )
968 intentStates.append( ( intentId, state ) )
969 except ( ValueError, TypeError ):
970 main.log.exception( "Error parsing intents." )
971 # Print states
972 intentStates.sort()
973 for i, s in intentStates:
974 count += 1
975 main.log.info( "%-6s%-15s%-15s" %
976 ( str( count ), str( i ), str( s ) ) )
977 if not installedCheck:
978 time.sleep( 1 )
979 loopCount += 1
980 utilities.assert_equals( expect=True, actual=installedCheck,
981 onpass="Intents are all INSTALLED",
982 onfail="Intents are not all in " +
983 "INSTALLED state" )
984
Jon Hall9d2dcad2016-04-08 10:15:20 -0700985 main.step( "Ping across added host intents" )
Jon Hall9d2dcad2016-04-08 10:15:20 -0700986 PingResult = main.TRUE
987 for i in range( 8, 18 ):
988 ping = main.Mininet1.pingHost( src="h" + str( i ),
989 target="h" + str( i + 10 ) )
990 PingResult = PingResult and ping
991 if ping == main.FALSE:
992 main.log.warn( "Ping failed between h" + str( i ) +
993 " and h" + str( i + 10 ) )
994 elif ping == main.TRUE:
995 main.log.info( "Ping test passed!" )
996 # Don't set PingResult or you'd override failures
997 if PingResult == main.FALSE:
998 main.log.error(
999 "Intents have not been installed correctly, pings failed." )
1000 # TODO: pretty print
1001 main.log.warn( "ONOS1 intents: " )
1002 try:
1003 tmpIntents = onosCli.intents()
1004 main.log.warn( json.dumps( json.loads( tmpIntents ),
1005 sort_keys=True,
1006 indent=4,
1007 separators=( ',', ': ' ) ) )
1008 except ( ValueError, TypeError ):
1009 main.log.warn( repr( tmpIntents ) )
1010 utilities.assert_equals(
1011 expect=main.TRUE,
1012 actual=PingResult,
1013 onpass="Intents have been installed correctly and pings work",
1014 onfail="Intents have not been installed correctly, pings failed." )
1015
Jon Hall6e709752016-02-01 13:38:46 -08001016 main.step( "Check leadership of topics" )
1017 leaders = onosCli.leaders()
1018 topicCheck = main.TRUE
1019 try:
1020 if leaders:
1021 parsedLeaders = json.loads( leaders )
1022 main.log.warn( json.dumps( parsedLeaders,
1023 sort_keys=True,
1024 indent=4,
1025 separators=( ',', ': ' ) ) )
1026 # check for all intent partitions
1027 # check for election
1028 # TODO: Look at Devices as topics now that it uses this system
1029 topics = []
1030 for i in range( 14 ):
1031 topics.append( "intent-partition-" + str( i ) )
1032 # FIXME: this should only be after we start the app
1033 # FIXME: topics.append( "org.onosproject.election" )
1034 # Print leaders output
1035 main.log.debug( topics )
1036 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1037 for topic in topics:
1038 if topic not in ONOStopics:
1039 main.log.error( "Error: " + topic +
1040 " not in leaders" )
1041 topicCheck = main.FALSE
1042 else:
1043 main.log.error( "leaders() returned None" )
1044 topicCheck = main.FALSE
1045 except ( ValueError, TypeError ):
1046 topicCheck = main.FALSE
1047 main.log.exception( "Error parsing leaders" )
1048 main.log.error( repr( leaders ) )
1049 # TODO: Check for a leader of these topics
1050 # Check all nodes
1051 if topicCheck:
1052 for i in main.activeNodes:
1053 node = main.CLIs[i]
1054 response = node.leaders( jsonFormat=False)
1055 main.log.warn( str( node.name ) + " leaders output: \n" +
1056 str( response ) )
1057
1058 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1059 onpass="intent Partitions is in leaders",
1060 onfail="Some topics were lost " )
1061 # Print partitions
1062 partitions = onosCli.partitions()
1063 try:
1064 if partitions :
1065 parsedPartitions = json.loads( partitions )
1066 main.log.warn( json.dumps( parsedPartitions,
1067 sort_keys=True,
1068 indent=4,
1069 separators=( ',', ': ' ) ) )
1070 # TODO check for a leader in all paritions
1071 # TODO check for consistency among nodes
1072 else:
1073 main.log.error( "partitions() returned None" )
1074 except ( ValueError, TypeError ):
1075 main.log.exception( "Error parsing partitions" )
1076 main.log.error( repr( partitions ) )
1077 # Print Pending Map
1078 pendingMap = onosCli.pendingMap()
1079 try:
1080 if pendingMap :
1081 parsedPending = json.loads( pendingMap )
1082 main.log.warn( json.dumps( parsedPending,
1083 sort_keys=True,
1084 indent=4,
1085 separators=( ',', ': ' ) ) )
1086 # TODO check something here?
1087 else:
1088 main.log.error( "pendingMap() returned None" )
1089 except ( ValueError, TypeError ):
1090 main.log.exception( "Error parsing pending map" )
1091 main.log.error( repr( pendingMap ) )
1092
1093 if not installedCheck:
1094 main.log.info( "Waiting 60 seconds to see if the state of " +
1095 "intents change" )
1096 time.sleep( 60 )
1097 # Print the intent states
1098 intents = onosCli.intents()
1099 intentStates = []
1100 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1101 count = 0
1102 # Iter through intents of a node
1103 try:
1104 for intent in json.loads( intents ):
1105 state = intent.get( 'state', None )
1106 if "INSTALLED" not in state:
1107 installedCheck = False
1108 intentId = intent.get( 'id', None )
1109 intentStates.append( ( intentId, state ) )
1110 except ( ValueError, TypeError ):
1111 main.log.exception( "Error parsing intents." )
1112 intentStates.sort()
1113 for i, s in intentStates:
1114 count += 1
1115 main.log.info( "%-6s%-15s%-15s" %
1116 ( str( count ), str( i ), str( s ) ) )
1117 leaders = onosCli.leaders()
1118 try:
1119 missing = False
1120 if leaders:
1121 parsedLeaders = json.loads( leaders )
1122 main.log.warn( json.dumps( parsedLeaders,
1123 sort_keys=True,
1124 indent=4,
1125 separators=( ',', ': ' ) ) )
1126 # check for all intent partitions
1127 # check for election
1128 topics = []
1129 for i in range( 14 ):
1130 topics.append( "intent-partition-" + str( i ) )
1131 # FIXME: this should only be after we start the app
1132 topics.append( "org.onosproject.election" )
1133 main.log.debug( topics )
1134 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1135 for topic in topics:
1136 if topic not in ONOStopics:
1137 main.log.error( "Error: " + topic +
1138 " not in leaders" )
1139 missing = True
1140 else:
1141 main.log.error( "leaders() returned None" )
1142 except ( ValueError, TypeError ):
1143 main.log.exception( "Error parsing leaders" )
1144 main.log.error( repr( leaders ) )
1145 if missing:
1146 for i in main.activeNodes:
1147 node = main.CLIs[i]
1148 response = node.leaders( jsonFormat=False)
1149 main.log.warn( str( node.name ) + " leaders output: \n" +
1150 str( response ) )
1151
1152 partitions = onosCli.partitions()
1153 try:
1154 if partitions :
1155 parsedPartitions = json.loads( partitions )
1156 main.log.warn( json.dumps( parsedPartitions,
1157 sort_keys=True,
1158 indent=4,
1159 separators=( ',', ': ' ) ) )
1160 # TODO check for a leader in all paritions
1161 # TODO check for consistency among nodes
1162 else:
1163 main.log.error( "partitions() returned None" )
1164 except ( ValueError, TypeError ):
1165 main.log.exception( "Error parsing partitions" )
1166 main.log.error( repr( partitions ) )
1167 pendingMap = onosCli.pendingMap()
1168 try:
1169 if pendingMap :
1170 parsedPending = json.loads( pendingMap )
1171 main.log.warn( json.dumps( parsedPending,
1172 sort_keys=True,
1173 indent=4,
1174 separators=( ',', ': ' ) ) )
1175 # TODO check something here?
1176 else:
1177 main.log.error( "pendingMap() returned None" )
1178 except ( ValueError, TypeError ):
1179 main.log.exception( "Error parsing pending map" )
1180 main.log.error( repr( pendingMap ) )
1181 # Print flowrules
1182 node = main.activeNodes[0]
1183 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
1184 main.step( "Wait a minute then ping again" )
1185 # the wait is above
1186 PingResult = main.TRUE
1187 for i in range( 8, 18 ):
1188 ping = main.Mininet1.pingHost( src="h" + str( i ),
1189 target="h" + str( i + 10 ) )
1190 PingResult = PingResult and ping
1191 if ping == main.FALSE:
1192 main.log.warn( "Ping failed between h" + str( i ) +
1193 " and h" + str( i + 10 ) )
1194 elif ping == main.TRUE:
1195 main.log.info( "Ping test passed!" )
1196 # Don't set PingResult or you'd override failures
1197 if PingResult == main.FALSE:
1198 main.log.error(
1199 "Intents have not been installed correctly, pings failed." )
1200 # TODO: pretty print
1201 main.log.warn( "ONOS1 intents: " )
1202 try:
1203 tmpIntents = onosCli.intents()
1204 main.log.warn( json.dumps( json.loads( tmpIntents ),
1205 sort_keys=True,
1206 indent=4,
1207 separators=( ',', ': ' ) ) )
1208 except ( ValueError, TypeError ):
1209 main.log.warn( repr( tmpIntents ) )
1210 utilities.assert_equals(
1211 expect=main.TRUE,
1212 actual=PingResult,
1213 onpass="Intents have been installed correctly and pings work",
1214 onfail="Intents have not been installed correctly, pings failed." )
1215
1216 def CASE5( self, main ):
1217 """
1218 Reading state of ONOS
1219 """
1220 import json
1221 import time
1222 assert main.numCtrls, "main.numCtrls not defined"
1223 assert main, "main not defined"
1224 assert utilities.assert_equals, "utilities.assert_equals not defined"
1225 assert main.CLIs, "main.CLIs not defined"
1226 assert main.nodes, "main.nodes not defined"
1227
1228 main.case( "Setting up and gathering data for current state" )
1229 # The general idea for this test case is to pull the state of
1230 # ( intents,flows, topology,... ) from each ONOS node
1231 # We can then compare them with each other and also with past states
1232
1233 main.step( "Check that each switch has a master" )
1234 global mastershipState
1235 mastershipState = '[]'
1236
1237 # Assert that each device has a master
1238 rolesNotNull = main.TRUE
1239 threads = []
1240 for i in main.activeNodes:
1241 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1242 name="rolesNotNull-" + str( i ),
1243 args=[] )
1244 threads.append( t )
1245 t.start()
1246
1247 for t in threads:
1248 t.join()
1249 rolesNotNull = rolesNotNull and t.result
1250 utilities.assert_equals(
1251 expect=main.TRUE,
1252 actual=rolesNotNull,
1253 onpass="Each device has a master",
1254 onfail="Some devices don't have a master assigned" )
1255
1256 main.step( "Get the Mastership of each switch from each controller" )
1257 ONOSMastership = []
1258 mastershipCheck = main.FALSE
1259 consistentMastership = True
1260 rolesResults = True
1261 threads = []
1262 for i in main.activeNodes:
1263 t = main.Thread( target=main.CLIs[i].roles,
1264 name="roles-" + str( i ),
1265 args=[] )
1266 threads.append( t )
1267 t.start()
1268
1269 for t in threads:
1270 t.join()
1271 ONOSMastership.append( t.result )
1272
1273 for i in range( len( ONOSMastership ) ):
1274 node = str( main.activeNodes[i] + 1 )
1275 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1276 main.log.error( "Error in getting ONOS" + node + " roles" )
1277 main.log.warn( "ONOS" + node + " mastership response: " +
1278 repr( ONOSMastership[i] ) )
1279 rolesResults = False
1280 utilities.assert_equals(
1281 expect=True,
1282 actual=rolesResults,
1283 onpass="No error in reading roles output",
1284 onfail="Error in reading roles from ONOS" )
1285
1286 main.step( "Check for consistency in roles from each controller" )
1287 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1288 main.log.info(
1289 "Switch roles are consistent across all ONOS nodes" )
1290 else:
1291 consistentMastership = False
1292 utilities.assert_equals(
1293 expect=True,
1294 actual=consistentMastership,
1295 onpass="Switch roles are consistent across all ONOS nodes",
1296 onfail="ONOS nodes have different views of switch roles" )
1297
1298 if rolesResults and not consistentMastership:
1299 for i in range( len( main.activeNodes ) ):
1300 node = str( main.activeNodes[i] + 1 )
1301 try:
1302 main.log.warn(
1303 "ONOS" + node + " roles: ",
1304 json.dumps(
1305 json.loads( ONOSMastership[ i ] ),
1306 sort_keys=True,
1307 indent=4,
1308 separators=( ',', ': ' ) ) )
1309 except ( ValueError, TypeError ):
1310 main.log.warn( repr( ONOSMastership[ i ] ) )
1311 elif rolesResults and consistentMastership:
1312 mastershipCheck = main.TRUE
1313 mastershipState = ONOSMastership[ 0 ]
1314
1315 main.step( "Get the intents from each controller" )
1316 global intentState
1317 intentState = []
1318 ONOSIntents = []
1319 intentCheck = main.FALSE
1320 consistentIntents = True
1321 intentsResults = True
1322 threads = []
1323 for i in main.activeNodes:
1324 t = main.Thread( target=main.CLIs[i].intents,
1325 name="intents-" + str( i ),
1326 args=[],
1327 kwargs={ 'jsonFormat': True } )
1328 threads.append( t )
1329 t.start()
1330
1331 for t in threads:
1332 t.join()
1333 ONOSIntents.append( t.result )
1334
1335 for i in range( len( ONOSIntents ) ):
1336 node = str( main.activeNodes[i] + 1 )
1337 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1338 main.log.error( "Error in getting ONOS" + node + " intents" )
1339 main.log.warn( "ONOS" + node + " intents response: " +
1340 repr( ONOSIntents[ i ] ) )
1341 intentsResults = False
1342 utilities.assert_equals(
1343 expect=True,
1344 actual=intentsResults,
1345 onpass="No error in reading intents output",
1346 onfail="Error in reading intents from ONOS" )
1347
1348 main.step( "Check for consistency in Intents from each controller" )
1349 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1350 main.log.info( "Intents are consistent across all ONOS " +
1351 "nodes" )
1352 else:
1353 consistentIntents = False
1354 main.log.error( "Intents not consistent" )
1355 utilities.assert_equals(
1356 expect=True,
1357 actual=consistentIntents,
1358 onpass="Intents are consistent across all ONOS nodes",
1359 onfail="ONOS nodes have different views of intents" )
1360
1361 if intentsResults:
1362 # Try to make it easy to figure out what is happening
1363 #
1364 # Intent ONOS1 ONOS2 ...
1365 # 0x01 INSTALLED INSTALLING
1366 # ... ... ...
1367 # ... ... ...
1368 title = " Id"
1369 for n in main.activeNodes:
1370 title += " " * 10 + "ONOS" + str( n + 1 )
1371 main.log.warn( title )
1372 # get all intent keys in the cluster
1373 keys = []
Jon Halla440e872016-03-31 15:15:50 -07001374 try:
1375 # Get the set of all intent keys
Jon Hall6e709752016-02-01 13:38:46 -08001376 for nodeStr in ONOSIntents:
1377 node = json.loads( nodeStr )
1378 for intent in node:
Jon Halla440e872016-03-31 15:15:50 -07001379 keys.append( intent.get( 'id' ) )
1380 keys = set( keys )
1381 # For each intent key, print the state on each node
1382 for key in keys:
1383 row = "%-13s" % key
1384 for nodeStr in ONOSIntents:
1385 node = json.loads( nodeStr )
1386 for intent in node:
1387 if intent.get( 'id', "Error" ) == key:
1388 row += "%-15s" % intent.get( 'state' )
1389 main.log.warn( row )
1390 # End of intent state table
1391 except ValueError as e:
1392 main.log.exception( e )
1393 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
Jon Hall6e709752016-02-01 13:38:46 -08001394
1395 if intentsResults and not consistentIntents:
1396 # print the json objects
1397 n = str( main.activeNodes[-1] + 1 )
1398 main.log.debug( "ONOS" + n + " intents: " )
1399 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1400 sort_keys=True,
1401 indent=4,
1402 separators=( ',', ': ' ) ) )
1403 for i in range( len( ONOSIntents ) ):
1404 node = str( main.activeNodes[i] + 1 )
1405 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1406 main.log.debug( "ONOS" + node + " intents: " )
1407 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1408 sort_keys=True,
1409 indent=4,
1410 separators=( ',', ': ' ) ) )
1411 else:
1412 main.log.debug( "ONOS" + node + " intents match ONOS" +
1413 n + " intents" )
1414 elif intentsResults and consistentIntents:
1415 intentCheck = main.TRUE
1416 intentState = ONOSIntents[ 0 ]
1417
1418 main.step( "Get the flows from each controller" )
1419 global flowState
1420 flowState = []
1421 ONOSFlows = []
1422 ONOSFlowsJson = []
1423 flowCheck = main.FALSE
1424 consistentFlows = True
1425 flowsResults = True
1426 threads = []
1427 for i in main.activeNodes:
1428 t = main.Thread( target=main.CLIs[i].flows,
1429 name="flows-" + str( i ),
1430 args=[],
1431 kwargs={ 'jsonFormat': True } )
1432 threads.append( t )
1433 t.start()
1434
1435 # NOTE: Flows command can take some time to run
1436 time.sleep(30)
1437 for t in threads:
1438 t.join()
1439 result = t.result
1440 ONOSFlows.append( result )
1441
1442 for i in range( len( ONOSFlows ) ):
1443 num = str( main.activeNodes[i] + 1 )
1444 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1445 main.log.error( "Error in getting ONOS" + num + " flows" )
1446 main.log.warn( "ONOS" + num + " flows response: " +
1447 repr( ONOSFlows[ i ] ) )
1448 flowsResults = False
1449 ONOSFlowsJson.append( None )
1450 else:
1451 try:
1452 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1453 except ( ValueError, TypeError ):
1454 # FIXME: change this to log.error?
1455 main.log.exception( "Error in parsing ONOS" + num +
1456 " response as json." )
1457 main.log.error( repr( ONOSFlows[ i ] ) )
1458 ONOSFlowsJson.append( None )
1459 flowsResults = False
1460 utilities.assert_equals(
1461 expect=True,
1462 actual=flowsResults,
1463 onpass="No error in reading flows output",
1464 onfail="Error in reading flows from ONOS" )
1465
1466 main.step( "Check for consistency in Flows from each controller" )
1467 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1468 if all( tmp ):
1469 main.log.info( "Flow count is consistent across all ONOS nodes" )
1470 else:
1471 consistentFlows = False
1472 utilities.assert_equals(
1473 expect=True,
1474 actual=consistentFlows,
1475 onpass="The flow count is consistent across all ONOS nodes",
1476 onfail="ONOS nodes have different flow counts" )
1477
1478 if flowsResults and not consistentFlows:
1479 for i in range( len( ONOSFlows ) ):
1480 node = str( main.activeNodes[i] + 1 )
1481 try:
1482 main.log.warn(
1483 "ONOS" + node + " flows: " +
1484 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1485 indent=4, separators=( ',', ': ' ) ) )
1486 except ( ValueError, TypeError ):
1487 main.log.warn( "ONOS" + node + " flows: " +
1488 repr( ONOSFlows[ i ] ) )
1489 elif flowsResults and consistentFlows:
1490 flowCheck = main.TRUE
1491 flowState = ONOSFlows[ 0 ]
1492
1493 main.step( "Get the OF Table entries" )
1494 global flows
1495 flows = []
1496 for i in range( 1, 29 ):
1497 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1498 if flowCheck == main.FALSE:
1499 for table in flows:
1500 main.log.warn( table )
1501 # TODO: Compare switch flow tables with ONOS flow tables
1502
1503 main.step( "Start continuous pings" )
1504 main.Mininet2.pingLong(
1505 src=main.params[ 'PING' ][ 'source1' ],
1506 target=main.params[ 'PING' ][ 'target1' ],
1507 pingTime=500 )
1508 main.Mininet2.pingLong(
1509 src=main.params[ 'PING' ][ 'source2' ],
1510 target=main.params[ 'PING' ][ 'target2' ],
1511 pingTime=500 )
1512 main.Mininet2.pingLong(
1513 src=main.params[ 'PING' ][ 'source3' ],
1514 target=main.params[ 'PING' ][ 'target3' ],
1515 pingTime=500 )
1516 main.Mininet2.pingLong(
1517 src=main.params[ 'PING' ][ 'source4' ],
1518 target=main.params[ 'PING' ][ 'target4' ],
1519 pingTime=500 )
1520 main.Mininet2.pingLong(
1521 src=main.params[ 'PING' ][ 'source5' ],
1522 target=main.params[ 'PING' ][ 'target5' ],
1523 pingTime=500 )
1524 main.Mininet2.pingLong(
1525 src=main.params[ 'PING' ][ 'source6' ],
1526 target=main.params[ 'PING' ][ 'target6' ],
1527 pingTime=500 )
1528 main.Mininet2.pingLong(
1529 src=main.params[ 'PING' ][ 'source7' ],
1530 target=main.params[ 'PING' ][ 'target7' ],
1531 pingTime=500 )
1532 main.Mininet2.pingLong(
1533 src=main.params[ 'PING' ][ 'source8' ],
1534 target=main.params[ 'PING' ][ 'target8' ],
1535 pingTime=500 )
1536 main.Mininet2.pingLong(
1537 src=main.params[ 'PING' ][ 'source9' ],
1538 target=main.params[ 'PING' ][ 'target9' ],
1539 pingTime=500 )
1540 main.Mininet2.pingLong(
1541 src=main.params[ 'PING' ][ 'source10' ],
1542 target=main.params[ 'PING' ][ 'target10' ],
1543 pingTime=500 )
1544
1545 main.step( "Collecting topology information from ONOS" )
1546 devices = []
1547 threads = []
1548 for i in main.activeNodes:
1549 t = main.Thread( target=main.CLIs[i].devices,
1550 name="devices-" + str( i ),
1551 args=[ ] )
1552 threads.append( t )
1553 t.start()
1554
1555 for t in threads:
1556 t.join()
1557 devices.append( t.result )
1558 hosts = []
1559 threads = []
1560 for i in main.activeNodes:
1561 t = main.Thread( target=main.CLIs[i].hosts,
1562 name="hosts-" + str( i ),
1563 args=[ ] )
1564 threads.append( t )
1565 t.start()
1566
1567 for t in threads:
1568 t.join()
1569 try:
1570 hosts.append( json.loads( t.result ) )
1571 except ( ValueError, TypeError ):
1572 # FIXME: better handling of this, print which node
1573 # Maybe use thread name?
1574 main.log.exception( "Error parsing json output of hosts" )
1575 main.log.warn( repr( t.result ) )
1576 hosts.append( None )
1577
1578 ports = []
1579 threads = []
1580 for i in main.activeNodes:
1581 t = main.Thread( target=main.CLIs[i].ports,
1582 name="ports-" + str( i ),
1583 args=[ ] )
1584 threads.append( t )
1585 t.start()
1586
1587 for t in threads:
1588 t.join()
1589 ports.append( t.result )
1590 links = []
1591 threads = []
1592 for i in main.activeNodes:
1593 t = main.Thread( target=main.CLIs[i].links,
1594 name="links-" + str( i ),
1595 args=[ ] )
1596 threads.append( t )
1597 t.start()
1598
1599 for t in threads:
1600 t.join()
1601 links.append( t.result )
1602 clusters = []
1603 threads = []
1604 for i in main.activeNodes:
1605 t = main.Thread( target=main.CLIs[i].clusters,
1606 name="clusters-" + str( i ),
1607 args=[ ] )
1608 threads.append( t )
1609 t.start()
1610
1611 for t in threads:
1612 t.join()
1613 clusters.append( t.result )
1614 # Compare json objects for hosts and dataplane clusters
1615
1616 # hosts
1617 main.step( "Host view is consistent across ONOS nodes" )
1618 consistentHostsResult = main.TRUE
1619 for controller in range( len( hosts ) ):
1620 controllerStr = str( main.activeNodes[controller] + 1 )
1621 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1622 if hosts[ controller ] == hosts[ 0 ]:
1623 continue
1624 else: # hosts not consistent
1625 main.log.error( "hosts from ONOS" +
1626 controllerStr +
1627 " is inconsistent with ONOS1" )
1628 main.log.warn( repr( hosts[ controller ] ) )
1629 consistentHostsResult = main.FALSE
1630
1631 else:
1632 main.log.error( "Error in getting ONOS hosts from ONOS" +
1633 controllerStr )
1634 consistentHostsResult = main.FALSE
1635 main.log.warn( "ONOS" + controllerStr +
1636 " hosts response: " +
1637 repr( hosts[ controller ] ) )
1638 utilities.assert_equals(
1639 expect=main.TRUE,
1640 actual=consistentHostsResult,
1641 onpass="Hosts view is consistent across all ONOS nodes",
1642 onfail="ONOS nodes have different views of hosts" )
1643
1644 main.step( "Each host has an IP address" )
1645 ipResult = main.TRUE
1646 for controller in range( 0, len( hosts ) ):
1647 controllerStr = str( main.activeNodes[controller] + 1 )
1648 if hosts[ controller ]:
1649 for host in hosts[ controller ]:
1650 if not host.get( 'ipAddresses', [ ] ):
1651 main.log.error( "Error with host ips on controller" +
1652 controllerStr + ": " + str( host ) )
1653 ipResult = main.FALSE
1654 utilities.assert_equals(
1655 expect=main.TRUE,
1656 actual=ipResult,
1657 onpass="The ips of the hosts aren't empty",
1658 onfail="The ip of at least one host is missing" )
1659
1660 # Strongly connected clusters of devices
1661 main.step( "Cluster view is consistent across ONOS nodes" )
1662 consistentClustersResult = main.TRUE
1663 for controller in range( len( clusters ) ):
1664 controllerStr = str( main.activeNodes[controller] + 1 )
1665 if "Error" not in clusters[ controller ]:
1666 if clusters[ controller ] == clusters[ 0 ]:
1667 continue
1668 else: # clusters not consistent
1669 main.log.error( "clusters from ONOS" + controllerStr +
1670 " is inconsistent with ONOS1" )
1671 consistentClustersResult = main.FALSE
1672
1673 else:
1674 main.log.error( "Error in getting dataplane clusters " +
1675 "from ONOS" + controllerStr )
1676 consistentClustersResult = main.FALSE
1677 main.log.warn( "ONOS" + controllerStr +
1678 " clusters response: " +
1679 repr( clusters[ controller ] ) )
1680 utilities.assert_equals(
1681 expect=main.TRUE,
1682 actual=consistentClustersResult,
1683 onpass="Clusters view is consistent across all ONOS nodes",
1684 onfail="ONOS nodes have different views of clusters" )
Jon Hall172b7ba2016-04-07 18:12:20 -07001685 if consistentClustersResult != main.TRUE:
1686 main.log.debug( clusters )
Jon Hall6e709752016-02-01 13:38:46 -08001687 # there should always only be one cluster
1688 main.step( "Cluster view correct across ONOS nodes" )
1689 try:
1690 numClusters = len( json.loads( clusters[ 0 ] ) )
1691 except ( ValueError, TypeError ):
1692 main.log.exception( "Error parsing clusters[0]: " +
1693 repr( clusters[ 0 ] ) )
1694 numClusters = "ERROR"
1695 clusterResults = main.FALSE
1696 if numClusters == 1:
1697 clusterResults = main.TRUE
1698 utilities.assert_equals(
1699 expect=1,
1700 actual=numClusters,
1701 onpass="ONOS shows 1 SCC",
1702 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1703
1704 main.step( "Comparing ONOS topology to MN" )
1705 devicesResults = main.TRUE
1706 linksResults = main.TRUE
1707 hostsResults = main.TRUE
1708 mnSwitches = main.Mininet1.getSwitches()
1709 mnLinks = main.Mininet1.getLinks()
1710 mnHosts = main.Mininet1.getHosts()
1711 for controller in main.activeNodes:
1712 controllerStr = str( main.activeNodes[controller] + 1 )
1713 if devices[ controller ] and ports[ controller ] and\
1714 "Error" not in devices[ controller ] and\
1715 "Error" not in ports[ controller ]:
1716 currentDevicesResult = main.Mininet1.compareSwitches(
1717 mnSwitches,
1718 json.loads( devices[ controller ] ),
1719 json.loads( ports[ controller ] ) )
1720 else:
1721 currentDevicesResult = main.FALSE
1722 utilities.assert_equals( expect=main.TRUE,
1723 actual=currentDevicesResult,
1724 onpass="ONOS" + controllerStr +
1725 " Switches view is correct",
1726 onfail="ONOS" + controllerStr +
1727 " Switches view is incorrect" )
1728 if links[ controller ] and "Error" not in links[ controller ]:
1729 currentLinksResult = main.Mininet1.compareLinks(
1730 mnSwitches, mnLinks,
1731 json.loads( links[ controller ] ) )
1732 else:
1733 currentLinksResult = main.FALSE
1734 utilities.assert_equals( expect=main.TRUE,
1735 actual=currentLinksResult,
1736 onpass="ONOS" + controllerStr +
1737 " links view is correct",
1738 onfail="ONOS" + controllerStr +
1739 " links view is incorrect" )
1740
1741 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1742 currentHostsResult = main.Mininet1.compareHosts(
1743 mnHosts,
1744 hosts[ controller ] )
1745 else:
1746 currentHostsResult = main.FALSE
1747 utilities.assert_equals( expect=main.TRUE,
1748 actual=currentHostsResult,
1749 onpass="ONOS" + controllerStr +
1750 " hosts exist in Mininet",
1751 onfail="ONOS" + controllerStr +
1752 " hosts don't match Mininet" )
1753
1754 devicesResults = devicesResults and currentDevicesResult
1755 linksResults = linksResults and currentLinksResult
1756 hostsResults = hostsResults and currentHostsResult
1757
1758 main.step( "Device information is correct" )
1759 utilities.assert_equals(
1760 expect=main.TRUE,
1761 actual=devicesResults,
1762 onpass="Device information is correct",
1763 onfail="Device information is incorrect" )
1764
1765 main.step( "Links are correct" )
1766 utilities.assert_equals(
1767 expect=main.TRUE,
1768 actual=linksResults,
1769 onpass="Link are correct",
1770 onfail="Links are incorrect" )
1771
1772 main.step( "Hosts are correct" )
1773 utilities.assert_equals(
1774 expect=main.TRUE,
1775 actual=hostsResults,
1776 onpass="Hosts are correct",
1777 onfail="Hosts are incorrect" )
1778
1779 def CASE61( self, main ):
1780 """
1781 The Failure case.
1782 """
1783 import math
1784 assert main.numCtrls, "main.numCtrls not defined"
1785 assert main, "main not defined"
1786 assert utilities.assert_equals, "utilities.assert_equals not defined"
1787 assert main.CLIs, "main.CLIs not defined"
1788 assert main.nodes, "main.nodes not defined"
1789 main.case( "Partition ONOS nodes into two distinct partitions" )
1790
1791 main.step( "Checking ONOS Logs for errors" )
1792 for node in main.nodes:
1793 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1794 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1795
1796 n = len( main.nodes ) # Number of nodes
1797 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1798 main.partition = [ 0 ] # ONOS node to partition, listed by index in main.nodes
1799 if n > 3:
1800 main.partition.append( p - 1 )
1801 # NOTE: This only works for cluster sizes of 3,5, or 7.
1802
1803 main.step( "Partitioning ONOS nodes" )
1804 nodeList = [ str( i + 1 ) for i in main.partition ]
1805 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1806 partitionResults = main.TRUE
1807 for i in range( 0, n ):
1808 this = main.nodes[i]
1809 if i not in main.partition:
1810 for j in main.partition:
1811 foe = main.nodes[j]
1812 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1813 #CMD HERE
1814 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1815 this.handle.sendline( cmdStr )
1816 this.handle.expect( "\$" )
1817 main.log.debug( this.handle.before )
1818 else:
1819 for j in range( 0, n ):
1820 if j not in main.partition:
1821 foe = main.nodes[j]
1822 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1823 #CMD HERE
1824 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1825 this.handle.sendline( cmdStr )
1826 this.handle.expect( "\$" )
1827 main.log.debug( this.handle.before )
1828 main.activeNodes.remove( i )
1829 # NOTE: When dynamic clustering is finished, we need to start checking
1830 # main.partion nodes still work when partitioned
1831 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1832 onpass="Firewall rules set successfully",
1833 onfail="Error setting firewall rules" )
1834
1835 main.log.step( "Sleeping 60 seconds" )
1836 time.sleep( 60 )
1837
1838 def CASE62( self, main ):
1839 """
1840 Healing Partition
1841 """
1842 import time
1843 assert main.numCtrls, "main.numCtrls not defined"
1844 assert main, "main not defined"
1845 assert utilities.assert_equals, "utilities.assert_equals not defined"
1846 assert main.CLIs, "main.CLIs not defined"
1847 assert main.nodes, "main.nodes not defined"
1848 assert main.partition, "main.partition not defined"
1849 main.case( "Healing Partition" )
1850
1851 main.step( "Deleteing firewall rules" )
1852 healResults = main.TRUE
1853 for node in main.nodes:
1854 cmdStr = "sudo iptables -F"
1855 node.handle.sendline( cmdStr )
1856 node.handle.expect( "\$" )
1857 main.log.debug( node.handle.before )
1858 utilities.assert_equals( expect=main.TRUE, actual=healResults,
1859 onpass="Firewall rules removed",
1860 onfail="Error removing firewall rules" )
1861
1862 for node in main.partition:
1863 main.activeNodes.append( node )
1864 main.activeNodes.sort()
1865 try:
1866 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1867 "List of active nodes has duplicates, this likely indicates something was run out of order"
1868 except AssertionError:
1869 main.log.exception( "" )
1870 main.cleanup()
1871 main.exit()
1872
    def CASE7( self, main ):
        """
        Check state after ONOS failure

        Reads mastership, intents, flow tables and election leadership from
        every active node and compares them against each other and against
        the state saved before the failure (by earlier cases).
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.partition may not exist if the partition case was skipped;
        # default to an empty list so the leadership check below still works
        try:
            main.partition
        except AttributeError:
            main.partition = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query every active node in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Sanity-check each node's response before comparing them
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # Dump each node's view only when they disagree, for debugging
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                             "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a histogram of intent states per node
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before failure" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            # NOTE(review): `flows` is presumably saved by an earlier case
            # as a cross-case global (like intentState above) — confirm it
            # is defined before this case runs
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of partitioned nodes; the leader must not be one of these
        partitioned = []
        for i in main.partition:
            partitioned.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in partitioned:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was partitioned" )
                leaderResult = main.FALSE
        # All active nodes must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2195
    def CASE8( self, main ):
        """
        Compare topo

        Repeatedly pulls devices, hosts, ports, links and clusters from
        every active ONOS node (up to ~60 seconds / 3 attempts), compares
        each node's view against the Mininet topology, then checks
        cross-node consistency of hosts and clusters and that the device
        graph forms a single strongly connected component.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Fetch each topology object type from all nodes in parallel,
            # retrying each CLI call on failure
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host should have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If nothing at all came back, retry instead of comparing
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            # Ground truth from Mininet
            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            # Compare each node's view against Mininet
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                        "Error" not in devices[ controller ] and\
                        "Error" not in ports[ controller ]:

                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ) as e:
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                # Build the expected host-mac -> device-dpid mapping for the
                # 28-host obelisk topology
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                # Zero discovered hosts is tolerated for attachment checking
                if zeroHosts is False:
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
            topoResult = ( devicesResults and linksResults
                           and hostsResults and ipResult and
                           hostAttachmentResults )
            utilities.assert_equals( expect=True,
                                     actual=topoResult,
                                     onpass="ONOS topology matches Mininet",
                                     onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else: # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else: # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
        clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        # Overall topology verdict across all individual checks
        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # Also require convergence within 2 attempts
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
               " seconds for the test to make all the cli calls to fetch " +\
               "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -08002611
2612 def CASE9( self, main ):
2613 """
2614 Link s3-s28 down
2615 """
2616 import time
2617 assert main.numCtrls, "main.numCtrls not defined"
2618 assert main, "main not defined"
2619 assert utilities.assert_equals, "utilities.assert_equals not defined"
2620 assert main.CLIs, "main.CLIs not defined"
2621 assert main.nodes, "main.nodes not defined"
2622 # NOTE: You should probably run a topology check after this
2623
2624 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2625
2626 description = "Turn off a link to ensure that Link Discovery " +\
2627 "is working properly"
2628 main.case( description )
2629
2630 main.step( "Kill Link between s3 and s28" )
2631 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2632 main.log.info( "Waiting " + str( linkSleep ) +
2633 " seconds for link down to be discovered" )
2634 time.sleep( linkSleep )
2635 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2636 onpass="Link down successful",
2637 onfail="Failed to bring link down" )
2638 # TODO do some sort of check here
2639
2640 def CASE10( self, main ):
2641 """
2642 Link s3-s28 up
2643 """
2644 import time
2645 assert main.numCtrls, "main.numCtrls not defined"
2646 assert main, "main not defined"
2647 assert utilities.assert_equals, "utilities.assert_equals not defined"
2648 assert main.CLIs, "main.CLIs not defined"
2649 assert main.nodes, "main.nodes not defined"
2650 # NOTE: You should probably run a topology check after this
2651
2652 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2653
2654 description = "Restore a link to ensure that Link Discovery is " + \
2655 "working properly"
2656 main.case( description )
2657
2658 main.step( "Bring link between s3 and s28 back up" )
2659 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2660 main.log.info( "Waiting " + str( linkSleep ) +
2661 " seconds for link up to be discovered" )
2662 time.sleep( linkSleep )
2663 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2664 onpass="Link up successful",
2665 onfail="Failed to bring link up" )
2666 # TODO do some sort of check here
2667
2668 def CASE11( self, main ):
2669 """
2670 Switch Down
2671 """
2672 # NOTE: You should probably run a topology check after this
2673 import time
2674 assert main.numCtrls, "main.numCtrls not defined"
2675 assert main, "main not defined"
2676 assert utilities.assert_equals, "utilities.assert_equals not defined"
2677 assert main.CLIs, "main.CLIs not defined"
2678 assert main.nodes, "main.nodes not defined"
2679
2680 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2681
2682 description = "Killing a switch to ensure it is discovered correctly"
2683 onosCli = main.CLIs[ main.activeNodes[0] ]
2684 main.case( description )
2685 switch = main.params[ 'kill' ][ 'switch' ]
2686 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2687
2688 # TODO: Make this switch parameterizable
2689 main.step( "Kill " + switch )
2690 main.log.info( "Deleting " + switch )
2691 main.Mininet1.delSwitch( switch )
2692 main.log.info( "Waiting " + str( switchSleep ) +
2693 " seconds for switch down to be discovered" )
2694 time.sleep( switchSleep )
2695 device = onosCli.getDevice( dpid=switchDPID )
2696 # Peek at the deleted switch
2697 main.log.warn( str( device ) )
2698 result = main.FALSE
2699 if device and device[ 'available' ] is False:
2700 result = main.TRUE
2701 utilities.assert_equals( expect=main.TRUE, actual=result,
2702 onpass="Kill switch successful",
2703 onfail="Failed to kill switch?" )
2704
2705 def CASE12( self, main ):
2706 """
2707 Switch Up
2708 """
2709 # NOTE: You should probably run a topology check after this
2710 import time
2711 assert main.numCtrls, "main.numCtrls not defined"
2712 assert main, "main not defined"
2713 assert utilities.assert_equals, "utilities.assert_equals not defined"
2714 assert main.CLIs, "main.CLIs not defined"
2715 assert main.nodes, "main.nodes not defined"
2716 assert ONOS1Port, "ONOS1Port not defined"
2717 assert ONOS2Port, "ONOS2Port not defined"
2718 assert ONOS3Port, "ONOS3Port not defined"
2719 assert ONOS4Port, "ONOS4Port not defined"
2720 assert ONOS5Port, "ONOS5Port not defined"
2721 assert ONOS6Port, "ONOS6Port not defined"
2722 assert ONOS7Port, "ONOS7Port not defined"
2723
2724 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2725 switch = main.params[ 'kill' ][ 'switch' ]
2726 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2727 links = main.params[ 'kill' ][ 'links' ].split()
2728 onosCli = main.CLIs[ main.activeNodes[0] ]
2729 description = "Adding a switch to ensure it is discovered correctly"
2730 main.case( description )
2731
2732 main.step( "Add back " + switch )
2733 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2734 for peer in links:
2735 main.Mininet1.addLink( switch, peer )
2736 ipList = [ node.ip_address for node in main.nodes ]
2737 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2738 main.log.info( "Waiting " + str( switchSleep ) +
2739 " seconds for switch up to be discovered" )
2740 time.sleep( switchSleep )
2741 device = onosCli.getDevice( dpid=switchDPID )
2742 # Peek at the deleted switch
2743 main.log.warn( str( device ) )
2744 result = main.FALSE
2745 if device and device[ 'available' ]:
2746 result = main.TRUE
2747 utilities.assert_equals( expect=main.TRUE, actual=result,
2748 onpass="add switch successful",
2749 onfail="Failed to add switch?" )
2750
2751 def CASE13( self, main ):
2752 """
2753 Clean up
2754 """
2755 import os
2756 import time
2757 assert main.numCtrls, "main.numCtrls not defined"
2758 assert main, "main not defined"
2759 assert utilities.assert_equals, "utilities.assert_equals not defined"
2760 assert main.CLIs, "main.CLIs not defined"
2761 assert main.nodes, "main.nodes not defined"
2762
2763 # printing colors to terminal
2764 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2765 'blue': '\033[94m', 'green': '\033[92m',
2766 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2767 main.case( "Test Cleanup" )
2768 main.step( "Killing tcpdumps" )
2769 main.Mininet2.stopTcpdump()
2770
2771 testname = main.TEST
2772 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2773 main.step( "Copying MN pcap and ONOS log files to test station" )
2774 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2775 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2776 # NOTE: MN Pcap file is being saved to logdir.
2777 # We scp this file as MN and TestON aren't necessarily the same vm
2778
2779 # FIXME: To be replaced with a Jenkin's post script
2780 # TODO: Load these from params
2781 # NOTE: must end in /
2782 logFolder = "/opt/onos/log/"
2783 logFiles = [ "karaf.log", "karaf.log.1" ]
2784 # NOTE: must end in /
2785 for f in logFiles:
2786 for node in main.nodes:
2787 dstName = main.logdir + "/" + node.name + "-" + f
2788 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2789 logFolder + f, dstName )
2790 # std*.log's
2791 # NOTE: must end in /
2792 logFolder = "/opt/onos/var/"
2793 logFiles = [ "stderr.log", "stdout.log" ]
2794 # NOTE: must end in /
2795 for f in logFiles:
2796 for node in main.nodes:
2797 dstName = main.logdir + "/" + node.name + "-" + f
2798 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2799 logFolder + f, dstName )
2800 else:
2801 main.log.debug( "skipping saving log files" )
2802
2803 main.step( "Stopping Mininet" )
2804 mnResult = main.Mininet1.stopNet()
2805 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2806 onpass="Mininet stopped",
2807 onfail="MN cleanup NOT successful" )
2808
2809 main.step( "Checking ONOS Logs for errors" )
2810 for node in main.nodes:
2811 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2812 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2813
2814 try:
2815 timerLog = open( main.logdir + "/Timers.csv", 'w')
2816 # Overwrite with empty line and close
2817 labels = "Gossip Intents"
2818 data = str( gossipTime )
2819 timerLog.write( labels + "\n" + data )
2820 timerLog.close()
2821 except NameError, e:
2822 main.log.exception(e)
2823
2824 def CASE14( self, main ):
2825 """
2826 start election app on all onos nodes
2827 """
2828 assert main.numCtrls, "main.numCtrls not defined"
2829 assert main, "main not defined"
2830 assert utilities.assert_equals, "utilities.assert_equals not defined"
2831 assert main.CLIs, "main.CLIs not defined"
2832 assert main.nodes, "main.nodes not defined"
2833
2834 main.case("Start Leadership Election app")
2835 main.step( "Install leadership election app" )
2836 onosCli = main.CLIs[ main.activeNodes[0] ]
2837 appResult = onosCli.activateApp( "org.onosproject.election" )
2838 utilities.assert_equals(
2839 expect=main.TRUE,
2840 actual=appResult,
2841 onpass="Election app installed",
2842 onfail="Something went wrong with installing Leadership election" )
2843
2844 main.step( "Run for election on each node" )
Jon Hall6e709752016-02-01 13:38:46 -08002845 for i in main.activeNodes:
2846 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002847 time.sleep(5)
2848 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2849 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall6e709752016-02-01 13:38:46 -08002850 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002851 expect=True,
2852 actual=sameResult,
2853 onpass="All nodes see the same leaderboards",
2854 onfail="Inconsistent leaderboards" )
Jon Hall6e709752016-02-01 13:38:46 -08002855
Jon Hall25463a82016-04-13 14:03:52 -07002856 if sameResult:
2857 leader = leaders[ 0 ][ 0 ]
2858 if main.nodes[main.activeNodes[0]].ip_address in leader:
2859 correctLeader = True
2860 else:
2861 correctLeader = False
2862 main.step( "First node was elected leader" )
2863 utilities.assert_equals(
2864 expect=True,
2865 actual=correctLeader,
2866 onpass="Correct leader was elected",
2867 onfail="Incorrect leader" )
Jon Hall6e709752016-02-01 13:38:46 -08002868
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
            withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent
        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # leaderboard entry 0 is the current leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no active node matched the leader's IP
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # fewer than 3 entries on the old leaderboard: cannot predict successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: Parameterize this sleep
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                     str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3042
3043 def CASE16( self, main ):
3044 """
3045 Install Distributed Primitives app
3046 """
3047 import time
3048 assert main.numCtrls, "main.numCtrls not defined"
3049 assert main, "main not defined"
3050 assert utilities.assert_equals, "utilities.assert_equals not defined"
3051 assert main.CLIs, "main.CLIs not defined"
3052 assert main.nodes, "main.nodes not defined"
3053
3054 # Variables for the distributed primitives tests
3055 global pCounterName
Jon Hall6e709752016-02-01 13:38:46 -08003056 global pCounterValue
Jon Hall6e709752016-02-01 13:38:46 -08003057 global onosSet
3058 global onosSetName
3059 pCounterName = "TestON-Partitions"
Jon Hall6e709752016-02-01 13:38:46 -08003060 pCounterValue = 0
Jon Hall6e709752016-02-01 13:38:46 -08003061 onosSet = set([])
3062 onosSetName = "TestON-set"
3063
3064 description = "Install Primitives app"
3065 main.case( description )
3066 main.step( "Install Primitives app" )
3067 appName = "org.onosproject.distributedprimitives"
3068 node = main.activeNodes[0]
3069 appResults = main.CLIs[node].activateApp( appName )
3070 utilities.assert_equals( expect=main.TRUE,
3071 actual=appResults,
3072 onpass="Primitives app activated",
3073 onfail="Primitives app not activated" )
3074 time.sleep( 5 ) # To allow all nodes to activate
3075
3076 def CASE17( self, main ):
3077 """
3078 Check for basic functionality with distributed primitives
3079 """
3080 # Make sure variables are defined/set
3081 assert main.numCtrls, "main.numCtrls not defined"
3082 assert main, "main not defined"
3083 assert utilities.assert_equals, "utilities.assert_equals not defined"
3084 assert main.CLIs, "main.CLIs not defined"
3085 assert main.nodes, "main.nodes not defined"
3086 assert pCounterName, "pCounterName not defined"
Jon Hall6e709752016-02-01 13:38:46 -08003087 assert onosSetName, "onosSetName not defined"
3088 # NOTE: assert fails if value is 0/None/Empty/False
3089 try:
3090 pCounterValue
3091 except NameError:
3092 main.log.error( "pCounterValue not defined, setting to 0" )
3093 pCounterValue = 0
3094 try:
Jon Hall6e709752016-02-01 13:38:46 -08003095 onosSet
3096 except NameError:
3097 main.log.error( "onosSet not defined, setting to empty Set" )
3098 onosSet = set([])
3099 # Variables for the distributed primitives tests. These are local only
3100 addValue = "a"
3101 addAllValue = "a b c d e f"
3102 retainValue = "c d e f"
3103
3104 description = "Check for basic functionality with distributed " +\
3105 "primitives"
3106 main.case( description )
3107 main.caseExplanation = "Test the methods of the distributed " +\
3108 "primitives (counters and sets) throught the cli"
3109 # DISTRIBUTED ATOMIC COUNTERS
3110 # Partitioned counters
3111 main.step( "Increment then get a default counter on each node" )
3112 pCounters = []
3113 threads = []
3114 addedPValues = []
3115 for i in main.activeNodes:
3116 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3117 name="counterAddAndGet-" + str( i ),
3118 args=[ pCounterName ] )
3119 pCounterValue += 1
3120 addedPValues.append( pCounterValue )
3121 threads.append( t )
3122 t.start()
3123
3124 for t in threads:
3125 t.join()
3126 pCounters.append( t.result )
3127 # Check that counter incremented numController times
3128 pCounterResults = True
3129 for i in addedPValues:
3130 tmpResult = i in pCounters
3131 pCounterResults = pCounterResults and tmpResult
3132 if not tmpResult:
3133 main.log.error( str( i ) + " is not in partitioned "
3134 "counter incremented results" )
3135 utilities.assert_equals( expect=True,
3136 actual=pCounterResults,
3137 onpass="Default counter incremented",
3138 onfail="Error incrementing default" +
3139 " counter" )
3140
3141 main.step( "Get then Increment a default counter on each node" )
3142 pCounters = []
3143 threads = []
3144 addedPValues = []
3145 for i in main.activeNodes:
3146 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3147 name="counterGetAndAdd-" + str( i ),
3148 args=[ pCounterName ] )
3149 addedPValues.append( pCounterValue )
3150 pCounterValue += 1
3151 threads.append( t )
3152 t.start()
3153
3154 for t in threads:
3155 t.join()
3156 pCounters.append( t.result )
3157 # Check that counter incremented numController times
3158 pCounterResults = True
3159 for i in addedPValues:
3160 tmpResult = i in pCounters
3161 pCounterResults = pCounterResults and tmpResult
3162 if not tmpResult:
3163 main.log.error( str( i ) + " is not in partitioned "
3164 "counter incremented results" )
3165 utilities.assert_equals( expect=True,
3166 actual=pCounterResults,
3167 onpass="Default counter incremented",
3168 onfail="Error incrementing default" +
3169 " counter" )
3170
3171 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003172 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003173 utilities.assert_equals( expect=main.TRUE,
3174 actual=incrementCheck,
3175 onpass="Added counters are correct",
3176 onfail="Added counters are incorrect" )
3177
3178 main.step( "Add -8 to then get a default counter on each node" )
3179 pCounters = []
3180 threads = []
3181 addedPValues = []
3182 for i in main.activeNodes:
3183 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3184 name="counterIncrement-" + str( i ),
3185 args=[ pCounterName ],
3186 kwargs={ "delta": -8 } )
3187 pCounterValue += -8
3188 addedPValues.append( pCounterValue )
3189 threads.append( t )
3190 t.start()
3191
3192 for t in threads:
3193 t.join()
3194 pCounters.append( t.result )
3195 # Check that counter incremented numController times
3196 pCounterResults = True
3197 for i in addedPValues:
3198 tmpResult = i in pCounters
3199 pCounterResults = pCounterResults and tmpResult
3200 if not tmpResult:
3201 main.log.error( str( i ) + " is not in partitioned "
3202 "counter incremented results" )
3203 utilities.assert_equals( expect=True,
3204 actual=pCounterResults,
3205 onpass="Default counter incremented",
3206 onfail="Error incrementing default" +
3207 " counter" )
3208
3209 main.step( "Add 5 to then get a default counter on each node" )
3210 pCounters = []
3211 threads = []
3212 addedPValues = []
3213 for i in main.activeNodes:
3214 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3215 name="counterIncrement-" + str( i ),
3216 args=[ pCounterName ],
3217 kwargs={ "delta": 5 } )
3218 pCounterValue += 5
3219 addedPValues.append( pCounterValue )
3220 threads.append( t )
3221 t.start()
3222
3223 for t in threads:
3224 t.join()
3225 pCounters.append( t.result )
3226 # Check that counter incremented numController times
3227 pCounterResults = True
3228 for i in addedPValues:
3229 tmpResult = i in pCounters
3230 pCounterResults = pCounterResults and tmpResult
3231 if not tmpResult:
3232 main.log.error( str( i ) + " is not in partitioned "
3233 "counter incremented results" )
3234 utilities.assert_equals( expect=True,
3235 actual=pCounterResults,
3236 onpass="Default counter incremented",
3237 onfail="Error incrementing default" +
3238 " counter" )
3239
3240 main.step( "Get then add 5 to a default counter on each node" )
3241 pCounters = []
3242 threads = []
3243 addedPValues = []
3244 for i in main.activeNodes:
3245 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3246 name="counterIncrement-" + str( i ),
3247 args=[ pCounterName ],
3248 kwargs={ "delta": 5 } )
3249 addedPValues.append( pCounterValue )
3250 pCounterValue += 5
3251 threads.append( t )
3252 t.start()
3253
3254 for t in threads:
3255 t.join()
3256 pCounters.append( t.result )
3257 # Check that counter incremented numController times
3258 pCounterResults = True
3259 for i in addedPValues:
3260 tmpResult = i in pCounters
3261 pCounterResults = pCounterResults and tmpResult
3262 if not tmpResult:
3263 main.log.error( str( i ) + " is not in partitioned "
3264 "counter incremented results" )
3265 utilities.assert_equals( expect=True,
3266 actual=pCounterResults,
3267 onpass="Default counter incremented",
3268 onfail="Error incrementing default" +
3269 " counter" )
3270
3271 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003272 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003273 utilities.assert_equals( expect=main.TRUE,
3274 actual=incrementCheck,
3275 onpass="Added counters are correct",
3276 onfail="Added counters are incorrect" )
3277
Jon Hall6e709752016-02-01 13:38:46 -08003278 # DISTRIBUTED SETS
3279 main.step( "Distributed Set get" )
3280 size = len( onosSet )
3281 getResponses = []
3282 threads = []
3283 for i in main.activeNodes:
3284 t = main.Thread( target=main.CLIs[i].setTestGet,
3285 name="setTestGet-" + str( i ),
3286 args=[ onosSetName ] )
3287 threads.append( t )
3288 t.start()
3289 for t in threads:
3290 t.join()
3291 getResponses.append( t.result )
3292
3293 getResults = main.TRUE
3294 for i in range( len( main.activeNodes ) ):
3295 node = str( main.activeNodes[i] + 1 )
3296 if isinstance( getResponses[ i ], list):
3297 current = set( getResponses[ i ] )
3298 if len( current ) == len( getResponses[ i ] ):
3299 # no repeats
3300 if onosSet != current:
3301 main.log.error( "ONOS" + node +
3302 " has incorrect view" +
3303 " of set " + onosSetName + ":\n" +
3304 str( getResponses[ i ] ) )
3305 main.log.debug( "Expected: " + str( onosSet ) )
3306 main.log.debug( "Actual: " + str( current ) )
3307 getResults = main.FALSE
3308 else:
3309 # error, set is not a set
3310 main.log.error( "ONOS" + node +
3311 " has repeat elements in" +
3312 " set " + onosSetName + ":\n" +
3313 str( getResponses[ i ] ) )
3314 getResults = main.FALSE
3315 elif getResponses[ i ] == main.ERROR:
3316 getResults = main.FALSE
3317 utilities.assert_equals( expect=main.TRUE,
3318 actual=getResults,
3319 onpass="Set elements are correct",
3320 onfail="Set elements are incorrect" )
3321
3322 main.step( "Distributed Set size" )
3323 sizeResponses = []
3324 threads = []
3325 for i in main.activeNodes:
3326 t = main.Thread( target=main.CLIs[i].setTestSize,
3327 name="setTestSize-" + str( i ),
3328 args=[ onosSetName ] )
3329 threads.append( t )
3330 t.start()
3331 for t in threads:
3332 t.join()
3333 sizeResponses.append( t.result )
3334
3335 sizeResults = main.TRUE
3336 for i in range( len( main.activeNodes ) ):
3337 node = str( main.activeNodes[i] + 1 )
3338 if size != sizeResponses[ i ]:
3339 sizeResults = main.FALSE
3340 main.log.error( "ONOS" + node +
3341 " expected a size of " + str( size ) +
3342 " for set " + onosSetName +
3343 " but got " + str( sizeResponses[ i ] ) )
3344 utilities.assert_equals( expect=main.TRUE,
3345 actual=sizeResults,
3346 onpass="Set sizes are correct",
3347 onfail="Set sizes are incorrect" )
3348
3349 main.step( "Distributed Set add()" )
3350 onosSet.add( addValue )
3351 addResponses = []
3352 threads = []
3353 for i in main.activeNodes:
3354 t = main.Thread( target=main.CLIs[i].setTestAdd,
3355 name="setTestAdd-" + str( i ),
3356 args=[ onosSetName, addValue ] )
3357 threads.append( t )
3358 t.start()
3359 for t in threads:
3360 t.join()
3361 addResponses.append( t.result )
3362
3363 # main.TRUE = successfully changed the set
3364 # main.FALSE = action resulted in no change in set
3365 # main.ERROR - Some error in executing the function
3366 addResults = main.TRUE
3367 for i in range( len( main.activeNodes ) ):
3368 if addResponses[ i ] == main.TRUE:
3369 # All is well
3370 pass
3371 elif addResponses[ i ] == main.FALSE:
3372 # Already in set, probably fine
3373 pass
3374 elif addResponses[ i ] == main.ERROR:
3375 # Error in execution
3376 addResults = main.FALSE
3377 else:
3378 # unexpected result
3379 addResults = main.FALSE
3380 if addResults != main.TRUE:
3381 main.log.error( "Error executing set add" )
3382
3383 # Check if set is still correct
3384 size = len( onosSet )
3385 getResponses = []
3386 threads = []
3387 for i in main.activeNodes:
3388 t = main.Thread( target=main.CLIs[i].setTestGet,
3389 name="setTestGet-" + str( i ),
3390 args=[ onosSetName ] )
3391 threads.append( t )
3392 t.start()
3393 for t in threads:
3394 t.join()
3395 getResponses.append( t.result )
3396 getResults = main.TRUE
3397 for i in range( len( main.activeNodes ) ):
3398 node = str( main.activeNodes[i] + 1 )
3399 if isinstance( getResponses[ i ], list):
3400 current = set( getResponses[ i ] )
3401 if len( current ) == len( getResponses[ i ] ):
3402 # no repeats
3403 if onosSet != current:
3404 main.log.error( "ONOS" + node + " has incorrect view" +
3405 " of set " + onosSetName + ":\n" +
3406 str( getResponses[ i ] ) )
3407 main.log.debug( "Expected: " + str( onosSet ) )
3408 main.log.debug( "Actual: " + str( current ) )
3409 getResults = main.FALSE
3410 else:
3411 # error, set is not a set
3412 main.log.error( "ONOS" + node + " has repeat elements in" +
3413 " set " + onosSetName + ":\n" +
3414 str( getResponses[ i ] ) )
3415 getResults = main.FALSE
3416 elif getResponses[ i ] == main.ERROR:
3417 getResults = main.FALSE
3418 sizeResponses = []
3419 threads = []
3420 for i in main.activeNodes:
3421 t = main.Thread( target=main.CLIs[i].setTestSize,
3422 name="setTestSize-" + str( i ),
3423 args=[ onosSetName ] )
3424 threads.append( t )
3425 t.start()
3426 for t in threads:
3427 t.join()
3428 sizeResponses.append( t.result )
3429 sizeResults = main.TRUE
3430 for i in range( len( main.activeNodes ) ):
3431 node = str( main.activeNodes[i] + 1 )
3432 if size != sizeResponses[ i ]:
3433 sizeResults = main.FALSE
3434 main.log.error( "ONOS" + node +
3435 " expected a size of " + str( size ) +
3436 " for set " + onosSetName +
3437 " but got " + str( sizeResponses[ i ] ) )
3438 addResults = addResults and getResults and sizeResults
3439 utilities.assert_equals( expect=main.TRUE,
3440 actual=addResults,
3441 onpass="Set add correct",
3442 onfail="Set add was incorrect" )
3443
3444 main.step( "Distributed Set addAll()" )
3445 onosSet.update( addAllValue.split() )
3446 addResponses = []
3447 threads = []
3448 for i in main.activeNodes:
3449 t = main.Thread( target=main.CLIs[i].setTestAdd,
3450 name="setTestAddAll-" + str( i ),
3451 args=[ onosSetName, addAllValue ] )
3452 threads.append( t )
3453 t.start()
3454 for t in threads:
3455 t.join()
3456 addResponses.append( t.result )
3457
3458 # main.TRUE = successfully changed the set
3459 # main.FALSE = action resulted in no change in set
3460 # main.ERROR - Some error in executing the function
3461 addAllResults = main.TRUE
3462 for i in range( len( main.activeNodes ) ):
3463 if addResponses[ i ] == main.TRUE:
3464 # All is well
3465 pass
3466 elif addResponses[ i ] == main.FALSE:
3467 # Already in set, probably fine
3468 pass
3469 elif addResponses[ i ] == main.ERROR:
3470 # Error in execution
3471 addAllResults = main.FALSE
3472 else:
3473 # unexpected result
3474 addAllResults = main.FALSE
3475 if addAllResults != main.TRUE:
3476 main.log.error( "Error executing set addAll" )
3477
3478 # Check if set is still correct
3479 size = len( onosSet )
3480 getResponses = []
3481 threads = []
3482 for i in main.activeNodes:
3483 t = main.Thread( target=main.CLIs[i].setTestGet,
3484 name="setTestGet-" + str( i ),
3485 args=[ onosSetName ] )
3486 threads.append( t )
3487 t.start()
3488 for t in threads:
3489 t.join()
3490 getResponses.append( t.result )
3491 getResults = main.TRUE
3492 for i in range( len( main.activeNodes ) ):
3493 node = str( main.activeNodes[i] + 1 )
3494 if isinstance( getResponses[ i ], list):
3495 current = set( getResponses[ i ] )
3496 if len( current ) == len( getResponses[ i ] ):
3497 # no repeats
3498 if onosSet != current:
3499 main.log.error( "ONOS" + node +
3500 " has incorrect view" +
3501 " of set " + onosSetName + ":\n" +
3502 str( getResponses[ i ] ) )
3503 main.log.debug( "Expected: " + str( onosSet ) )
3504 main.log.debug( "Actual: " + str( current ) )
3505 getResults = main.FALSE
3506 else:
3507 # error, set is not a set
3508 main.log.error( "ONOS" + node +
3509 " has repeat elements in" +
3510 " set " + onosSetName + ":\n" +
3511 str( getResponses[ i ] ) )
3512 getResults = main.FALSE
3513 elif getResponses[ i ] == main.ERROR:
3514 getResults = main.FALSE
3515 sizeResponses = []
3516 threads = []
3517 for i in main.activeNodes:
3518 t = main.Thread( target=main.CLIs[i].setTestSize,
3519 name="setTestSize-" + str( i ),
3520 args=[ onosSetName ] )
3521 threads.append( t )
3522 t.start()
3523 for t in threads:
3524 t.join()
3525 sizeResponses.append( t.result )
3526 sizeResults = main.TRUE
3527 for i in range( len( main.activeNodes ) ):
3528 node = str( main.activeNodes[i] + 1 )
3529 if size != sizeResponses[ i ]:
3530 sizeResults = main.FALSE
3531 main.log.error( "ONOS" + node +
3532 " expected a size of " + str( size ) +
3533 " for set " + onosSetName +
3534 " but got " + str( sizeResponses[ i ] ) )
3535 addAllResults = addAllResults and getResults and sizeResults
3536 utilities.assert_equals( expect=main.TRUE,
3537 actual=addAllResults,
3538 onpass="Set addAll correct",
3539 onfail="Set addAll was incorrect" )
3540
        # Verify the distributed set's contains() operation: ask every active
        # ONOS node whether the set contains addValue. setTestGet called with
        # the "values" kwarg returns a tuple; element [ 1 ] is treated as the
        # boolean contains() result (element [ 0 ] is presumably the set
        # contents -- TODO confirm against the CLI driver).
        main.step( "Distributed Set contains()" )
        containsResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setContains-" + str( i ),
                             args=[ onosSetName ],
                             kwargs={ "values": addValue } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            # NOTE: This is the tuple
            containsResponses.append( t.result )

        containsResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if containsResponses[ i ] == main.ERROR:
                # CLI error on this node; fail the step
                containsResults = main.FALSE
            else:
                # Fold in each node's contains() boolean (tuple index 1)
                containsResults = containsResults and\
                                  containsResponses[ i ][ 1 ]
        utilities.assert_equals( expect=main.TRUE,
                                 actual=containsResults,
                                 onpass="Set contains is functional",
                                 onfail="Set contains failed" )
3567
3568 main.step( "Distributed Set containsAll()" )
3569 containsAllResponses = []
3570 threads = []
3571 for i in main.activeNodes:
3572 t = main.Thread( target=main.CLIs[i].setTestGet,
3573 name="setContainsAll-" + str( i ),
3574 args=[ onosSetName ],
3575 kwargs={ "values": addAllValue } )
3576 threads.append( t )
3577 t.start()
3578 for t in threads:
3579 t.join()
3580 # NOTE: This is the tuple
3581 containsAllResponses.append( t.result )
3582
3583 containsAllResults = main.TRUE
3584 for i in range( len( main.activeNodes ) ):
3585 if containsResponses[ i ] == main.ERROR:
3586 containsResults = main.FALSE
3587 else:
3588 containsResults = containsResults and\
3589 containsResponses[ i ][ 1 ]
3590 utilities.assert_equals( expect=main.TRUE,
3591 actual=containsAllResults,
3592 onpass="Set containsAll is functional",
3593 onfail="Set containsAll failed" )
3594
3595 main.step( "Distributed Set remove()" )
3596 onosSet.remove( addValue )
3597 removeResponses = []
3598 threads = []
3599 for i in main.activeNodes:
3600 t = main.Thread( target=main.CLIs[i].setTestRemove,
3601 name="setTestRemove-" + str( i ),
3602 args=[ onosSetName, addValue ] )
3603 threads.append( t )
3604 t.start()
3605 for t in threads:
3606 t.join()
3607 removeResponses.append( t.result )
3608
3609 # main.TRUE = successfully changed the set
3610 # main.FALSE = action resulted in no change in set
3611 # main.ERROR - Some error in executing the function
3612 removeResults = main.TRUE
3613 for i in range( len( main.activeNodes ) ):
3614 if removeResponses[ i ] == main.TRUE:
3615 # All is well
3616 pass
3617 elif removeResponses[ i ] == main.FALSE:
3618 # not in set, probably fine
3619 pass
3620 elif removeResponses[ i ] == main.ERROR:
3621 # Error in execution
3622 removeResults = main.FALSE
3623 else:
3624 # unexpected result
3625 removeResults = main.FALSE
3626 if removeResults != main.TRUE:
3627 main.log.error( "Error executing set remove" )
3628
3629 # Check if set is still correct
3630 size = len( onosSet )
3631 getResponses = []
3632 threads = []
3633 for i in main.activeNodes:
3634 t = main.Thread( target=main.CLIs[i].setTestGet,
3635 name="setTestGet-" + str( i ),
3636 args=[ onosSetName ] )
3637 threads.append( t )
3638 t.start()
3639 for t in threads:
3640 t.join()
3641 getResponses.append( t.result )
3642 getResults = main.TRUE
3643 for i in range( len( main.activeNodes ) ):
3644 node = str( main.activeNodes[i] + 1 )
3645 if isinstance( getResponses[ i ], list):
3646 current = set( getResponses[ i ] )
3647 if len( current ) == len( getResponses[ i ] ):
3648 # no repeats
3649 if onosSet != current:
3650 main.log.error( "ONOS" + node +
3651 " has incorrect view" +
3652 " of set " + onosSetName + ":\n" +
3653 str( getResponses[ i ] ) )
3654 main.log.debug( "Expected: " + str( onosSet ) )
3655 main.log.debug( "Actual: " + str( current ) )
3656 getResults = main.FALSE
3657 else:
3658 # error, set is not a set
3659 main.log.error( "ONOS" + node +
3660 " has repeat elements in" +
3661 " set " + onosSetName + ":\n" +
3662 str( getResponses[ i ] ) )
3663 getResults = main.FALSE
3664 elif getResponses[ i ] == main.ERROR:
3665 getResults = main.FALSE
3666 sizeResponses = []
3667 threads = []
3668 for i in main.activeNodes:
3669 t = main.Thread( target=main.CLIs[i].setTestSize,
3670 name="setTestSize-" + str( i ),
3671 args=[ onosSetName ] )
3672 threads.append( t )
3673 t.start()
3674 for t in threads:
3675 t.join()
3676 sizeResponses.append( t.result )
3677 sizeResults = main.TRUE
3678 for i in range( len( main.activeNodes ) ):
3679 node = str( main.activeNodes[i] + 1 )
3680 if size != sizeResponses[ i ]:
3681 sizeResults = main.FALSE
3682 main.log.error( "ONOS" + node +
3683 " expected a size of " + str( size ) +
3684 " for set " + onosSetName +
3685 " but got " + str( sizeResponses[ i ] ) )
3686 removeResults = removeResults and getResults and sizeResults
3687 utilities.assert_equals( expect=main.TRUE,
3688 actual=removeResults,
3689 onpass="Set remove correct",
3690 onfail="Set remove was incorrect" )
3691
3692 main.step( "Distributed Set removeAll()" )
3693 onosSet.difference_update( addAllValue.split() )
3694 removeAllResponses = []
3695 threads = []
3696 try:
3697 for i in main.activeNodes:
3698 t = main.Thread( target=main.CLIs[i].setTestRemove,
3699 name="setTestRemoveAll-" + str( i ),
3700 args=[ onosSetName, addAllValue ] )
3701 threads.append( t )
3702 t.start()
3703 for t in threads:
3704 t.join()
3705 removeAllResponses.append( t.result )
3706 except Exception, e:
3707 main.log.exception(e)
3708
3709 # main.TRUE = successfully changed the set
3710 # main.FALSE = action resulted in no change in set
3711 # main.ERROR - Some error in executing the function
3712 removeAllResults = main.TRUE
3713 for i in range( len( main.activeNodes ) ):
3714 if removeAllResponses[ i ] == main.TRUE:
3715 # All is well
3716 pass
3717 elif removeAllResponses[ i ] == main.FALSE:
3718 # not in set, probably fine
3719 pass
3720 elif removeAllResponses[ i ] == main.ERROR:
3721 # Error in execution
3722 removeAllResults = main.FALSE
3723 else:
3724 # unexpected result
3725 removeAllResults = main.FALSE
3726 if removeAllResults != main.TRUE:
3727 main.log.error( "Error executing set removeAll" )
3728
3729 # Check if set is still correct
3730 size = len( onosSet )
3731 getResponses = []
3732 threads = []
3733 for i in main.activeNodes:
3734 t = main.Thread( target=main.CLIs[i].setTestGet,
3735 name="setTestGet-" + str( i ),
3736 args=[ onosSetName ] )
3737 threads.append( t )
3738 t.start()
3739 for t in threads:
3740 t.join()
3741 getResponses.append( t.result )
3742 getResults = main.TRUE
3743 for i in range( len( main.activeNodes ) ):
3744 node = str( main.activeNodes[i] + 1 )
3745 if isinstance( getResponses[ i ], list):
3746 current = set( getResponses[ i ] )
3747 if len( current ) == len( getResponses[ i ] ):
3748 # no repeats
3749 if onosSet != current:
3750 main.log.error( "ONOS" + node +
3751 " has incorrect view" +
3752 " of set " + onosSetName + ":\n" +
3753 str( getResponses[ i ] ) )
3754 main.log.debug( "Expected: " + str( onosSet ) )
3755 main.log.debug( "Actual: " + str( current ) )
3756 getResults = main.FALSE
3757 else:
3758 # error, set is not a set
3759 main.log.error( "ONOS" + node +
3760 " has repeat elements in" +
3761 " set " + onosSetName + ":\n" +
3762 str( getResponses[ i ] ) )
3763 getResults = main.FALSE
3764 elif getResponses[ i ] == main.ERROR:
3765 getResults = main.FALSE
3766 sizeResponses = []
3767 threads = []
3768 for i in main.activeNodes:
3769 t = main.Thread( target=main.CLIs[i].setTestSize,
3770 name="setTestSize-" + str( i ),
3771 args=[ onosSetName ] )
3772 threads.append( t )
3773 t.start()
3774 for t in threads:
3775 t.join()
3776 sizeResponses.append( t.result )
3777 sizeResults = main.TRUE
3778 for i in range( len( main.activeNodes ) ):
3779 node = str( main.activeNodes[i] + 1 )
3780 if size != sizeResponses[ i ]:
3781 sizeResults = main.FALSE
3782 main.log.error( "ONOS" + node +
3783 " expected a size of " + str( size ) +
3784 " for set " + onosSetName +
3785 " but got " + str( sizeResponses[ i ] ) )
3786 removeAllResults = removeAllResults and getResults and sizeResults
3787 utilities.assert_equals( expect=main.TRUE,
3788 actual=removeAllResults,
3789 onpass="Set removeAll correct",
3790 onfail="Set removeAll was incorrect" )
3791
3792 main.step( "Distributed Set addAll()" )
3793 onosSet.update( addAllValue.split() )
3794 addResponses = []
3795 threads = []
3796 for i in main.activeNodes:
3797 t = main.Thread( target=main.CLIs[i].setTestAdd,
3798 name="setTestAddAll-" + str( i ),
3799 args=[ onosSetName, addAllValue ] )
3800 threads.append( t )
3801 t.start()
3802 for t in threads:
3803 t.join()
3804 addResponses.append( t.result )
3805
3806 # main.TRUE = successfully changed the set
3807 # main.FALSE = action resulted in no change in set
3808 # main.ERROR - Some error in executing the function
3809 addAllResults = main.TRUE
3810 for i in range( len( main.activeNodes ) ):
3811 if addResponses[ i ] == main.TRUE:
3812 # All is well
3813 pass
3814 elif addResponses[ i ] == main.FALSE:
3815 # Already in set, probably fine
3816 pass
3817 elif addResponses[ i ] == main.ERROR:
3818 # Error in execution
3819 addAllResults = main.FALSE
3820 else:
3821 # unexpected result
3822 addAllResults = main.FALSE
3823 if addAllResults != main.TRUE:
3824 main.log.error( "Error executing set addAll" )
3825
3826 # Check if set is still correct
3827 size = len( onosSet )
3828 getResponses = []
3829 threads = []
3830 for i in main.activeNodes:
3831 t = main.Thread( target=main.CLIs[i].setTestGet,
3832 name="setTestGet-" + str( i ),
3833 args=[ onosSetName ] )
3834 threads.append( t )
3835 t.start()
3836 for t in threads:
3837 t.join()
3838 getResponses.append( t.result )
3839 getResults = main.TRUE
3840 for i in range( len( main.activeNodes ) ):
3841 node = str( main.activeNodes[i] + 1 )
3842 if isinstance( getResponses[ i ], list):
3843 current = set( getResponses[ i ] )
3844 if len( current ) == len( getResponses[ i ] ):
3845 # no repeats
3846 if onosSet != current:
3847 main.log.error( "ONOS" + node +
3848 " has incorrect view" +
3849 " of set " + onosSetName + ":\n" +
3850 str( getResponses[ i ] ) )
3851 main.log.debug( "Expected: " + str( onosSet ) )
3852 main.log.debug( "Actual: " + str( current ) )
3853 getResults = main.FALSE
3854 else:
3855 # error, set is not a set
3856 main.log.error( "ONOS" + node +
3857 " has repeat elements in" +
3858 " set " + onosSetName + ":\n" +
3859 str( getResponses[ i ] ) )
3860 getResults = main.FALSE
3861 elif getResponses[ i ] == main.ERROR:
3862 getResults = main.FALSE
3863 sizeResponses = []
3864 threads = []
3865 for i in main.activeNodes:
3866 t = main.Thread( target=main.CLIs[i].setTestSize,
3867 name="setTestSize-" + str( i ),
3868 args=[ onosSetName ] )
3869 threads.append( t )
3870 t.start()
3871 for t in threads:
3872 t.join()
3873 sizeResponses.append( t.result )
3874 sizeResults = main.TRUE
3875 for i in range( len( main.activeNodes ) ):
3876 node = str( main.activeNodes[i] + 1 )
3877 if size != sizeResponses[ i ]:
3878 sizeResults = main.FALSE
3879 main.log.error( "ONOS" + node +
3880 " expected a size of " + str( size ) +
3881 " for set " + onosSetName +
3882 " but got " + str( sizeResponses[ i ] ) )
3883 addAllResults = addAllResults and getResults and sizeResults
3884 utilities.assert_equals( expect=main.TRUE,
3885 actual=addAllResults,
3886 onpass="Set addAll correct",
3887 onfail="Set addAll was incorrect" )
3888
3889 main.step( "Distributed Set clear()" )
3890 onosSet.clear()
3891 clearResponses = []
3892 threads = []
3893 for i in main.activeNodes:
3894 t = main.Thread( target=main.CLIs[i].setTestRemove,
3895 name="setTestClear-" + str( i ),
3896 args=[ onosSetName, " "], # Values doesn't matter
3897 kwargs={ "clear": True } )
3898 threads.append( t )
3899 t.start()
3900 for t in threads:
3901 t.join()
3902 clearResponses.append( t.result )
3903
3904 # main.TRUE = successfully changed the set
3905 # main.FALSE = action resulted in no change in set
3906 # main.ERROR - Some error in executing the function
3907 clearResults = main.TRUE
3908 for i in range( len( main.activeNodes ) ):
3909 if clearResponses[ i ] == main.TRUE:
3910 # All is well
3911 pass
3912 elif clearResponses[ i ] == main.FALSE:
3913 # Nothing set, probably fine
3914 pass
3915 elif clearResponses[ i ] == main.ERROR:
3916 # Error in execution
3917 clearResults = main.FALSE
3918 else:
3919 # unexpected result
3920 clearResults = main.FALSE
3921 if clearResults != main.TRUE:
3922 main.log.error( "Error executing set clear" )
3923
3924 # Check if set is still correct
3925 size = len( onosSet )
3926 getResponses = []
3927 threads = []
3928 for i in main.activeNodes:
3929 t = main.Thread( target=main.CLIs[i].setTestGet,
3930 name="setTestGet-" + str( i ),
3931 args=[ onosSetName ] )
3932 threads.append( t )
3933 t.start()
3934 for t in threads:
3935 t.join()
3936 getResponses.append( t.result )
3937 getResults = main.TRUE
3938 for i in range( len( main.activeNodes ) ):
3939 node = str( main.activeNodes[i] + 1 )
3940 if isinstance( getResponses[ i ], list):
3941 current = set( getResponses[ i ] )
3942 if len( current ) == len( getResponses[ i ] ):
3943 # no repeats
3944 if onosSet != current:
3945 main.log.error( "ONOS" + node +
3946 " has incorrect view" +
3947 " of set " + onosSetName + ":\n" +
3948 str( getResponses[ i ] ) )
3949 main.log.debug( "Expected: " + str( onosSet ) )
3950 main.log.debug( "Actual: " + str( current ) )
3951 getResults = main.FALSE
3952 else:
3953 # error, set is not a set
3954 main.log.error( "ONOS" + node +
3955 " has repeat elements in" +
3956 " set " + onosSetName + ":\n" +
3957 str( getResponses[ i ] ) )
3958 getResults = main.FALSE
3959 elif getResponses[ i ] == main.ERROR:
3960 getResults = main.FALSE
3961 sizeResponses = []
3962 threads = []
3963 for i in main.activeNodes:
3964 t = main.Thread( target=main.CLIs[i].setTestSize,
3965 name="setTestSize-" + str( i ),
3966 args=[ onosSetName ] )
3967 threads.append( t )
3968 t.start()
3969 for t in threads:
3970 t.join()
3971 sizeResponses.append( t.result )
3972 sizeResults = main.TRUE
3973 for i in range( len( main.activeNodes ) ):
3974 node = str( main.activeNodes[i] + 1 )
3975 if size != sizeResponses[ i ]:
3976 sizeResults = main.FALSE
3977 main.log.error( "ONOS" + node +
3978 " expected a size of " + str( size ) +
3979 " for set " + onosSetName +
3980 " but got " + str( sizeResponses[ i ] ) )
3981 clearResults = clearResults and getResults and sizeResults
3982 utilities.assert_equals( expect=main.TRUE,
3983 actual=clearResults,
3984 onpass="Set clear correct",
3985 onfail="Set clear was incorrect" )
3986
3987 main.step( "Distributed Set addAll()" )
3988 onosSet.update( addAllValue.split() )
3989 addResponses = []
3990 threads = []
3991 for i in main.activeNodes:
3992 t = main.Thread( target=main.CLIs[i].setTestAdd,
3993 name="setTestAddAll-" + str( i ),
3994 args=[ onosSetName, addAllValue ] )
3995 threads.append( t )
3996 t.start()
3997 for t in threads:
3998 t.join()
3999 addResponses.append( t.result )
4000
4001 # main.TRUE = successfully changed the set
4002 # main.FALSE = action resulted in no change in set
4003 # main.ERROR - Some error in executing the function
4004 addAllResults = main.TRUE
4005 for i in range( len( main.activeNodes ) ):
4006 if addResponses[ i ] == main.TRUE:
4007 # All is well
4008 pass
4009 elif addResponses[ i ] == main.FALSE:
4010 # Already in set, probably fine
4011 pass
4012 elif addResponses[ i ] == main.ERROR:
4013 # Error in execution
4014 addAllResults = main.FALSE
4015 else:
4016 # unexpected result
4017 addAllResults = main.FALSE
4018 if addAllResults != main.TRUE:
4019 main.log.error( "Error executing set addAll" )
4020
4021 # Check if set is still correct
4022 size = len( onosSet )
4023 getResponses = []
4024 threads = []
4025 for i in main.activeNodes:
4026 t = main.Thread( target=main.CLIs[i].setTestGet,
4027 name="setTestGet-" + str( i ),
4028 args=[ onosSetName ] )
4029 threads.append( t )
4030 t.start()
4031 for t in threads:
4032 t.join()
4033 getResponses.append( t.result )
4034 getResults = main.TRUE
4035 for i in range( len( main.activeNodes ) ):
4036 node = str( main.activeNodes[i] + 1 )
4037 if isinstance( getResponses[ i ], list):
4038 current = set( getResponses[ i ] )
4039 if len( current ) == len( getResponses[ i ] ):
4040 # no repeats
4041 if onosSet != current:
4042 main.log.error( "ONOS" + node +
4043 " has incorrect view" +
4044 " of set " + onosSetName + ":\n" +
4045 str( getResponses[ i ] ) )
4046 main.log.debug( "Expected: " + str( onosSet ) )
4047 main.log.debug( "Actual: " + str( current ) )
4048 getResults = main.FALSE
4049 else:
4050 # error, set is not a set
4051 main.log.error( "ONOS" + node +
4052 " has repeat elements in" +
4053 " set " + onosSetName + ":\n" +
4054 str( getResponses[ i ] ) )
4055 getResults = main.FALSE
4056 elif getResponses[ i ] == main.ERROR:
4057 getResults = main.FALSE
4058 sizeResponses = []
4059 threads = []
4060 for i in main.activeNodes:
4061 t = main.Thread( target=main.CLIs[i].setTestSize,
4062 name="setTestSize-" + str( i ),
4063 args=[ onosSetName ] )
4064 threads.append( t )
4065 t.start()
4066 for t in threads:
4067 t.join()
4068 sizeResponses.append( t.result )
4069 sizeResults = main.TRUE
4070 for i in range( len( main.activeNodes ) ):
4071 node = str( main.activeNodes[i] + 1 )
4072 if size != sizeResponses[ i ]:
4073 sizeResults = main.FALSE
4074 main.log.error( "ONOS" + node +
4075 " expected a size of " + str( size ) +
4076 " for set " + onosSetName +
4077 " but got " + str( sizeResponses[ i ] ) )
4078 addAllResults = addAllResults and getResults and sizeResults
4079 utilities.assert_equals( expect=main.TRUE,
4080 actual=addAllResults,
4081 onpass="Set addAll correct",
4082 onfail="Set addAll was incorrect" )
4083
4084 main.step( "Distributed Set retain()" )
4085 onosSet.intersection_update( retainValue.split() )
4086 retainResponses = []
4087 threads = []
4088 for i in main.activeNodes:
4089 t = main.Thread( target=main.CLIs[i].setTestRemove,
4090 name="setTestRetain-" + str( i ),
4091 args=[ onosSetName, retainValue ],
4092 kwargs={ "retain": True } )
4093 threads.append( t )
4094 t.start()
4095 for t in threads:
4096 t.join()
4097 retainResponses.append( t.result )
4098
4099 # main.TRUE = successfully changed the set
4100 # main.FALSE = action resulted in no change in set
4101 # main.ERROR - Some error in executing the function
4102 retainResults = main.TRUE
4103 for i in range( len( main.activeNodes ) ):
4104 if retainResponses[ i ] == main.TRUE:
4105 # All is well
4106 pass
4107 elif retainResponses[ i ] == main.FALSE:
4108 # Already in set, probably fine
4109 pass
4110 elif retainResponses[ i ] == main.ERROR:
4111 # Error in execution
4112 retainResults = main.FALSE
4113 else:
4114 # unexpected result
4115 retainResults = main.FALSE
4116 if retainResults != main.TRUE:
4117 main.log.error( "Error executing set retain" )
4118
4119 # Check if set is still correct
4120 size = len( onosSet )
4121 getResponses = []
4122 threads = []
4123 for i in main.activeNodes:
4124 t = main.Thread( target=main.CLIs[i].setTestGet,
4125 name="setTestGet-" + str( i ),
4126 args=[ onosSetName ] )
4127 threads.append( t )
4128 t.start()
4129 for t in threads:
4130 t.join()
4131 getResponses.append( t.result )
4132 getResults = main.TRUE
4133 for i in range( len( main.activeNodes ) ):
4134 node = str( main.activeNodes[i] + 1 )
4135 if isinstance( getResponses[ i ], list):
4136 current = set( getResponses[ i ] )
4137 if len( current ) == len( getResponses[ i ] ):
4138 # no repeats
4139 if onosSet != current:
4140 main.log.error( "ONOS" + node +
4141 " has incorrect view" +
4142 " of set " + onosSetName + ":\n" +
4143 str( getResponses[ i ] ) )
4144 main.log.debug( "Expected: " + str( onosSet ) )
4145 main.log.debug( "Actual: " + str( current ) )
4146 getResults = main.FALSE
4147 else:
4148 # error, set is not a set
4149 main.log.error( "ONOS" + node +
4150 " has repeat elements in" +
4151 " set " + onosSetName + ":\n" +
4152 str( getResponses[ i ] ) )
4153 getResults = main.FALSE
4154 elif getResponses[ i ] == main.ERROR:
4155 getResults = main.FALSE
4156 sizeResponses = []
4157 threads = []
4158 for i in main.activeNodes:
4159 t = main.Thread( target=main.CLIs[i].setTestSize,
4160 name="setTestSize-" + str( i ),
4161 args=[ onosSetName ] )
4162 threads.append( t )
4163 t.start()
4164 for t in threads:
4165 t.join()
4166 sizeResponses.append( t.result )
4167 sizeResults = main.TRUE
4168 for i in range( len( main.activeNodes ) ):
4169 node = str( main.activeNodes[i] + 1 )
4170 if size != sizeResponses[ i ]:
4171 sizeResults = main.FALSE
4172 main.log.error( "ONOS" + node + " expected a size of " +
4173 str( size ) + " for set " + onosSetName +
4174 " but got " + str( sizeResponses[ i ] ) )
4175 retainResults = retainResults and getResults and sizeResults
4176 utilities.assert_equals( expect=main.TRUE,
4177 actual=retainResults,
4178 onpass="Set retain correct",
4179 onfail="Set retain was incorrect" )
4180
4181 # Transactional maps
4182 main.step( "Partitioned Transactional maps put" )
4183 tMapValue = "Testing"
4184 numKeys = 100
4185 putResult = True
4186 node = main.activeNodes[0]
4187 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4188 if putResponses and len( putResponses ) == 100:
4189 for i in putResponses:
4190 if putResponses[ i ][ 'value' ] != tMapValue:
4191 putResult = False
4192 else:
4193 putResult = False
4194 if not putResult:
4195 main.log.debug( "Put response values: " + str( putResponses ) )
4196 utilities.assert_equals( expect=True,
4197 actual=putResult,
4198 onpass="Partitioned Transactional Map put successful",
4199 onfail="Partitioned Transactional Map put values are incorrect" )
4200
        # Verify the put above: for each key, read it back from every active
        # node in parallel and check that all nodes agree on tMapValue.
        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE(review): 'node' here shadows the node index assigned in the
            # put step above; harmless since that value is not reused, but
            # renaming would be clearer.
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            # One bad key fails the whole step
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )