blob: 6ae0c7bd3a5dbdeeb54f17e23864813bf8f27183 [file] [log] [blame]
Jon Hall6e709752016-02-01 13:38:46 -08001"""
Description: This test is to determine if ONOS can handle
a full network partition
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
14CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
28class HAfullNetPartition:
29
    def __init__( self ):
        # TestON requires test classes to define a "default" attribute;
        # no other per-instance state is needed for this test.
        self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
51 import imp
52 import pexpect
53 import time
Jon Halla440e872016-03-31 15:15:50 -070054 import json
Jon Hall6e709752016-02-01 13:38:46 -080055 main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
56 "initialization" )
57 main.case( "Setting up test environment" )
58 main.caseExplanation = "Setup the test environment including " +\
59 "installing ONOS, starting Mininet and ONOS" +\
60 "cli sessions."
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
69 main.numCtrls = int( main.params[ 'num_controllers' ] )
70 if main.ONOSbench.maxNodes:
71 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
74 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070081 # These are for csv plotting in jenkins
82 global labels
83 global data
84 labels = []
85 data = []
Jon Hall6e709752016-02-01 13:38:46 -080086
87 # FIXME: just get controller port from params?
88 # TODO: do we really need all these?
89 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
90 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
91 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
92 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
93 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
94 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
95 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
96
97 try:
Jon Hall41d39f12016-04-11 22:54:35 -070098 from tests.HAsanity.dependencies.HA import HA
99 main.HA = HA()
Jon Hall6e709752016-02-01 13:38:46 -0800100 except Exception as e:
101 main.log.exception( e )
102 main.cleanup()
103 main.exit()
104
105 main.CLIs = []
106 main.nodes = []
107 ipList = []
108 for i in range( 1, main.numCtrls + 1 ):
109 try:
110 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
111 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
112 ipList.append( main.nodes[ -1 ].ip_address )
113 except AttributeError:
114 break
115
116 main.step( "Create cell file" )
117 cellAppString = main.params[ 'ENV' ][ 'appString' ]
118 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
119 main.Mininet1.ip_address,
120 cellAppString, ipList )
121 main.step( "Applying cell variable to environment" )
122 cellResult = main.ONOSbench.setCell( cellName )
123 verifyResult = main.ONOSbench.verifyCell()
124
125 # FIXME:this is short term fix
126 main.log.info( "Removing raft logs" )
127 main.ONOSbench.onosRemoveRaftLogs()
128
129 main.log.info( "Uninstalling ONOS" )
130 for node in main.nodes:
131 main.ONOSbench.onosUninstall( node.ip_address )
132
133 # Make sure ONOS is DEAD
134 main.log.info( "Killing any ONOS processes" )
135 killResults = main.TRUE
136 for node in main.nodes:
137 killed = main.ONOSbench.onosKill( node.ip_address )
138 killResults = killResults and killed
139
140 cleanInstallResult = main.TRUE
141 gitPullResult = main.TRUE
142
143 main.step( "Starting Mininet" )
144 # scp topo file to mininet
145 # TODO: move to params?
146 topoName = "obelisk.py"
147 filePath = main.ONOSbench.home + "/tools/test/topos/"
148 main.ONOSbench.scp( main.Mininet1,
149 filePath + topoName,
150 main.Mininet1.home,
151 direction="to" )
152 mnResult = main.Mininet1.startNet( )
153 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
154 onpass="Mininet Started",
155 onfail="Error starting Mininet" )
156
157 main.step( "Git checkout and pull " + gitBranch )
158 if PULLCODE:
159 main.ONOSbench.gitCheckout( gitBranch )
160 gitPullResult = main.ONOSbench.gitPull()
161 # values of 1 or 3 are good
162 utilities.assert_lesser( expect=0, actual=gitPullResult,
163 onpass="Git pull successful",
164 onfail="Git pull failed" )
165 main.ONOSbench.getVersion( report=True )
166
167 main.step( "Using mvn clean install" )
168 cleanInstallResult = main.TRUE
169 if PULLCODE and gitPullResult == main.TRUE:
170 cleanInstallResult = main.ONOSbench.cleanInstall()
171 else:
172 main.log.warn( "Did not pull new code so skipping mvn " +
173 "clean install" )
174 utilities.assert_equals( expect=main.TRUE,
175 actual=cleanInstallResult,
176 onpass="MCI successful",
177 onfail="MCI failed" )
178 # GRAPHS
179 # NOTE: important params here:
180 # job = name of Jenkins job
181 # Plot Name = Plot-HA, only can be used if multiple plots
182 # index = The number of the graph under plot name
183 job = "HAfullNetPartition"
184 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700185 index = "1"
Jon Hall6e709752016-02-01 13:38:46 -0800186 graphs = '<ac:structured-macro ac:name="html">\n'
187 graphs += '<ac:plain-text-body><![CDATA[\n'
188 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
189 '/plot/' + plotName + '/getPlot?index=' + index +\
190 '&width=500&height=300"' +\
191 'noborder="0" width="500" height="300" scrolling="yes" ' +\
192 'seamless="seamless"></iframe>\n'
193 graphs += ']]></ac:plain-text-body>\n'
194 graphs += '</ac:structured-macro>\n'
195 main.log.wiki(graphs)
196
197 main.step( "Creating ONOS package" )
198 # copy gen-partions file to ONOS
199 # NOTE: this assumes TestON and ONOS are on the same machine
200 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
201 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
202 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
203 main.ONOSbench.ip_address,
204 srcFile,
205 dstDir,
206 pwd=main.ONOSbench.pwd,
207 direction="from" )
208 packageResult = main.ONOSbench.onosPackage()
209 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
210 onpass="ONOS package successful",
211 onfail="ONOS package failed" )
212
213 main.step( "Installing ONOS package" )
214 onosInstallResult = main.TRUE
215 for node in main.nodes:
216 tmpResult = main.ONOSbench.onosInstall( options="-f",
217 node=node.ip_address )
218 onosInstallResult = onosInstallResult and tmpResult
219 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
220 onpass="ONOS install successful",
221 onfail="ONOS install failed" )
222 # clean up gen-partitions file
223 try:
224 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
225 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
226 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
227 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
228 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
229 str( main.ONOSbench.handle.before ) )
230 except ( pexpect.TIMEOUT, pexpect.EOF ):
231 main.log.exception( "ONOSbench: pexpect exception found:" +
232 main.ONOSbench.handle.before )
233 main.cleanup()
234 main.exit()
235
236 main.step( "Checking if ONOS is up yet" )
237 for i in range( 2 ):
238 onosIsupResult = main.TRUE
239 for node in main.nodes:
240 started = main.ONOSbench.isup( node.ip_address )
241 if not started:
242 main.log.error( node.name + " hasn't started" )
243 onosIsupResult = onosIsupResult and started
244 if onosIsupResult == main.TRUE:
245 break
246 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
247 onpass="ONOS startup successful",
248 onfail="ONOS startup failed" )
249
250 main.log.step( "Starting ONOS CLI sessions" )
251 cliResults = main.TRUE
252 threads = []
253 for i in range( main.numCtrls ):
254 t = main.Thread( target=main.CLIs[i].startOnosCli,
255 name="startOnosCli-" + str( i ),
256 args=[main.nodes[i].ip_address] )
257 threads.append( t )
258 t.start()
259
260 for t in threads:
261 t.join()
262 cliResults = cliResults and t.result
263 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
264 onpass="ONOS cli startup successful",
265 onfail="ONOS cli startup failed" )
266
267 # Create a list of active nodes for use when some nodes are stopped
268 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
269
270 if main.params[ 'tcpdump' ].lower() == "true":
271 main.step( "Start Packet Capture MN" )
272 main.Mininet2.startTcpdump(
273 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
274 + "-MN.pcap",
275 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
276 port=main.params[ 'MNtcpdump' ][ 'port' ] )
277
Jon Halla440e872016-03-31 15:15:50 -0700278 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -0700279 nodeResults = utilities.retry( main.HA.nodesCheck,
280 False,
281 args=[main.activeNodes],
282 attempts=5 )
Jon Halla440e872016-03-31 15:15:50 -0700283
Jon Hall41d39f12016-04-11 22:54:35 -0700284 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Halla440e872016-03-31 15:15:50 -0700285 onpass="Nodes check successful",
286 onfail="Nodes check NOT successful" )
287
288 if not nodeResults:
289 for cli in main.CLIs:
290 main.log.debug( "{} components not ACTIVE: \n{}".format(
291 cli.name,
292 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
293
Jon Hall6e709752016-02-01 13:38:46 -0800294 if cliResults == main.FALSE:
295 main.log.error( "Failed to start ONOS, stopping test" )
296 main.cleanup()
297 main.exit()
298
Jon Hall172b7ba2016-04-07 18:12:20 -0700299 main.step( "Activate apps defined in the params file" )
300 # get data from the params
301 apps = main.params.get( 'apps' )
302 if apps:
303 apps = apps.split(',')
304 main.log.warn( apps )
305 activateResult = True
306 for app in apps:
307 main.CLIs[ 0 ].app( app, "Activate" )
308 # TODO: check this worked
309 time.sleep( 10 ) # wait for apps to activate
310 for app in apps:
311 state = main.CLIs[ 0 ].appStatus( app )
312 if state == "ACTIVE":
313 activateResult = activeResult and True
314 else:
315 main.log.error( "{} is in {} state".format( app, state ) )
316 activeResult = False
317 utilities.assert_equals( expect=True,
318 actual=activateResult,
319 onpass="Successfully activated apps",
320 onfail="Failed to activate apps" )
321 else:
322 main.log.warn( "No apps were specified to be loaded after startup" )
323
324 main.step( "Set ONOS configurations" )
325 config = main.params.get( 'ONOS_Configuration' )
326 if config:
327 main.log.debug( config )
328 checkResult = main.TRUE
329 for component in config:
330 for setting in config[component]:
331 value = config[component][setting]
332 check = main.CLIs[ 0 ].setCfg( component, setting, value )
333 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
334 checkResult = check and checkResult
335 utilities.assert_equals( expect=main.TRUE,
336 actual=checkResult,
337 onpass="Successfully set config",
338 onfail="Failed to set config" )
339 else:
340 main.log.warn( "No configurations were specified to be changed after startup" )
341
Jon Hall9d2dcad2016-04-08 10:15:20 -0700342 main.step( "App Ids check" )
343 appCheck = main.TRUE
344 threads = []
345 for i in main.activeNodes:
346 t = main.Thread( target=main.CLIs[i].appToIDCheck,
347 name="appToIDCheck-" + str( i ),
348 args=[] )
349 threads.append( t )
350 t.start()
351
352 for t in threads:
353 t.join()
354 appCheck = appCheck and t.result
355 if appCheck != main.TRUE:
356 node = main.activeNodes[0]
357 main.log.warn( main.CLIs[node].apps() )
358 main.log.warn( main.CLIs[node].appIDs() )
359 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
360 onpass="App Ids seem to be correct",
361 onfail="Something is wrong with app Ids" )
362
Jon Hall6e709752016-02-01 13:38:46 -0800363 def CASE2( self, main ):
364 """
365 Assign devices to controllers
366 """
367 import re
368 assert main.numCtrls, "main.numCtrls not defined"
369 assert main, "main not defined"
370 assert utilities.assert_equals, "utilities.assert_equals not defined"
371 assert main.CLIs, "main.CLIs not defined"
372 assert main.nodes, "main.nodes not defined"
373 assert ONOS1Port, "ONOS1Port not defined"
374 assert ONOS2Port, "ONOS2Port not defined"
375 assert ONOS3Port, "ONOS3Port not defined"
376 assert ONOS4Port, "ONOS4Port not defined"
377 assert ONOS5Port, "ONOS5Port not defined"
378 assert ONOS6Port, "ONOS6Port not defined"
379 assert ONOS7Port, "ONOS7Port not defined"
380
381 main.case( "Assigning devices to controllers" )
382 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
383 "and check that an ONOS node becomes the " +\
384 "master of the device."
385 main.step( "Assign switches to controllers" )
386
387 ipList = []
388 for i in range( main.numCtrls ):
389 ipList.append( main.nodes[ i ].ip_address )
390 swList = []
391 for i in range( 1, 29 ):
392 swList.append( "s" + str( i ) )
393 main.Mininet1.assignSwController( sw=swList, ip=ipList )
394
395 mastershipCheck = main.TRUE
396 for i in range( 1, 29 ):
397 response = main.Mininet1.getSwController( "s" + str( i ) )
398 try:
399 main.log.info( str( response ) )
400 except Exception:
401 main.log.info( repr( response ) )
402 for node in main.nodes:
403 if re.search( "tcp:" + node.ip_address, response ):
404 mastershipCheck = mastershipCheck and main.TRUE
405 else:
406 main.log.error( "Error, node " + node.ip_address + " is " +
407 "not in the list of controllers s" +
408 str( i ) + " is connecting to." )
409 mastershipCheck = main.FALSE
410 utilities.assert_equals(
411 expect=main.TRUE,
412 actual=mastershipCheck,
413 onpass="Switch mastership assigned correctly",
414 onfail="Switches not assigned correctly to controllers" )
415
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually pins each of the 28 obelisk-topology switches to a specific
        ONOS node via 'device-role', then re-reads each device's role to
        confirm the requested node actually became master.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI commands go through the first active node's CLI session
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster: "c % main.numCtrls" wraps the target index around
            # whatever number of controllers is actually running.
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = controller index, ip = that node's address,
                # deviceId = the ONOS id of the switch (looked up by dpid)
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    # dpids for this range follow the pattern 3008..3017
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    # dpids for this range follow the pattern 6018..6027
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice() returning None (AttributeError on .get) or a missing
            # device id (AssertionError) both mean the device view is broken
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        # Re-read the role of every device we touched and compare with the
        # controller we asked for (ipList/deviceList are index-aligned).
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
537
    def CASE3( self, main ):
        """
        Assign intents

        Discovers hosts with a reactive-forwarding pingall, installs ten
        host-to-host intents (h8-h18 ... h17-h27), then waits for the intent
        IDs and INSTALLED state to appear on every active node, timing how
        long the eventually-consistent stores take to converge.
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        # Verify the app-name <-> app-id mapping is consistent on every node
        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            # One retry: first pingall can fail while fwd flows are installing
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        # Re-check app ids now that fwd has been deactivated
        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        # Pair host h<i> with h<i+10>; MACs in this topology encode the host
        # number in the last octet (e.g. h8 -> 00:00:00:00:00:08)
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                # Spread the intent-add calls across the active nodes
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        # Record when submission finished; used below to time anti-entropy
        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        # Diagnostic dump: leadership of the intent partition topics
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        # Poll (up to 100 times, 1s apart) until every active node reports the
        # same intent-id set and every intent is INSTALLED
        for j in range( 100 ):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep( 1 )
        if not intentStop:
            intentStop = time.time()
        # gossipTime is global so later cases can plot/report it
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        # Allowed convergence time scales with cluster size
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        # Last-chance retry: wait a minute and repeat the full state dump if
        # intents were missing or the pending map still has entries
        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
933
    def CASE4( self, main ):
        """
        Ping across added host intents

        Polls intent state ( up to ~40s ) until every intent reports
        INSTALLED, pings between the host pairs the intents connect,
        dumps leaders/partitions/pending-map state from the first active
        node, and — if any intent was still not INSTALLED — waits 60
        seconds, re-dumps state and retries the pings once.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All queries in this case go through the first active node's CLI
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll up to 40 times, 1 second apart, until all intents are INSTALLED
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # h8-h17 are pinged against h18-h27; these pairs should match the
        # host intents added earlier ( CASE3 ) — TODO confirm against CASE3
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # Retry path: only taken when some intent never reached INSTALLED
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1216 def CASE5( self, main ):
1217 """
1218 Reading state of ONOS
1219 """
1220 import json
1221 import time
1222 assert main.numCtrls, "main.numCtrls not defined"
1223 assert main, "main not defined"
1224 assert utilities.assert_equals, "utilities.assert_equals not defined"
1225 assert main.CLIs, "main.CLIs not defined"
1226 assert main.nodes, "main.nodes not defined"
1227
1228 main.case( "Setting up and gathering data for current state" )
1229 # The general idea for this test case is to pull the state of
1230 # ( intents,flows, topology,... ) from each ONOS node
1231 # We can then compare them with each other and also with past states
1232
1233 main.step( "Check that each switch has a master" )
1234 global mastershipState
1235 mastershipState = '[]'
1236
1237 # Assert that each device has a master
1238 rolesNotNull = main.TRUE
1239 threads = []
1240 for i in main.activeNodes:
1241 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1242 name="rolesNotNull-" + str( i ),
1243 args=[] )
1244 threads.append( t )
1245 t.start()
1246
1247 for t in threads:
1248 t.join()
1249 rolesNotNull = rolesNotNull and t.result
1250 utilities.assert_equals(
1251 expect=main.TRUE,
1252 actual=rolesNotNull,
1253 onpass="Each device has a master",
1254 onfail="Some devices don't have a master assigned" )
1255
1256 main.step( "Get the Mastership of each switch from each controller" )
1257 ONOSMastership = []
1258 mastershipCheck = main.FALSE
1259 consistentMastership = True
1260 rolesResults = True
1261 threads = []
1262 for i in main.activeNodes:
1263 t = main.Thread( target=main.CLIs[i].roles,
1264 name="roles-" + str( i ),
1265 args=[] )
1266 threads.append( t )
1267 t.start()
1268
1269 for t in threads:
1270 t.join()
1271 ONOSMastership.append( t.result )
1272
1273 for i in range( len( ONOSMastership ) ):
1274 node = str( main.activeNodes[i] + 1 )
1275 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1276 main.log.error( "Error in getting ONOS" + node + " roles" )
1277 main.log.warn( "ONOS" + node + " mastership response: " +
1278 repr( ONOSMastership[i] ) )
1279 rolesResults = False
1280 utilities.assert_equals(
1281 expect=True,
1282 actual=rolesResults,
1283 onpass="No error in reading roles output",
1284 onfail="Error in reading roles from ONOS" )
1285
1286 main.step( "Check for consistency in roles from each controller" )
1287 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1288 main.log.info(
1289 "Switch roles are consistent across all ONOS nodes" )
1290 else:
1291 consistentMastership = False
1292 utilities.assert_equals(
1293 expect=True,
1294 actual=consistentMastership,
1295 onpass="Switch roles are consistent across all ONOS nodes",
1296 onfail="ONOS nodes have different views of switch roles" )
1297
1298 if rolesResults and not consistentMastership:
1299 for i in range( len( main.activeNodes ) ):
1300 node = str( main.activeNodes[i] + 1 )
1301 try:
1302 main.log.warn(
1303 "ONOS" + node + " roles: ",
1304 json.dumps(
1305 json.loads( ONOSMastership[ i ] ),
1306 sort_keys=True,
1307 indent=4,
1308 separators=( ',', ': ' ) ) )
1309 except ( ValueError, TypeError ):
1310 main.log.warn( repr( ONOSMastership[ i ] ) )
1311 elif rolesResults and consistentMastership:
1312 mastershipCheck = main.TRUE
1313 mastershipState = ONOSMastership[ 0 ]
1314
1315 main.step( "Get the intents from each controller" )
1316 global intentState
1317 intentState = []
1318 ONOSIntents = []
1319 intentCheck = main.FALSE
1320 consistentIntents = True
1321 intentsResults = True
1322 threads = []
1323 for i in main.activeNodes:
1324 t = main.Thread( target=main.CLIs[i].intents,
1325 name="intents-" + str( i ),
1326 args=[],
1327 kwargs={ 'jsonFormat': True } )
1328 threads.append( t )
1329 t.start()
1330
1331 for t in threads:
1332 t.join()
1333 ONOSIntents.append( t.result )
1334
1335 for i in range( len( ONOSIntents ) ):
1336 node = str( main.activeNodes[i] + 1 )
1337 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1338 main.log.error( "Error in getting ONOS" + node + " intents" )
1339 main.log.warn( "ONOS" + node + " intents response: " +
1340 repr( ONOSIntents[ i ] ) )
1341 intentsResults = False
1342 utilities.assert_equals(
1343 expect=True,
1344 actual=intentsResults,
1345 onpass="No error in reading intents output",
1346 onfail="Error in reading intents from ONOS" )
1347
1348 main.step( "Check for consistency in Intents from each controller" )
1349 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1350 main.log.info( "Intents are consistent across all ONOS " +
1351 "nodes" )
1352 else:
1353 consistentIntents = False
1354 main.log.error( "Intents not consistent" )
1355 utilities.assert_equals(
1356 expect=True,
1357 actual=consistentIntents,
1358 onpass="Intents are consistent across all ONOS nodes",
1359 onfail="ONOS nodes have different views of intents" )
1360
1361 if intentsResults:
1362 # Try to make it easy to figure out what is happening
1363 #
1364 # Intent ONOS1 ONOS2 ...
1365 # 0x01 INSTALLED INSTALLING
1366 # ... ... ...
1367 # ... ... ...
1368 title = " Id"
1369 for n in main.activeNodes:
1370 title += " " * 10 + "ONOS" + str( n + 1 )
1371 main.log.warn( title )
1372 # get all intent keys in the cluster
1373 keys = []
Jon Halla440e872016-03-31 15:15:50 -07001374 try:
1375 # Get the set of all intent keys
Jon Hall6e709752016-02-01 13:38:46 -08001376 for nodeStr in ONOSIntents:
1377 node = json.loads( nodeStr )
1378 for intent in node:
Jon Halla440e872016-03-31 15:15:50 -07001379 keys.append( intent.get( 'id' ) )
1380 keys = set( keys )
1381 # For each intent key, print the state on each node
1382 for key in keys:
1383 row = "%-13s" % key
1384 for nodeStr in ONOSIntents:
1385 node = json.loads( nodeStr )
1386 for intent in node:
1387 if intent.get( 'id', "Error" ) == key:
1388 row += "%-15s" % intent.get( 'state' )
1389 main.log.warn( row )
1390 # End of intent state table
1391 except ValueError as e:
1392 main.log.exception( e )
1393 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
Jon Hall6e709752016-02-01 13:38:46 -08001394
1395 if intentsResults and not consistentIntents:
1396 # print the json objects
1397 n = str( main.activeNodes[-1] + 1 )
1398 main.log.debug( "ONOS" + n + " intents: " )
1399 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1400 sort_keys=True,
1401 indent=4,
1402 separators=( ',', ': ' ) ) )
1403 for i in range( len( ONOSIntents ) ):
1404 node = str( main.activeNodes[i] + 1 )
1405 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1406 main.log.debug( "ONOS" + node + " intents: " )
1407 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1408 sort_keys=True,
1409 indent=4,
1410 separators=( ',', ': ' ) ) )
1411 else:
1412 main.log.debug( "ONOS" + node + " intents match ONOS" +
1413 n + " intents" )
1414 elif intentsResults and consistentIntents:
1415 intentCheck = main.TRUE
1416 intentState = ONOSIntents[ 0 ]
1417
1418 main.step( "Get the flows from each controller" )
1419 global flowState
1420 flowState = []
1421 ONOSFlows = []
1422 ONOSFlowsJson = []
1423 flowCheck = main.FALSE
1424 consistentFlows = True
1425 flowsResults = True
1426 threads = []
1427 for i in main.activeNodes:
1428 t = main.Thread( target=main.CLIs[i].flows,
1429 name="flows-" + str( i ),
1430 args=[],
1431 kwargs={ 'jsonFormat': True } )
1432 threads.append( t )
1433 t.start()
1434
1435 # NOTE: Flows command can take some time to run
1436 time.sleep(30)
1437 for t in threads:
1438 t.join()
1439 result = t.result
1440 ONOSFlows.append( result )
1441
1442 for i in range( len( ONOSFlows ) ):
1443 num = str( main.activeNodes[i] + 1 )
1444 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1445 main.log.error( "Error in getting ONOS" + num + " flows" )
1446 main.log.warn( "ONOS" + num + " flows response: " +
1447 repr( ONOSFlows[ i ] ) )
1448 flowsResults = False
1449 ONOSFlowsJson.append( None )
1450 else:
1451 try:
1452 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1453 except ( ValueError, TypeError ):
1454 # FIXME: change this to log.error?
1455 main.log.exception( "Error in parsing ONOS" + num +
1456 " response as json." )
1457 main.log.error( repr( ONOSFlows[ i ] ) )
1458 ONOSFlowsJson.append( None )
1459 flowsResults = False
1460 utilities.assert_equals(
1461 expect=True,
1462 actual=flowsResults,
1463 onpass="No error in reading flows output",
1464 onfail="Error in reading flows from ONOS" )
1465
1466 main.step( "Check for consistency in Flows from each controller" )
1467 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1468 if all( tmp ):
1469 main.log.info( "Flow count is consistent across all ONOS nodes" )
1470 else:
1471 consistentFlows = False
1472 utilities.assert_equals(
1473 expect=True,
1474 actual=consistentFlows,
1475 onpass="The flow count is consistent across all ONOS nodes",
1476 onfail="ONOS nodes have different flow counts" )
1477
1478 if flowsResults and not consistentFlows:
1479 for i in range( len( ONOSFlows ) ):
1480 node = str( main.activeNodes[i] + 1 )
1481 try:
1482 main.log.warn(
1483 "ONOS" + node + " flows: " +
1484 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1485 indent=4, separators=( ',', ': ' ) ) )
1486 except ( ValueError, TypeError ):
1487 main.log.warn( "ONOS" + node + " flows: " +
1488 repr( ONOSFlows[ i ] ) )
1489 elif flowsResults and consistentFlows:
1490 flowCheck = main.TRUE
1491 flowState = ONOSFlows[ 0 ]
1492
1493 main.step( "Get the OF Table entries" )
1494 global flows
1495 flows = []
1496 for i in range( 1, 29 ):
1497 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1498 if flowCheck == main.FALSE:
1499 for table in flows:
1500 main.log.warn( table )
1501 # TODO: Compare switch flow tables with ONOS flow tables
1502
1503 main.step( "Start continuous pings" )
1504 main.Mininet2.pingLong(
1505 src=main.params[ 'PING' ][ 'source1' ],
1506 target=main.params[ 'PING' ][ 'target1' ],
1507 pingTime=500 )
1508 main.Mininet2.pingLong(
1509 src=main.params[ 'PING' ][ 'source2' ],
1510 target=main.params[ 'PING' ][ 'target2' ],
1511 pingTime=500 )
1512 main.Mininet2.pingLong(
1513 src=main.params[ 'PING' ][ 'source3' ],
1514 target=main.params[ 'PING' ][ 'target3' ],
1515 pingTime=500 )
1516 main.Mininet2.pingLong(
1517 src=main.params[ 'PING' ][ 'source4' ],
1518 target=main.params[ 'PING' ][ 'target4' ],
1519 pingTime=500 )
1520 main.Mininet2.pingLong(
1521 src=main.params[ 'PING' ][ 'source5' ],
1522 target=main.params[ 'PING' ][ 'target5' ],
1523 pingTime=500 )
1524 main.Mininet2.pingLong(
1525 src=main.params[ 'PING' ][ 'source6' ],
1526 target=main.params[ 'PING' ][ 'target6' ],
1527 pingTime=500 )
1528 main.Mininet2.pingLong(
1529 src=main.params[ 'PING' ][ 'source7' ],
1530 target=main.params[ 'PING' ][ 'target7' ],
1531 pingTime=500 )
1532 main.Mininet2.pingLong(
1533 src=main.params[ 'PING' ][ 'source8' ],
1534 target=main.params[ 'PING' ][ 'target8' ],
1535 pingTime=500 )
1536 main.Mininet2.pingLong(
1537 src=main.params[ 'PING' ][ 'source9' ],
1538 target=main.params[ 'PING' ][ 'target9' ],
1539 pingTime=500 )
1540 main.Mininet2.pingLong(
1541 src=main.params[ 'PING' ][ 'source10' ],
1542 target=main.params[ 'PING' ][ 'target10' ],
1543 pingTime=500 )
1544
1545 main.step( "Collecting topology information from ONOS" )
1546 devices = []
1547 threads = []
1548 for i in main.activeNodes:
1549 t = main.Thread( target=main.CLIs[i].devices,
1550 name="devices-" + str( i ),
1551 args=[ ] )
1552 threads.append( t )
1553 t.start()
1554
1555 for t in threads:
1556 t.join()
1557 devices.append( t.result )
1558 hosts = []
1559 threads = []
1560 for i in main.activeNodes:
1561 t = main.Thread( target=main.CLIs[i].hosts,
1562 name="hosts-" + str( i ),
1563 args=[ ] )
1564 threads.append( t )
1565 t.start()
1566
1567 for t in threads:
1568 t.join()
1569 try:
1570 hosts.append( json.loads( t.result ) )
1571 except ( ValueError, TypeError ):
1572 # FIXME: better handling of this, print which node
1573 # Maybe use thread name?
1574 main.log.exception( "Error parsing json output of hosts" )
1575 main.log.warn( repr( t.result ) )
1576 hosts.append( None )
1577
1578 ports = []
1579 threads = []
1580 for i in main.activeNodes:
1581 t = main.Thread( target=main.CLIs[i].ports,
1582 name="ports-" + str( i ),
1583 args=[ ] )
1584 threads.append( t )
1585 t.start()
1586
1587 for t in threads:
1588 t.join()
1589 ports.append( t.result )
1590 links = []
1591 threads = []
1592 for i in main.activeNodes:
1593 t = main.Thread( target=main.CLIs[i].links,
1594 name="links-" + str( i ),
1595 args=[ ] )
1596 threads.append( t )
1597 t.start()
1598
1599 for t in threads:
1600 t.join()
1601 links.append( t.result )
1602 clusters = []
1603 threads = []
1604 for i in main.activeNodes:
1605 t = main.Thread( target=main.CLIs[i].clusters,
1606 name="clusters-" + str( i ),
1607 args=[ ] )
1608 threads.append( t )
1609 t.start()
1610
1611 for t in threads:
1612 t.join()
1613 clusters.append( t.result )
1614 # Compare json objects for hosts and dataplane clusters
1615
1616 # hosts
1617 main.step( "Host view is consistent across ONOS nodes" )
1618 consistentHostsResult = main.TRUE
1619 for controller in range( len( hosts ) ):
1620 controllerStr = str( main.activeNodes[controller] + 1 )
1621 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1622 if hosts[ controller ] == hosts[ 0 ]:
1623 continue
1624 else: # hosts not consistent
1625 main.log.error( "hosts from ONOS" +
1626 controllerStr +
1627 " is inconsistent with ONOS1" )
1628 main.log.warn( repr( hosts[ controller ] ) )
1629 consistentHostsResult = main.FALSE
1630
1631 else:
1632 main.log.error( "Error in getting ONOS hosts from ONOS" +
1633 controllerStr )
1634 consistentHostsResult = main.FALSE
1635 main.log.warn( "ONOS" + controllerStr +
1636 " hosts response: " +
1637 repr( hosts[ controller ] ) )
1638 utilities.assert_equals(
1639 expect=main.TRUE,
1640 actual=consistentHostsResult,
1641 onpass="Hosts view is consistent across all ONOS nodes",
1642 onfail="ONOS nodes have different views of hosts" )
1643
1644 main.step( "Each host has an IP address" )
1645 ipResult = main.TRUE
1646 for controller in range( 0, len( hosts ) ):
1647 controllerStr = str( main.activeNodes[controller] + 1 )
1648 if hosts[ controller ]:
1649 for host in hosts[ controller ]:
1650 if not host.get( 'ipAddresses', [ ] ):
1651 main.log.error( "Error with host ips on controller" +
1652 controllerStr + ": " + str( host ) )
1653 ipResult = main.FALSE
1654 utilities.assert_equals(
1655 expect=main.TRUE,
1656 actual=ipResult,
1657 onpass="The ips of the hosts aren't empty",
1658 onfail="The ip of at least one host is missing" )
1659
1660 # Strongly connected clusters of devices
1661 main.step( "Cluster view is consistent across ONOS nodes" )
1662 consistentClustersResult = main.TRUE
1663 for controller in range( len( clusters ) ):
1664 controllerStr = str( main.activeNodes[controller] + 1 )
1665 if "Error" not in clusters[ controller ]:
1666 if clusters[ controller ] == clusters[ 0 ]:
1667 continue
1668 else: # clusters not consistent
1669 main.log.error( "clusters from ONOS" + controllerStr +
1670 " is inconsistent with ONOS1" )
1671 consistentClustersResult = main.FALSE
1672
1673 else:
1674 main.log.error( "Error in getting dataplane clusters " +
1675 "from ONOS" + controllerStr )
1676 consistentClustersResult = main.FALSE
1677 main.log.warn( "ONOS" + controllerStr +
1678 " clusters response: " +
1679 repr( clusters[ controller ] ) )
1680 utilities.assert_equals(
1681 expect=main.TRUE,
1682 actual=consistentClustersResult,
1683 onpass="Clusters view is consistent across all ONOS nodes",
1684 onfail="ONOS nodes have different views of clusters" )
Jon Hall172b7ba2016-04-07 18:12:20 -07001685 if consistentClustersResult != main.TRUE:
1686 main.log.debug( clusters )
Jon Hall6e709752016-02-01 13:38:46 -08001687 # there should always only be one cluster
1688 main.step( "Cluster view correct across ONOS nodes" )
1689 try:
1690 numClusters = len( json.loads( clusters[ 0 ] ) )
1691 except ( ValueError, TypeError ):
1692 main.log.exception( "Error parsing clusters[0]: " +
1693 repr( clusters[ 0 ] ) )
1694 numClusters = "ERROR"
1695 clusterResults = main.FALSE
1696 if numClusters == 1:
1697 clusterResults = main.TRUE
1698 utilities.assert_equals(
1699 expect=1,
1700 actual=numClusters,
1701 onpass="ONOS shows 1 SCC",
1702 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1703
1704 main.step( "Comparing ONOS topology to MN" )
1705 devicesResults = main.TRUE
1706 linksResults = main.TRUE
1707 hostsResults = main.TRUE
1708 mnSwitches = main.Mininet1.getSwitches()
1709 mnLinks = main.Mininet1.getLinks()
1710 mnHosts = main.Mininet1.getHosts()
1711 for controller in main.activeNodes:
1712 controllerStr = str( main.activeNodes[controller] + 1 )
1713 if devices[ controller ] and ports[ controller ] and\
1714 "Error" not in devices[ controller ] and\
1715 "Error" not in ports[ controller ]:
1716 currentDevicesResult = main.Mininet1.compareSwitches(
1717 mnSwitches,
1718 json.loads( devices[ controller ] ),
1719 json.loads( ports[ controller ] ) )
1720 else:
1721 currentDevicesResult = main.FALSE
1722 utilities.assert_equals( expect=main.TRUE,
1723 actual=currentDevicesResult,
1724 onpass="ONOS" + controllerStr +
1725 " Switches view is correct",
1726 onfail="ONOS" + controllerStr +
1727 " Switches view is incorrect" )
1728 if links[ controller ] and "Error" not in links[ controller ]:
1729 currentLinksResult = main.Mininet1.compareLinks(
1730 mnSwitches, mnLinks,
1731 json.loads( links[ controller ] ) )
1732 else:
1733 currentLinksResult = main.FALSE
1734 utilities.assert_equals( expect=main.TRUE,
1735 actual=currentLinksResult,
1736 onpass="ONOS" + controllerStr +
1737 " links view is correct",
1738 onfail="ONOS" + controllerStr +
1739 " links view is incorrect" )
1740
1741 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1742 currentHostsResult = main.Mininet1.compareHosts(
1743 mnHosts,
1744 hosts[ controller ] )
1745 else:
1746 currentHostsResult = main.FALSE
1747 utilities.assert_equals( expect=main.TRUE,
1748 actual=currentHostsResult,
1749 onpass="ONOS" + controllerStr +
1750 " hosts exist in Mininet",
1751 onfail="ONOS" + controllerStr +
1752 " hosts don't match Mininet" )
1753
1754 devicesResults = devicesResults and currentDevicesResult
1755 linksResults = linksResults and currentLinksResult
1756 hostsResults = hostsResults and currentHostsResult
1757
1758 main.step( "Device information is correct" )
1759 utilities.assert_equals(
1760 expect=main.TRUE,
1761 actual=devicesResults,
1762 onpass="Device information is correct",
1763 onfail="Device information is incorrect" )
1764
1765 main.step( "Links are correct" )
1766 utilities.assert_equals(
1767 expect=main.TRUE,
1768 actual=linksResults,
1769 onpass="Link are correct",
1770 onfail="Links are incorrect" )
1771
1772 main.step( "Hosts are correct" )
1773 utilities.assert_equals(
1774 expect=main.TRUE,
1775 actual=hostsResults,
1776 onpass="Hosts are correct",
1777 onfail="Hosts are incorrect" )
1778
1779 def CASE61( self, main ):
1780 """
1781 The Failure case.
1782 """
1783 import math
1784 assert main.numCtrls, "main.numCtrls not defined"
1785 assert main, "main not defined"
1786 assert utilities.assert_equals, "utilities.assert_equals not defined"
1787 assert main.CLIs, "main.CLIs not defined"
1788 assert main.nodes, "main.nodes not defined"
1789 main.case( "Partition ONOS nodes into two distinct partitions" )
1790
1791 main.step( "Checking ONOS Logs for errors" )
1792 for node in main.nodes:
1793 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1794 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1795
1796 n = len( main.nodes ) # Number of nodes
1797 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1798 main.partition = [ 0 ] # ONOS node to partition, listed by index in main.nodes
1799 if n > 3:
1800 main.partition.append( p - 1 )
1801 # NOTE: This only works for cluster sizes of 3,5, or 7.
1802
1803 main.step( "Partitioning ONOS nodes" )
1804 nodeList = [ str( i + 1 ) for i in main.partition ]
1805 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1806 partitionResults = main.TRUE
1807 for i in range( 0, n ):
1808 this = main.nodes[i]
1809 if i not in main.partition:
1810 for j in main.partition:
1811 foe = main.nodes[j]
1812 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1813 #CMD HERE
1814 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1815 this.handle.sendline( cmdStr )
1816 this.handle.expect( "\$" )
1817 main.log.debug( this.handle.before )
1818 else:
1819 for j in range( 0, n ):
1820 if j not in main.partition:
1821 foe = main.nodes[j]
1822 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1823 #CMD HERE
1824 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1825 this.handle.sendline( cmdStr )
1826 this.handle.expect( "\$" )
1827 main.log.debug( this.handle.before )
1828 main.activeNodes.remove( i )
1829 # NOTE: When dynamic clustering is finished, we need to start checking
1830 # main.partion nodes still work when partitioned
1831 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1832 onpass="Firewall rules set successfully",
1833 onfail="Error setting firewall rules" )
1834
1835 main.log.step( "Sleeping 60 seconds" )
1836 time.sleep( 60 )
1837
1838 def CASE62( self, main ):
1839 """
1840 Healing Partition
1841 """
1842 import time
1843 assert main.numCtrls, "main.numCtrls not defined"
1844 assert main, "main not defined"
1845 assert utilities.assert_equals, "utilities.assert_equals not defined"
1846 assert main.CLIs, "main.CLIs not defined"
1847 assert main.nodes, "main.nodes not defined"
1848 assert main.partition, "main.partition not defined"
1849 main.case( "Healing Partition" )
1850
1851 main.step( "Deleteing firewall rules" )
1852 healResults = main.TRUE
1853 for node in main.nodes:
1854 cmdStr = "sudo iptables -F"
1855 node.handle.sendline( cmdStr )
1856 node.handle.expect( "\$" )
1857 main.log.debug( node.handle.before )
1858 utilities.assert_equals( expect=main.TRUE, actual=healResults,
1859 onpass="Firewall rules removed",
1860 onfail="Error removing firewall rules" )
1861
1862 for node in main.partition:
1863 main.activeNodes.append( node )
1864 main.activeNodes.sort()
1865 try:
1866 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1867 "List of active nodes has duplicates, this likely indicates something was run out of order"
1868 except AssertionError:
1869 main.log.exception( "" )
1870 main.cleanup()
1871 main.exit()
1872
    def CASE7( self, main ):
        """
        Check state after ONOS failure

        Verifies cluster state after the partition induced in CASE61:
        - every device has a master and mastership views agree across nodes
        - intents agree across nodes and match the pre-failure snapshot
        - OpenFlow tables on s1-s28 are unchanged
        - leadership election still has a single, non-partitioned leader
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.partition is normally set by CASE61; fall back to an empty
        # list so this case can run even if the partition case was skipped
        try:
            main.partition
        except AttributeError:
            main.partition = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query every active node in parallel; result is ANDed together
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # A missing or error-bearing response from any node fails the step
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # Dump each node's view only when they disagree, to aid debugging
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare sorted JSON strings so ordering differences don't matter
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states (e.g. INSTALLED counts)
        intentStates = []
        for node in ONOSIntents: # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # intentState is a global saved by an earlier case (CASE5)
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before failure" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length: fall back to element-wise membership check
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            # NOTE(review): 'flows' is assumed to be saved by an earlier
            #               case — confirm that case ran before this one
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the partitioned nodes; the leader must not be one of them
        partitioned = []
        for i in main.partition:
            partitioned.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in partitioned:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was partitioned" )
                leaderResult = main.FALSE
        # All active nodes must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2195
    def CASE8( self, main ):
        """
        Compare topo

        Repeatedly pulls devices/hosts/ports/links/clusters from every
        active ONOS node (up to ~60s or 3 tries) and compares them against
        the Mininet topology, then checks cross-node consistency of hosts
        and clusters and that the whole network forms a single SCC.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                               " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Fetch devices from every active node in parallel, with retries
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host must have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every node returned nothing at all, retry instead of comparing
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                   "Error" not in devices[ controller ] and\
                   "Error" not in ports[ controller ]:

                    # NOTE(review): if this raises, currentDevicesResult may
                    #               be unbound on the first loop pass — verify
                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ) as e:
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ): # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                # An empty host list is not counted as an attachment failure
                if zeroHosts is False:
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
            topoResult = ( devicesResults and linksResults
                           and hostsResults and ipResult and
                           hostAttachmentResults )
            utilities.assert_equals( expect=True,
                                     actual=topoResult,
                                     onpass="ONOS topology matches Mininet",
                                     onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters
        # NOTE: the checks below reuse hosts/clusters/etc. gathered in the
        #       last iteration of the while loop above

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else: # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else: # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        # NOTE(review): if the parse succeeds but numClusters != 1,
        #               clusterResults is left unbound and the topoResult
        #               expression below would raise NameError — verify
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # Also require convergence within two polling attempts
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
               " seconds for the test to make all the cli calls to fetch " +\
               "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -08002611
2612 def CASE9( self, main ):
2613 """
2614 Link s3-s28 down
2615 """
2616 import time
2617 assert main.numCtrls, "main.numCtrls not defined"
2618 assert main, "main not defined"
2619 assert utilities.assert_equals, "utilities.assert_equals not defined"
2620 assert main.CLIs, "main.CLIs not defined"
2621 assert main.nodes, "main.nodes not defined"
2622 # NOTE: You should probably run a topology check after this
2623
2624 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2625
2626 description = "Turn off a link to ensure that Link Discovery " +\
2627 "is working properly"
2628 main.case( description )
2629
2630 main.step( "Kill Link between s3 and s28" )
2631 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2632 main.log.info( "Waiting " + str( linkSleep ) +
2633 " seconds for link down to be discovered" )
2634 time.sleep( linkSleep )
2635 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2636 onpass="Link down successful",
2637 onfail="Failed to bring link down" )
2638 # TODO do some sort of check here
2639
2640 def CASE10( self, main ):
2641 """
2642 Link s3-s28 up
2643 """
2644 import time
2645 assert main.numCtrls, "main.numCtrls not defined"
2646 assert main, "main not defined"
2647 assert utilities.assert_equals, "utilities.assert_equals not defined"
2648 assert main.CLIs, "main.CLIs not defined"
2649 assert main.nodes, "main.nodes not defined"
2650 # NOTE: You should probably run a topology check after this
2651
2652 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2653
2654 description = "Restore a link to ensure that Link Discovery is " + \
2655 "working properly"
2656 main.case( description )
2657
2658 main.step( "Bring link between s3 and s28 back up" )
2659 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2660 main.log.info( "Waiting " + str( linkSleep ) +
2661 " seconds for link up to be discovered" )
2662 time.sleep( linkSleep )
2663 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2664 onpass="Link up successful",
2665 onfail="Failed to bring link up" )
2666 # TODO do some sort of check here
2667
2668 def CASE11( self, main ):
2669 """
2670 Switch Down
2671 """
2672 # NOTE: You should probably run a topology check after this
2673 import time
2674 assert main.numCtrls, "main.numCtrls not defined"
2675 assert main, "main not defined"
2676 assert utilities.assert_equals, "utilities.assert_equals not defined"
2677 assert main.CLIs, "main.CLIs not defined"
2678 assert main.nodes, "main.nodes not defined"
2679
2680 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2681
2682 description = "Killing a switch to ensure it is discovered correctly"
2683 onosCli = main.CLIs[ main.activeNodes[0] ]
2684 main.case( description )
2685 switch = main.params[ 'kill' ][ 'switch' ]
2686 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2687
2688 # TODO: Make this switch parameterizable
2689 main.step( "Kill " + switch )
2690 main.log.info( "Deleting " + switch )
2691 main.Mininet1.delSwitch( switch )
2692 main.log.info( "Waiting " + str( switchSleep ) +
2693 " seconds for switch down to be discovered" )
2694 time.sleep( switchSleep )
2695 device = onosCli.getDevice( dpid=switchDPID )
2696 # Peek at the deleted switch
2697 main.log.warn( str( device ) )
2698 result = main.FALSE
2699 if device and device[ 'available' ] is False:
2700 result = main.TRUE
2701 utilities.assert_equals( expect=main.TRUE, actual=result,
2702 onpass="Kill switch successful",
2703 onfail="Failed to kill switch?" )
2704
2705 def CASE12( self, main ):
2706 """
2707 Switch Up
2708 """
2709 # NOTE: You should probably run a topology check after this
2710 import time
2711 assert main.numCtrls, "main.numCtrls not defined"
2712 assert main, "main not defined"
2713 assert utilities.assert_equals, "utilities.assert_equals not defined"
2714 assert main.CLIs, "main.CLIs not defined"
2715 assert main.nodes, "main.nodes not defined"
2716 assert ONOS1Port, "ONOS1Port not defined"
2717 assert ONOS2Port, "ONOS2Port not defined"
2718 assert ONOS3Port, "ONOS3Port not defined"
2719 assert ONOS4Port, "ONOS4Port not defined"
2720 assert ONOS5Port, "ONOS5Port not defined"
2721 assert ONOS6Port, "ONOS6Port not defined"
2722 assert ONOS7Port, "ONOS7Port not defined"
2723
2724 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2725 switch = main.params[ 'kill' ][ 'switch' ]
2726 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2727 links = main.params[ 'kill' ][ 'links' ].split()
2728 onosCli = main.CLIs[ main.activeNodes[0] ]
2729 description = "Adding a switch to ensure it is discovered correctly"
2730 main.case( description )
2731
2732 main.step( "Add back " + switch )
2733 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2734 for peer in links:
2735 main.Mininet1.addLink( switch, peer )
2736 ipList = [ node.ip_address for node in main.nodes ]
2737 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2738 main.log.info( "Waiting " + str( switchSleep ) +
2739 " seconds for switch up to be discovered" )
2740 time.sleep( switchSleep )
2741 device = onosCli.getDevice( dpid=switchDPID )
2742 # Peek at the deleted switch
2743 main.log.warn( str( device ) )
2744 result = main.FALSE
2745 if device and device[ 'available' ]:
2746 result = main.TRUE
2747 utilities.assert_equals( expect=main.TRUE, actual=result,
2748 onpass="add switch successful",
2749 onfail="Failed to add switch?" )
2750
2751 def CASE13( self, main ):
2752 """
2753 Clean up
2754 """
2755 import os
2756 import time
2757 assert main.numCtrls, "main.numCtrls not defined"
2758 assert main, "main not defined"
2759 assert utilities.assert_equals, "utilities.assert_equals not defined"
2760 assert main.CLIs, "main.CLIs not defined"
2761 assert main.nodes, "main.nodes not defined"
2762
2763 # printing colors to terminal
2764 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2765 'blue': '\033[94m', 'green': '\033[92m',
2766 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2767 main.case( "Test Cleanup" )
2768 main.step( "Killing tcpdumps" )
2769 main.Mininet2.stopTcpdump()
2770
2771 testname = main.TEST
2772 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2773 main.step( "Copying MN pcap and ONOS log files to test station" )
2774 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2775 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2776 # NOTE: MN Pcap file is being saved to logdir.
2777 # We scp this file as MN and TestON aren't necessarily the same vm
2778
2779 # FIXME: To be replaced with a Jenkin's post script
2780 # TODO: Load these from params
2781 # NOTE: must end in /
2782 logFolder = "/opt/onos/log/"
2783 logFiles = [ "karaf.log", "karaf.log.1" ]
2784 # NOTE: must end in /
2785 for f in logFiles:
2786 for node in main.nodes:
2787 dstName = main.logdir + "/" + node.name + "-" + f
2788 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2789 logFolder + f, dstName )
2790 # std*.log's
2791 # NOTE: must end in /
2792 logFolder = "/opt/onos/var/"
2793 logFiles = [ "stderr.log", "stdout.log" ]
2794 # NOTE: must end in /
2795 for f in logFiles:
2796 for node in main.nodes:
2797 dstName = main.logdir + "/" + node.name + "-" + f
2798 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2799 logFolder + f, dstName )
2800 else:
2801 main.log.debug( "skipping saving log files" )
2802
2803 main.step( "Stopping Mininet" )
2804 mnResult = main.Mininet1.stopNet()
2805 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2806 onpass="Mininet stopped",
2807 onfail="MN cleanup NOT successful" )
2808
2809 main.step( "Checking ONOS Logs for errors" )
2810 for node in main.nodes:
2811 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2812 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2813
2814 try:
2815 timerLog = open( main.logdir + "/Timers.csv", 'w')
2816 # Overwrite with empty line and close
2817 labels = "Gossip Intents"
2818 data = str( gossipTime )
2819 timerLog.write( labels + "\n" + data )
2820 timerLog.close()
2821 except NameError, e:
2822 main.log.exception(e)
2823
2824 def CASE14( self, main ):
2825 """
2826 start election app on all onos nodes
2827 """
2828 assert main.numCtrls, "main.numCtrls not defined"
2829 assert main, "main not defined"
2830 assert utilities.assert_equals, "utilities.assert_equals not defined"
2831 assert main.CLIs, "main.CLIs not defined"
2832 assert main.nodes, "main.nodes not defined"
2833
2834 main.case("Start Leadership Election app")
2835 main.step( "Install leadership election app" )
2836 onosCli = main.CLIs[ main.activeNodes[0] ]
2837 appResult = onosCli.activateApp( "org.onosproject.election" )
2838 utilities.assert_equals(
2839 expect=main.TRUE,
2840 actual=appResult,
2841 onpass="Election app installed",
2842 onfail="Something went wrong with installing Leadership election" )
2843
2844 main.step( "Run for election on each node" )
2845 leaderResult = main.TRUE
2846 leaders = []
2847 for i in main.activeNodes:
2848 main.CLIs[i].electionTestRun()
2849 for i in main.activeNodes:
2850 cli = main.CLIs[i]
2851 leader = cli.electionTestLeader()
2852 if leader is None or leader == main.FALSE:
2853 main.log.error( cli.name + ": Leader for the election app " +
2854 "should be an ONOS node, instead got '" +
2855 str( leader ) + "'" )
2856 leaderResult = main.FALSE
2857 leaders.append( leader )
2858 utilities.assert_equals(
2859 expect=main.TRUE,
2860 actual=leaderResult,
2861 onpass="Successfully ran for leadership",
2862 onfail="Failed to run for leadership" )
2863
2864 main.step( "Check that each node shows the same leader" )
2865 sameLeader = main.TRUE
2866 if len( set( leaders ) ) != 1:
2867 sameLeader = main.FALSE
2868 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
2869 str( leaders ) )
2870 utilities.assert_equals(
2871 expect=main.TRUE,
2872 actual=sameLeader,
2873 onpass="Leadership is consistent for the election topic",
2874 onfail="Nodes have different leaders" )
2875
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
            15.1 Run election on each node
            15.2 Check that each node has the same leaders and candidates
            15.3 Find current leader and withdraw
            15.4 Check that a new node was elected leader
            15.5 Check that that new leader was the candidate of old leader
            15.6 Run for election on old leader
            15.7 Check that oldLeader is a candidate, and leader if only 1 node
            15.8 Make sure that the old leader was added to the candidate list

            old and new variable prefixes refer to data from before vs after
                withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each node's candidates before withdrawal
        newLeaders = []  # list of lists of each node's candidates after withdrawal
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader, used for re-electing
        expectNoLeader = False  # True when there is only one node (no successor exists)
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        # Without the election app loaded the rest of the case is meaningless
        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        # consistentLeaderboards returns ( sameAcrossNodes, leaderboards );
        # leaderboards[ 0 ][ 0 ] is the current leader when consistent
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader by matching its IP against main.nodes
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no break means leader was not found
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        # 'none' means the topic has no leader; only acceptable when a
        # single-node cluster withdrew its only candidate
        if newLeaders[ 0 ][ 0 ] == 'none':
            main.log.error( "No leader was elected on at least 1 node" )
            if not expectNoLeader:
                newLeaderResult = False
        if newLeaderResult:
            newLeader = newLeaders[ 0 ][ 0 ]
        else:
            newLeader = None

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        # ( index 0 was the old leader, index 1 appears to be a duplicate /
        # placeholder entry — TODO confirm leaderboard layout )
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Not enough candidates on the old board to predict a successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this wait
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                     str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE

        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3049
3050 def CASE16( self, main ):
3051 """
3052 Install Distributed Primitives app
3053 """
3054 import time
3055 assert main.numCtrls, "main.numCtrls not defined"
3056 assert main, "main not defined"
3057 assert utilities.assert_equals, "utilities.assert_equals not defined"
3058 assert main.CLIs, "main.CLIs not defined"
3059 assert main.nodes, "main.nodes not defined"
3060
3061 # Variables for the distributed primitives tests
3062 global pCounterName
Jon Hall6e709752016-02-01 13:38:46 -08003063 global pCounterValue
Jon Hall6e709752016-02-01 13:38:46 -08003064 global onosSet
3065 global onosSetName
3066 pCounterName = "TestON-Partitions"
Jon Hall6e709752016-02-01 13:38:46 -08003067 pCounterValue = 0
Jon Hall6e709752016-02-01 13:38:46 -08003068 onosSet = set([])
3069 onosSetName = "TestON-set"
3070
3071 description = "Install Primitives app"
3072 main.case( description )
3073 main.step( "Install Primitives app" )
3074 appName = "org.onosproject.distributedprimitives"
3075 node = main.activeNodes[0]
3076 appResults = main.CLIs[node].activateApp( appName )
3077 utilities.assert_equals( expect=main.TRUE,
3078 actual=appResults,
3079 onpass="Primitives app activated",
3080 onfail="Primitives app not activated" )
3081 time.sleep( 5 ) # To allow all nodes to activate
3082
3083 def CASE17( self, main ):
3084 """
3085 Check for basic functionality with distributed primitives
3086 """
3087 # Make sure variables are defined/set
3088 assert main.numCtrls, "main.numCtrls not defined"
3089 assert main, "main not defined"
3090 assert utilities.assert_equals, "utilities.assert_equals not defined"
3091 assert main.CLIs, "main.CLIs not defined"
3092 assert main.nodes, "main.nodes not defined"
3093 assert pCounterName, "pCounterName not defined"
Jon Hall6e709752016-02-01 13:38:46 -08003094 assert onosSetName, "onosSetName not defined"
3095 # NOTE: assert fails if value is 0/None/Empty/False
3096 try:
3097 pCounterValue
3098 except NameError:
3099 main.log.error( "pCounterValue not defined, setting to 0" )
3100 pCounterValue = 0
3101 try:
Jon Hall6e709752016-02-01 13:38:46 -08003102 onosSet
3103 except NameError:
3104 main.log.error( "onosSet not defined, setting to empty Set" )
3105 onosSet = set([])
3106 # Variables for the distributed primitives tests. These are local only
3107 addValue = "a"
3108 addAllValue = "a b c d e f"
3109 retainValue = "c d e f"
3110
3111 description = "Check for basic functionality with distributed " +\
3112 "primitives"
3113 main.case( description )
3114 main.caseExplanation = "Test the methods of the distributed " +\
3115 "primitives (counters and sets) throught the cli"
3116 # DISTRIBUTED ATOMIC COUNTERS
3117 # Partitioned counters
3118 main.step( "Increment then get a default counter on each node" )
3119 pCounters = []
3120 threads = []
3121 addedPValues = []
3122 for i in main.activeNodes:
3123 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3124 name="counterAddAndGet-" + str( i ),
3125 args=[ pCounterName ] )
3126 pCounterValue += 1
3127 addedPValues.append( pCounterValue )
3128 threads.append( t )
3129 t.start()
3130
3131 for t in threads:
3132 t.join()
3133 pCounters.append( t.result )
3134 # Check that counter incremented numController times
3135 pCounterResults = True
3136 for i in addedPValues:
3137 tmpResult = i in pCounters
3138 pCounterResults = pCounterResults and tmpResult
3139 if not tmpResult:
3140 main.log.error( str( i ) + " is not in partitioned "
3141 "counter incremented results" )
3142 utilities.assert_equals( expect=True,
3143 actual=pCounterResults,
3144 onpass="Default counter incremented",
3145 onfail="Error incrementing default" +
3146 " counter" )
3147
3148 main.step( "Get then Increment a default counter on each node" )
3149 pCounters = []
3150 threads = []
3151 addedPValues = []
3152 for i in main.activeNodes:
3153 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3154 name="counterGetAndAdd-" + str( i ),
3155 args=[ pCounterName ] )
3156 addedPValues.append( pCounterValue )
3157 pCounterValue += 1
3158 threads.append( t )
3159 t.start()
3160
3161 for t in threads:
3162 t.join()
3163 pCounters.append( t.result )
3164 # Check that counter incremented numController times
3165 pCounterResults = True
3166 for i in addedPValues:
3167 tmpResult = i in pCounters
3168 pCounterResults = pCounterResults and tmpResult
3169 if not tmpResult:
3170 main.log.error( str( i ) + " is not in partitioned "
3171 "counter incremented results" )
3172 utilities.assert_equals( expect=True,
3173 actual=pCounterResults,
3174 onpass="Default counter incremented",
3175 onfail="Error incrementing default" +
3176 " counter" )
3177
3178 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003179 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003180 utilities.assert_equals( expect=main.TRUE,
3181 actual=incrementCheck,
3182 onpass="Added counters are correct",
3183 onfail="Added counters are incorrect" )
3184
3185 main.step( "Add -8 to then get a default counter on each node" )
3186 pCounters = []
3187 threads = []
3188 addedPValues = []
3189 for i in main.activeNodes:
3190 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3191 name="counterIncrement-" + str( i ),
3192 args=[ pCounterName ],
3193 kwargs={ "delta": -8 } )
3194 pCounterValue += -8
3195 addedPValues.append( pCounterValue )
3196 threads.append( t )
3197 t.start()
3198
3199 for t in threads:
3200 t.join()
3201 pCounters.append( t.result )
3202 # Check that counter incremented numController times
3203 pCounterResults = True
3204 for i in addedPValues:
3205 tmpResult = i in pCounters
3206 pCounterResults = pCounterResults and tmpResult
3207 if not tmpResult:
3208 main.log.error( str( i ) + " is not in partitioned "
3209 "counter incremented results" )
3210 utilities.assert_equals( expect=True,
3211 actual=pCounterResults,
3212 onpass="Default counter incremented",
3213 onfail="Error incrementing default" +
3214 " counter" )
3215
3216 main.step( "Add 5 to then get a default counter on each node" )
3217 pCounters = []
3218 threads = []
3219 addedPValues = []
3220 for i in main.activeNodes:
3221 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3222 name="counterIncrement-" + str( i ),
3223 args=[ pCounterName ],
3224 kwargs={ "delta": 5 } )
3225 pCounterValue += 5
3226 addedPValues.append( pCounterValue )
3227 threads.append( t )
3228 t.start()
3229
3230 for t in threads:
3231 t.join()
3232 pCounters.append( t.result )
3233 # Check that counter incremented numController times
3234 pCounterResults = True
3235 for i in addedPValues:
3236 tmpResult = i in pCounters
3237 pCounterResults = pCounterResults and tmpResult
3238 if not tmpResult:
3239 main.log.error( str( i ) + " is not in partitioned "
3240 "counter incremented results" )
3241 utilities.assert_equals( expect=True,
3242 actual=pCounterResults,
3243 onpass="Default counter incremented",
3244 onfail="Error incrementing default" +
3245 " counter" )
3246
3247 main.step( "Get then add 5 to a default counter on each node" )
3248 pCounters = []
3249 threads = []
3250 addedPValues = []
3251 for i in main.activeNodes:
3252 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3253 name="counterIncrement-" + str( i ),
3254 args=[ pCounterName ],
3255 kwargs={ "delta": 5 } )
3256 addedPValues.append( pCounterValue )
3257 pCounterValue += 5
3258 threads.append( t )
3259 t.start()
3260
3261 for t in threads:
3262 t.join()
3263 pCounters.append( t.result )
3264 # Check that counter incremented numController times
3265 pCounterResults = True
3266 for i in addedPValues:
3267 tmpResult = i in pCounters
3268 pCounterResults = pCounterResults and tmpResult
3269 if not tmpResult:
3270 main.log.error( str( i ) + " is not in partitioned "
3271 "counter incremented results" )
3272 utilities.assert_equals( expect=True,
3273 actual=pCounterResults,
3274 onpass="Default counter incremented",
3275 onfail="Error incrementing default" +
3276 " counter" )
3277
3278 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003279 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003280 utilities.assert_equals( expect=main.TRUE,
3281 actual=incrementCheck,
3282 onpass="Added counters are correct",
3283 onfail="Added counters are incorrect" )
3284
Jon Hall6e709752016-02-01 13:38:46 -08003285 # DISTRIBUTED SETS
3286 main.step( "Distributed Set get" )
3287 size = len( onosSet )
3288 getResponses = []
3289 threads = []
3290 for i in main.activeNodes:
3291 t = main.Thread( target=main.CLIs[i].setTestGet,
3292 name="setTestGet-" + str( i ),
3293 args=[ onosSetName ] )
3294 threads.append( t )
3295 t.start()
3296 for t in threads:
3297 t.join()
3298 getResponses.append( t.result )
3299
3300 getResults = main.TRUE
3301 for i in range( len( main.activeNodes ) ):
3302 node = str( main.activeNodes[i] + 1 )
3303 if isinstance( getResponses[ i ], list):
3304 current = set( getResponses[ i ] )
3305 if len( current ) == len( getResponses[ i ] ):
3306 # no repeats
3307 if onosSet != current:
3308 main.log.error( "ONOS" + node +
3309 " has incorrect view" +
3310 " of set " + onosSetName + ":\n" +
3311 str( getResponses[ i ] ) )
3312 main.log.debug( "Expected: " + str( onosSet ) )
3313 main.log.debug( "Actual: " + str( current ) )
3314 getResults = main.FALSE
3315 else:
3316 # error, set is not a set
3317 main.log.error( "ONOS" + node +
3318 " has repeat elements in" +
3319 " set " + onosSetName + ":\n" +
3320 str( getResponses[ i ] ) )
3321 getResults = main.FALSE
3322 elif getResponses[ i ] == main.ERROR:
3323 getResults = main.FALSE
3324 utilities.assert_equals( expect=main.TRUE,
3325 actual=getResults,
3326 onpass="Set elements are correct",
3327 onfail="Set elements are incorrect" )
3328
3329 main.step( "Distributed Set size" )
3330 sizeResponses = []
3331 threads = []
3332 for i in main.activeNodes:
3333 t = main.Thread( target=main.CLIs[i].setTestSize,
3334 name="setTestSize-" + str( i ),
3335 args=[ onosSetName ] )
3336 threads.append( t )
3337 t.start()
3338 for t in threads:
3339 t.join()
3340 sizeResponses.append( t.result )
3341
3342 sizeResults = main.TRUE
3343 for i in range( len( main.activeNodes ) ):
3344 node = str( main.activeNodes[i] + 1 )
3345 if size != sizeResponses[ i ]:
3346 sizeResults = main.FALSE
3347 main.log.error( "ONOS" + node +
3348 " expected a size of " + str( size ) +
3349 " for set " + onosSetName +
3350 " but got " + str( sizeResponses[ i ] ) )
3351 utilities.assert_equals( expect=main.TRUE,
3352 actual=sizeResults,
3353 onpass="Set sizes are correct",
3354 onfail="Set sizes are incorrect" )
3355
3356 main.step( "Distributed Set add()" )
3357 onosSet.add( addValue )
3358 addResponses = []
3359 threads = []
3360 for i in main.activeNodes:
3361 t = main.Thread( target=main.CLIs[i].setTestAdd,
3362 name="setTestAdd-" + str( i ),
3363 args=[ onosSetName, addValue ] )
3364 threads.append( t )
3365 t.start()
3366 for t in threads:
3367 t.join()
3368 addResponses.append( t.result )
3369
3370 # main.TRUE = successfully changed the set
3371 # main.FALSE = action resulted in no change in set
3372 # main.ERROR - Some error in executing the function
3373 addResults = main.TRUE
3374 for i in range( len( main.activeNodes ) ):
3375 if addResponses[ i ] == main.TRUE:
3376 # All is well
3377 pass
3378 elif addResponses[ i ] == main.FALSE:
3379 # Already in set, probably fine
3380 pass
3381 elif addResponses[ i ] == main.ERROR:
3382 # Error in execution
3383 addResults = main.FALSE
3384 else:
3385 # unexpected result
3386 addResults = main.FALSE
3387 if addResults != main.TRUE:
3388 main.log.error( "Error executing set add" )
3389
3390 # Check if set is still correct
3391 size = len( onosSet )
3392 getResponses = []
3393 threads = []
3394 for i in main.activeNodes:
3395 t = main.Thread( target=main.CLIs[i].setTestGet,
3396 name="setTestGet-" + str( i ),
3397 args=[ onosSetName ] )
3398 threads.append( t )
3399 t.start()
3400 for t in threads:
3401 t.join()
3402 getResponses.append( t.result )
3403 getResults = main.TRUE
3404 for i in range( len( main.activeNodes ) ):
3405 node = str( main.activeNodes[i] + 1 )
3406 if isinstance( getResponses[ i ], list):
3407 current = set( getResponses[ i ] )
3408 if len( current ) == len( getResponses[ i ] ):
3409 # no repeats
3410 if onosSet != current:
3411 main.log.error( "ONOS" + node + " has incorrect view" +
3412 " of set " + onosSetName + ":\n" +
3413 str( getResponses[ i ] ) )
3414 main.log.debug( "Expected: " + str( onosSet ) )
3415 main.log.debug( "Actual: " + str( current ) )
3416 getResults = main.FALSE
3417 else:
3418 # error, set is not a set
3419 main.log.error( "ONOS" + node + " has repeat elements in" +
3420 " set " + onosSetName + ":\n" +
3421 str( getResponses[ i ] ) )
3422 getResults = main.FALSE
3423 elif getResponses[ i ] == main.ERROR:
3424 getResults = main.FALSE
3425 sizeResponses = []
3426 threads = []
3427 for i in main.activeNodes:
3428 t = main.Thread( target=main.CLIs[i].setTestSize,
3429 name="setTestSize-" + str( i ),
3430 args=[ onosSetName ] )
3431 threads.append( t )
3432 t.start()
3433 for t in threads:
3434 t.join()
3435 sizeResponses.append( t.result )
3436 sizeResults = main.TRUE
3437 for i in range( len( main.activeNodes ) ):
3438 node = str( main.activeNodes[i] + 1 )
3439 if size != sizeResponses[ i ]:
3440 sizeResults = main.FALSE
3441 main.log.error( "ONOS" + node +
3442 " expected a size of " + str( size ) +
3443 " for set " + onosSetName +
3444 " but got " + str( sizeResponses[ i ] ) )
3445 addResults = addResults and getResults and sizeResults
3446 utilities.assert_equals( expect=main.TRUE,
3447 actual=addResults,
3448 onpass="Set add correct",
3449 onfail="Set add was incorrect" )
3450
3451 main.step( "Distributed Set addAll()" )
3452 onosSet.update( addAllValue.split() )
3453 addResponses = []
3454 threads = []
3455 for i in main.activeNodes:
3456 t = main.Thread( target=main.CLIs[i].setTestAdd,
3457 name="setTestAddAll-" + str( i ),
3458 args=[ onosSetName, addAllValue ] )
3459 threads.append( t )
3460 t.start()
3461 for t in threads:
3462 t.join()
3463 addResponses.append( t.result )
3464
3465 # main.TRUE = successfully changed the set
3466 # main.FALSE = action resulted in no change in set
3467 # main.ERROR - Some error in executing the function
3468 addAllResults = main.TRUE
3469 for i in range( len( main.activeNodes ) ):
3470 if addResponses[ i ] == main.TRUE:
3471 # All is well
3472 pass
3473 elif addResponses[ i ] == main.FALSE:
3474 # Already in set, probably fine
3475 pass
3476 elif addResponses[ i ] == main.ERROR:
3477 # Error in execution
3478 addAllResults = main.FALSE
3479 else:
3480 # unexpected result
3481 addAllResults = main.FALSE
3482 if addAllResults != main.TRUE:
3483 main.log.error( "Error executing set addAll" )
3484
3485 # Check if set is still correct
3486 size = len( onosSet )
3487 getResponses = []
3488 threads = []
3489 for i in main.activeNodes:
3490 t = main.Thread( target=main.CLIs[i].setTestGet,
3491 name="setTestGet-" + str( i ),
3492 args=[ onosSetName ] )
3493 threads.append( t )
3494 t.start()
3495 for t in threads:
3496 t.join()
3497 getResponses.append( t.result )
3498 getResults = main.TRUE
3499 for i in range( len( main.activeNodes ) ):
3500 node = str( main.activeNodes[i] + 1 )
3501 if isinstance( getResponses[ i ], list):
3502 current = set( getResponses[ i ] )
3503 if len( current ) == len( getResponses[ i ] ):
3504 # no repeats
3505 if onosSet != current:
3506 main.log.error( "ONOS" + node +
3507 " has incorrect view" +
3508 " of set " + onosSetName + ":\n" +
3509 str( getResponses[ i ] ) )
3510 main.log.debug( "Expected: " + str( onosSet ) )
3511 main.log.debug( "Actual: " + str( current ) )
3512 getResults = main.FALSE
3513 else:
3514 # error, set is not a set
3515 main.log.error( "ONOS" + node +
3516 " has repeat elements in" +
3517 " set " + onosSetName + ":\n" +
3518 str( getResponses[ i ] ) )
3519 getResults = main.FALSE
3520 elif getResponses[ i ] == main.ERROR:
3521 getResults = main.FALSE
3522 sizeResponses = []
3523 threads = []
3524 for i in main.activeNodes:
3525 t = main.Thread( target=main.CLIs[i].setTestSize,
3526 name="setTestSize-" + str( i ),
3527 args=[ onosSetName ] )
3528 threads.append( t )
3529 t.start()
3530 for t in threads:
3531 t.join()
3532 sizeResponses.append( t.result )
3533 sizeResults = main.TRUE
3534 for i in range( len( main.activeNodes ) ):
3535 node = str( main.activeNodes[i] + 1 )
3536 if size != sizeResponses[ i ]:
3537 sizeResults = main.FALSE
3538 main.log.error( "ONOS" + node +
3539 " expected a size of " + str( size ) +
3540 " for set " + onosSetName +
3541 " but got " + str( sizeResponses[ i ] ) )
3542 addAllResults = addAllResults and getResults and sizeResults
3543 utilities.assert_equals( expect=main.TRUE,
3544 actual=addAllResults,
3545 onpass="Set addAll correct",
3546 onfail="Set addAll was incorrect" )
3547
        main.step( "Distributed Set contains()" )
        containsResponses = []
        threads = []
        # Ask every active node whether the set contains addValue.
        # setTestGet with a "values" kwarg returns a tuple; index 1 is the
        # boolean-ish contains result (see its use below).
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setContains-" + str( i ),
                             args=[ onosSetName ],
                             kwargs={ "values": addValue } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            # NOTE: This is the tuple
            containsResponses.append( t.result )

        containsResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if containsResponses[ i ] == main.ERROR:
                containsResults = main.FALSE
            else:
                # AND in the contains result from each node; one failure
                # fails the whole step
                containsResults = containsResults and\
                                  containsResponses[ i ][ 1 ]
        utilities.assert_equals( expect=main.TRUE,
                                 actual=containsResults,
                                 onpass="Set contains is functional",
                                 onfail="Set contains failed" )
3574
3575 main.step( "Distributed Set containsAll()" )
3576 containsAllResponses = []
3577 threads = []
3578 for i in main.activeNodes:
3579 t = main.Thread( target=main.CLIs[i].setTestGet,
3580 name="setContainsAll-" + str( i ),
3581 args=[ onosSetName ],
3582 kwargs={ "values": addAllValue } )
3583 threads.append( t )
3584 t.start()
3585 for t in threads:
3586 t.join()
3587 # NOTE: This is the tuple
3588 containsAllResponses.append( t.result )
3589
3590 containsAllResults = main.TRUE
3591 for i in range( len( main.activeNodes ) ):
3592 if containsResponses[ i ] == main.ERROR:
3593 containsResults = main.FALSE
3594 else:
3595 containsResults = containsResults and\
3596 containsResponses[ i ][ 1 ]
3597 utilities.assert_equals( expect=main.TRUE,
3598 actual=containsAllResults,
3599 onpass="Set containsAll is functional",
3600 onfail="Set containsAll failed" )
3601
        main.step( "Distributed Set remove()" )
        # Keep the local reference set in sync with the expected ONOS state;
        # raises KeyError if addValue is absent (would mean an earlier step
        # failed to add it)
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        # Remove addValue from the distributed set on every active node in
        # parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )
3635
3636 # Check if set is still correct
3637 size = len( onosSet )
3638 getResponses = []
3639 threads = []
3640 for i in main.activeNodes:
3641 t = main.Thread( target=main.CLIs[i].setTestGet,
3642 name="setTestGet-" + str( i ),
3643 args=[ onosSetName ] )
3644 threads.append( t )
3645 t.start()
3646 for t in threads:
3647 t.join()
3648 getResponses.append( t.result )
3649 getResults = main.TRUE
3650 for i in range( len( main.activeNodes ) ):
3651 node = str( main.activeNodes[i] + 1 )
3652 if isinstance( getResponses[ i ], list):
3653 current = set( getResponses[ i ] )
3654 if len( current ) == len( getResponses[ i ] ):
3655 # no repeats
3656 if onosSet != current:
3657 main.log.error( "ONOS" + node +
3658 " has incorrect view" +
3659 " of set " + onosSetName + ":\n" +
3660 str( getResponses[ i ] ) )
3661 main.log.debug( "Expected: " + str( onosSet ) )
3662 main.log.debug( "Actual: " + str( current ) )
3663 getResults = main.FALSE
3664 else:
3665 # error, set is not a set
3666 main.log.error( "ONOS" + node +
3667 " has repeat elements in" +
3668 " set " + onosSetName + ":\n" +
3669 str( getResponses[ i ] ) )
3670 getResults = main.FALSE
3671 elif getResponses[ i ] == main.ERROR:
3672 getResults = main.FALSE
3673 sizeResponses = []
3674 threads = []
3675 for i in main.activeNodes:
3676 t = main.Thread( target=main.CLIs[i].setTestSize,
3677 name="setTestSize-" + str( i ),
3678 args=[ onosSetName ] )
3679 threads.append( t )
3680 t.start()
3681 for t in threads:
3682 t.join()
3683 sizeResponses.append( t.result )
3684 sizeResults = main.TRUE
3685 for i in range( len( main.activeNodes ) ):
3686 node = str( main.activeNodes[i] + 1 )
3687 if size != sizeResponses[ i ]:
3688 sizeResults = main.FALSE
3689 main.log.error( "ONOS" + node +
3690 " expected a size of " + str( size ) +
3691 " for set " + onosSetName +
3692 " but got " + str( sizeResponses[ i ] ) )
3693 removeResults = removeResults and getResults and sizeResults
3694 utilities.assert_equals( expect=main.TRUE,
3695 actual=removeResults,
3696 onpass="Set remove correct",
3697 onfail="Set remove was incorrect" )
3698
3699 main.step( "Distributed Set removeAll()" )
3700 onosSet.difference_update( addAllValue.split() )
3701 removeAllResponses = []
3702 threads = []
3703 try:
3704 for i in main.activeNodes:
3705 t = main.Thread( target=main.CLIs[i].setTestRemove,
3706 name="setTestRemoveAll-" + str( i ),
3707 args=[ onosSetName, addAllValue ] )
3708 threads.append( t )
3709 t.start()
3710 for t in threads:
3711 t.join()
3712 removeAllResponses.append( t.result )
3713 except Exception, e:
3714 main.log.exception(e)
3715
3716 # main.TRUE = successfully changed the set
3717 # main.FALSE = action resulted in no change in set
3718 # main.ERROR - Some error in executing the function
3719 removeAllResults = main.TRUE
3720 for i in range( len( main.activeNodes ) ):
3721 if removeAllResponses[ i ] == main.TRUE:
3722 # All is well
3723 pass
3724 elif removeAllResponses[ i ] == main.FALSE:
3725 # not in set, probably fine
3726 pass
3727 elif removeAllResponses[ i ] == main.ERROR:
3728 # Error in execution
3729 removeAllResults = main.FALSE
3730 else:
3731 # unexpected result
3732 removeAllResults = main.FALSE
3733 if removeAllResults != main.TRUE:
3734 main.log.error( "Error executing set removeAll" )
3735
3736 # Check if set is still correct
3737 size = len( onosSet )
3738 getResponses = []
3739 threads = []
3740 for i in main.activeNodes:
3741 t = main.Thread( target=main.CLIs[i].setTestGet,
3742 name="setTestGet-" + str( i ),
3743 args=[ onosSetName ] )
3744 threads.append( t )
3745 t.start()
3746 for t in threads:
3747 t.join()
3748 getResponses.append( t.result )
3749 getResults = main.TRUE
3750 for i in range( len( main.activeNodes ) ):
3751 node = str( main.activeNodes[i] + 1 )
3752 if isinstance( getResponses[ i ], list):
3753 current = set( getResponses[ i ] )
3754 if len( current ) == len( getResponses[ i ] ):
3755 # no repeats
3756 if onosSet != current:
3757 main.log.error( "ONOS" + node +
3758 " has incorrect view" +
3759 " of set " + onosSetName + ":\n" +
3760 str( getResponses[ i ] ) )
3761 main.log.debug( "Expected: " + str( onosSet ) )
3762 main.log.debug( "Actual: " + str( current ) )
3763 getResults = main.FALSE
3764 else:
3765 # error, set is not a set
3766 main.log.error( "ONOS" + node +
3767 " has repeat elements in" +
3768 " set " + onosSetName + ":\n" +
3769 str( getResponses[ i ] ) )
3770 getResults = main.FALSE
3771 elif getResponses[ i ] == main.ERROR:
3772 getResults = main.FALSE
3773 sizeResponses = []
3774 threads = []
3775 for i in main.activeNodes:
3776 t = main.Thread( target=main.CLIs[i].setTestSize,
3777 name="setTestSize-" + str( i ),
3778 args=[ onosSetName ] )
3779 threads.append( t )
3780 t.start()
3781 for t in threads:
3782 t.join()
3783 sizeResponses.append( t.result )
3784 sizeResults = main.TRUE
3785 for i in range( len( main.activeNodes ) ):
3786 node = str( main.activeNodes[i] + 1 )
3787 if size != sizeResponses[ i ]:
3788 sizeResults = main.FALSE
3789 main.log.error( "ONOS" + node +
3790 " expected a size of " + str( size ) +
3791 " for set " + onosSetName +
3792 " but got " + str( sizeResponses[ i ] ) )
3793 removeAllResults = removeAllResults and getResults and sizeResults
3794 utilities.assert_equals( expect=main.TRUE,
3795 actual=removeAllResults,
3796 onpass="Set removeAll correct",
3797 onfail="Set removeAll was incorrect" )
3798
3799 main.step( "Distributed Set addAll()" )
3800 onosSet.update( addAllValue.split() )
3801 addResponses = []
3802 threads = []
3803 for i in main.activeNodes:
3804 t = main.Thread( target=main.CLIs[i].setTestAdd,
3805 name="setTestAddAll-" + str( i ),
3806 args=[ onosSetName, addAllValue ] )
3807 threads.append( t )
3808 t.start()
3809 for t in threads:
3810 t.join()
3811 addResponses.append( t.result )
3812
3813 # main.TRUE = successfully changed the set
3814 # main.FALSE = action resulted in no change in set
3815 # main.ERROR - Some error in executing the function
3816 addAllResults = main.TRUE
3817 for i in range( len( main.activeNodes ) ):
3818 if addResponses[ i ] == main.TRUE:
3819 # All is well
3820 pass
3821 elif addResponses[ i ] == main.FALSE:
3822 # Already in set, probably fine
3823 pass
3824 elif addResponses[ i ] == main.ERROR:
3825 # Error in execution
3826 addAllResults = main.FALSE
3827 else:
3828 # unexpected result
3829 addAllResults = main.FALSE
3830 if addAllResults != main.TRUE:
3831 main.log.error( "Error executing set addAll" )
3832
3833 # Check if set is still correct
3834 size = len( onosSet )
3835 getResponses = []
3836 threads = []
3837 for i in main.activeNodes:
3838 t = main.Thread( target=main.CLIs[i].setTestGet,
3839 name="setTestGet-" + str( i ),
3840 args=[ onosSetName ] )
3841 threads.append( t )
3842 t.start()
3843 for t in threads:
3844 t.join()
3845 getResponses.append( t.result )
3846 getResults = main.TRUE
3847 for i in range( len( main.activeNodes ) ):
3848 node = str( main.activeNodes[i] + 1 )
3849 if isinstance( getResponses[ i ], list):
3850 current = set( getResponses[ i ] )
3851 if len( current ) == len( getResponses[ i ] ):
3852 # no repeats
3853 if onosSet != current:
3854 main.log.error( "ONOS" + node +
3855 " has incorrect view" +
3856 " of set " + onosSetName + ":\n" +
3857 str( getResponses[ i ] ) )
3858 main.log.debug( "Expected: " + str( onosSet ) )
3859 main.log.debug( "Actual: " + str( current ) )
3860 getResults = main.FALSE
3861 else:
3862 # error, set is not a set
3863 main.log.error( "ONOS" + node +
3864 " has repeat elements in" +
3865 " set " + onosSetName + ":\n" +
3866 str( getResponses[ i ] ) )
3867 getResults = main.FALSE
3868 elif getResponses[ i ] == main.ERROR:
3869 getResults = main.FALSE
3870 sizeResponses = []
3871 threads = []
3872 for i in main.activeNodes:
3873 t = main.Thread( target=main.CLIs[i].setTestSize,
3874 name="setTestSize-" + str( i ),
3875 args=[ onosSetName ] )
3876 threads.append( t )
3877 t.start()
3878 for t in threads:
3879 t.join()
3880 sizeResponses.append( t.result )
3881 sizeResults = main.TRUE
3882 for i in range( len( main.activeNodes ) ):
3883 node = str( main.activeNodes[i] + 1 )
3884 if size != sizeResponses[ i ]:
3885 sizeResults = main.FALSE
3886 main.log.error( "ONOS" + node +
3887 " expected a size of " + str( size ) +
3888 " for set " + onosSetName +
3889 " but got " + str( sizeResponses[ i ] ) )
3890 addAllResults = addAllResults and getResults and sizeResults
3891 utilities.assert_equals( expect=main.TRUE,
3892 actual=addAllResults,
3893 onpass="Set addAll correct",
3894 onfail="Set addAll was incorrect" )
3895
3896 main.step( "Distributed Set clear()" )
3897 onosSet.clear()
3898 clearResponses = []
3899 threads = []
3900 for i in main.activeNodes:
3901 t = main.Thread( target=main.CLIs[i].setTestRemove,
3902 name="setTestClear-" + str( i ),
3903 args=[ onosSetName, " "], # Values doesn't matter
3904 kwargs={ "clear": True } )
3905 threads.append( t )
3906 t.start()
3907 for t in threads:
3908 t.join()
3909 clearResponses.append( t.result )
3910
3911 # main.TRUE = successfully changed the set
3912 # main.FALSE = action resulted in no change in set
3913 # main.ERROR - Some error in executing the function
3914 clearResults = main.TRUE
3915 for i in range( len( main.activeNodes ) ):
3916 if clearResponses[ i ] == main.TRUE:
3917 # All is well
3918 pass
3919 elif clearResponses[ i ] == main.FALSE:
3920 # Nothing set, probably fine
3921 pass
3922 elif clearResponses[ i ] == main.ERROR:
3923 # Error in execution
3924 clearResults = main.FALSE
3925 else:
3926 # unexpected result
3927 clearResults = main.FALSE
3928 if clearResults != main.TRUE:
3929 main.log.error( "Error executing set clear" )
3930
3931 # Check if set is still correct
3932 size = len( onosSet )
3933 getResponses = []
3934 threads = []
3935 for i in main.activeNodes:
3936 t = main.Thread( target=main.CLIs[i].setTestGet,
3937 name="setTestGet-" + str( i ),
3938 args=[ onosSetName ] )
3939 threads.append( t )
3940 t.start()
3941 for t in threads:
3942 t.join()
3943 getResponses.append( t.result )
3944 getResults = main.TRUE
3945 for i in range( len( main.activeNodes ) ):
3946 node = str( main.activeNodes[i] + 1 )
3947 if isinstance( getResponses[ i ], list):
3948 current = set( getResponses[ i ] )
3949 if len( current ) == len( getResponses[ i ] ):
3950 # no repeats
3951 if onosSet != current:
3952 main.log.error( "ONOS" + node +
3953 " has incorrect view" +
3954 " of set " + onosSetName + ":\n" +
3955 str( getResponses[ i ] ) )
3956 main.log.debug( "Expected: " + str( onosSet ) )
3957 main.log.debug( "Actual: " + str( current ) )
3958 getResults = main.FALSE
3959 else:
3960 # error, set is not a set
3961 main.log.error( "ONOS" + node +
3962 " has repeat elements in" +
3963 " set " + onosSetName + ":\n" +
3964 str( getResponses[ i ] ) )
3965 getResults = main.FALSE
3966 elif getResponses[ i ] == main.ERROR:
3967 getResults = main.FALSE
3968 sizeResponses = []
3969 threads = []
3970 for i in main.activeNodes:
3971 t = main.Thread( target=main.CLIs[i].setTestSize,
3972 name="setTestSize-" + str( i ),
3973 args=[ onosSetName ] )
3974 threads.append( t )
3975 t.start()
3976 for t in threads:
3977 t.join()
3978 sizeResponses.append( t.result )
3979 sizeResults = main.TRUE
3980 for i in range( len( main.activeNodes ) ):
3981 node = str( main.activeNodes[i] + 1 )
3982 if size != sizeResponses[ i ]:
3983 sizeResults = main.FALSE
3984 main.log.error( "ONOS" + node +
3985 " expected a size of " + str( size ) +
3986 " for set " + onosSetName +
3987 " but got " + str( sizeResponses[ i ] ) )
3988 clearResults = clearResults and getResults and sizeResults
3989 utilities.assert_equals( expect=main.TRUE,
3990 actual=clearResults,
3991 onpass="Set clear correct",
3992 onfail="Set clear was incorrect" )
3993
3994 main.step( "Distributed Set addAll()" )
3995 onosSet.update( addAllValue.split() )
3996 addResponses = []
3997 threads = []
3998 for i in main.activeNodes:
3999 t = main.Thread( target=main.CLIs[i].setTestAdd,
4000 name="setTestAddAll-" + str( i ),
4001 args=[ onosSetName, addAllValue ] )
4002 threads.append( t )
4003 t.start()
4004 for t in threads:
4005 t.join()
4006 addResponses.append( t.result )
4007
4008 # main.TRUE = successfully changed the set
4009 # main.FALSE = action resulted in no change in set
4010 # main.ERROR - Some error in executing the function
4011 addAllResults = main.TRUE
4012 for i in range( len( main.activeNodes ) ):
4013 if addResponses[ i ] == main.TRUE:
4014 # All is well
4015 pass
4016 elif addResponses[ i ] == main.FALSE:
4017 # Already in set, probably fine
4018 pass
4019 elif addResponses[ i ] == main.ERROR:
4020 # Error in execution
4021 addAllResults = main.FALSE
4022 else:
4023 # unexpected result
4024 addAllResults = main.FALSE
4025 if addAllResults != main.TRUE:
4026 main.log.error( "Error executing set addAll" )
4027
4028 # Check if set is still correct
4029 size = len( onosSet )
4030 getResponses = []
4031 threads = []
4032 for i in main.activeNodes:
4033 t = main.Thread( target=main.CLIs[i].setTestGet,
4034 name="setTestGet-" + str( i ),
4035 args=[ onosSetName ] )
4036 threads.append( t )
4037 t.start()
4038 for t in threads:
4039 t.join()
4040 getResponses.append( t.result )
4041 getResults = main.TRUE
4042 for i in range( len( main.activeNodes ) ):
4043 node = str( main.activeNodes[i] + 1 )
4044 if isinstance( getResponses[ i ], list):
4045 current = set( getResponses[ i ] )
4046 if len( current ) == len( getResponses[ i ] ):
4047 # no repeats
4048 if onosSet != current:
4049 main.log.error( "ONOS" + node +
4050 " has incorrect view" +
4051 " of set " + onosSetName + ":\n" +
4052 str( getResponses[ i ] ) )
4053 main.log.debug( "Expected: " + str( onosSet ) )
4054 main.log.debug( "Actual: " + str( current ) )
4055 getResults = main.FALSE
4056 else:
4057 # error, set is not a set
4058 main.log.error( "ONOS" + node +
4059 " has repeat elements in" +
4060 " set " + onosSetName + ":\n" +
4061 str( getResponses[ i ] ) )
4062 getResults = main.FALSE
4063 elif getResponses[ i ] == main.ERROR:
4064 getResults = main.FALSE
4065 sizeResponses = []
4066 threads = []
4067 for i in main.activeNodes:
4068 t = main.Thread( target=main.CLIs[i].setTestSize,
4069 name="setTestSize-" + str( i ),
4070 args=[ onosSetName ] )
4071 threads.append( t )
4072 t.start()
4073 for t in threads:
4074 t.join()
4075 sizeResponses.append( t.result )
4076 sizeResults = main.TRUE
4077 for i in range( len( main.activeNodes ) ):
4078 node = str( main.activeNodes[i] + 1 )
4079 if size != sizeResponses[ i ]:
4080 sizeResults = main.FALSE
4081 main.log.error( "ONOS" + node +
4082 " expected a size of " + str( size ) +
4083 " for set " + onosSetName +
4084 " but got " + str( sizeResponses[ i ] ) )
4085 addAllResults = addAllResults and getResults and sizeResults
4086 utilities.assert_equals( expect=main.TRUE,
4087 actual=addAllResults,
4088 onpass="Set addAll correct",
4089 onfail="Set addAll was incorrect" )
4090
4091 main.step( "Distributed Set retain()" )
4092 onosSet.intersection_update( retainValue.split() )
4093 retainResponses = []
4094 threads = []
4095 for i in main.activeNodes:
4096 t = main.Thread( target=main.CLIs[i].setTestRemove,
4097 name="setTestRetain-" + str( i ),
4098 args=[ onosSetName, retainValue ],
4099 kwargs={ "retain": True } )
4100 threads.append( t )
4101 t.start()
4102 for t in threads:
4103 t.join()
4104 retainResponses.append( t.result )
4105
4106 # main.TRUE = successfully changed the set
4107 # main.FALSE = action resulted in no change in set
4108 # main.ERROR - Some error in executing the function
4109 retainResults = main.TRUE
4110 for i in range( len( main.activeNodes ) ):
4111 if retainResponses[ i ] == main.TRUE:
4112 # All is well
4113 pass
4114 elif retainResponses[ i ] == main.FALSE:
4115 # Already in set, probably fine
4116 pass
4117 elif retainResponses[ i ] == main.ERROR:
4118 # Error in execution
4119 retainResults = main.FALSE
4120 else:
4121 # unexpected result
4122 retainResults = main.FALSE
4123 if retainResults != main.TRUE:
4124 main.log.error( "Error executing set retain" )
4125
4126 # Check if set is still correct
4127 size = len( onosSet )
4128 getResponses = []
4129 threads = []
4130 for i in main.activeNodes:
4131 t = main.Thread( target=main.CLIs[i].setTestGet,
4132 name="setTestGet-" + str( i ),
4133 args=[ onosSetName ] )
4134 threads.append( t )
4135 t.start()
4136 for t in threads:
4137 t.join()
4138 getResponses.append( t.result )
4139 getResults = main.TRUE
4140 for i in range( len( main.activeNodes ) ):
4141 node = str( main.activeNodes[i] + 1 )
4142 if isinstance( getResponses[ i ], list):
4143 current = set( getResponses[ i ] )
4144 if len( current ) == len( getResponses[ i ] ):
4145 # no repeats
4146 if onosSet != current:
4147 main.log.error( "ONOS" + node +
4148 " has incorrect view" +
4149 " of set " + onosSetName + ":\n" +
4150 str( getResponses[ i ] ) )
4151 main.log.debug( "Expected: " + str( onosSet ) )
4152 main.log.debug( "Actual: " + str( current ) )
4153 getResults = main.FALSE
4154 else:
4155 # error, set is not a set
4156 main.log.error( "ONOS" + node +
4157 " has repeat elements in" +
4158 " set " + onosSetName + ":\n" +
4159 str( getResponses[ i ] ) )
4160 getResults = main.FALSE
4161 elif getResponses[ i ] == main.ERROR:
4162 getResults = main.FALSE
4163 sizeResponses = []
4164 threads = []
4165 for i in main.activeNodes:
4166 t = main.Thread( target=main.CLIs[i].setTestSize,
4167 name="setTestSize-" + str( i ),
4168 args=[ onosSetName ] )
4169 threads.append( t )
4170 t.start()
4171 for t in threads:
4172 t.join()
4173 sizeResponses.append( t.result )
4174 sizeResults = main.TRUE
4175 for i in range( len( main.activeNodes ) ):
4176 node = str( main.activeNodes[i] + 1 )
4177 if size != sizeResponses[ i ]:
4178 sizeResults = main.FALSE
4179 main.log.error( "ONOS" + node + " expected a size of " +
4180 str( size ) + " for set " + onosSetName +
4181 " but got " + str( sizeResponses[ i ] ) )
4182 retainResults = retainResults and getResults and sizeResults
4183 utilities.assert_equals( expect=main.TRUE,
4184 actual=retainResults,
4185 onpass="Set retain correct",
4186 onfail="Set retain was incorrect" )
4187
4188 # Transactional maps
4189 main.step( "Partitioned Transactional maps put" )
4190 tMapValue = "Testing"
4191 numKeys = 100
4192 putResult = True
4193 node = main.activeNodes[0]
4194 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4195 if putResponses and len( putResponses ) == 100:
4196 for i in putResponses:
4197 if putResponses[ i ][ 'value' ] != tMapValue:
4198 putResult = False
4199 else:
4200 putResult = False
4201 if not putResult:
4202 main.log.debug( "Put response values: " + str( putResponses ) )
4203 utilities.assert_equals( expect=True,
4204 actual=putResult,
4205 onpass="Partitioned Transactional Map put successful",
4206 onfail="Partitioned Transactional Map put values are incorrect" )
4207
        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        # For each key written by the put step, read it back from every
        # active node and verify all nodes agree on the value
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE(review): 'node' here shadows the outer node variable set
            # in the put step above
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )