blob: cebf43e884421fd0845d421d74bb1538f1cc0a17 [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
             a full network partition

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
26
27
28class HAfullNetPartition:
29
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
51 import imp
52 import pexpect
53 import time
Jon Halla440e872016-03-31 15:15:50 -070054 import json
Jon Hall6e709752016-02-01 13:38:46 -080055 main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
56 "initialization" )
57 main.case( "Setting up test environment" )
58 main.caseExplanation = "Setup the test environment including " +\
59 "installing ONOS, starting Mininet and ONOS" +\
60 "cli sessions."
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
69 main.numCtrls = int( main.params[ 'num_controllers' ] )
70 if main.ONOSbench.maxNodes:
71 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
74 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070081 # These are for csv plotting in jenkins
82 global labels
83 global data
84 labels = []
85 data = []
Jon Hall6e709752016-02-01 13:38:46 -080086
87 # FIXME: just get controller port from params?
88 # TODO: do we really need all these?
89 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
90 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
91 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
92 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
93 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
94 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
95 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
96
97 try:
Jon Hall41d39f12016-04-11 22:54:35 -070098 from tests.HAsanity.dependencies.HA import HA
99 main.HA = HA()
Jon Hall6e709752016-02-01 13:38:46 -0800100 except Exception as e:
101 main.log.exception( e )
102 main.cleanup()
103 main.exit()
104
105 main.CLIs = []
106 main.nodes = []
107 ipList = []
108 for i in range( 1, main.numCtrls + 1 ):
109 try:
110 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
111 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
112 ipList.append( main.nodes[ -1 ].ip_address )
113 except AttributeError:
114 break
115
116 main.step( "Create cell file" )
117 cellAppString = main.params[ 'ENV' ][ 'appString' ]
118 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
119 main.Mininet1.ip_address,
120 cellAppString, ipList )
121 main.step( "Applying cell variable to environment" )
122 cellResult = main.ONOSbench.setCell( cellName )
123 verifyResult = main.ONOSbench.verifyCell()
124
125 # FIXME:this is short term fix
126 main.log.info( "Removing raft logs" )
127 main.ONOSbench.onosRemoveRaftLogs()
128
129 main.log.info( "Uninstalling ONOS" )
130 for node in main.nodes:
131 main.ONOSbench.onosUninstall( node.ip_address )
132
133 # Make sure ONOS is DEAD
134 main.log.info( "Killing any ONOS processes" )
135 killResults = main.TRUE
136 for node in main.nodes:
137 killed = main.ONOSbench.onosKill( node.ip_address )
138 killResults = killResults and killed
139
140 cleanInstallResult = main.TRUE
141 gitPullResult = main.TRUE
142
143 main.step( "Starting Mininet" )
144 # scp topo file to mininet
145 # TODO: move to params?
146 topoName = "obelisk.py"
147 filePath = main.ONOSbench.home + "/tools/test/topos/"
148 main.ONOSbench.scp( main.Mininet1,
149 filePath + topoName,
150 main.Mininet1.home,
151 direction="to" )
152 mnResult = main.Mininet1.startNet( )
153 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
154 onpass="Mininet Started",
155 onfail="Error starting Mininet" )
156
157 main.step( "Git checkout and pull " + gitBranch )
158 if PULLCODE:
159 main.ONOSbench.gitCheckout( gitBranch )
160 gitPullResult = main.ONOSbench.gitPull()
161 # values of 1 or 3 are good
162 utilities.assert_lesser( expect=0, actual=gitPullResult,
163 onpass="Git pull successful",
164 onfail="Git pull failed" )
165 main.ONOSbench.getVersion( report=True )
166
167 main.step( "Using mvn clean install" )
168 cleanInstallResult = main.TRUE
169 if PULLCODE and gitPullResult == main.TRUE:
170 cleanInstallResult = main.ONOSbench.cleanInstall()
171 else:
172 main.log.warn( "Did not pull new code so skipping mvn " +
173 "clean install" )
174 utilities.assert_equals( expect=main.TRUE,
175 actual=cleanInstallResult,
176 onpass="MCI successful",
177 onfail="MCI failed" )
178 # GRAPHS
179 # NOTE: important params here:
180 # job = name of Jenkins job
181 # Plot Name = Plot-HA, only can be used if multiple plots
182 # index = The number of the graph under plot name
183 job = "HAfullNetPartition"
184 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700185 index = "1"
Jon Hall6e709752016-02-01 13:38:46 -0800186 graphs = '<ac:structured-macro ac:name="html">\n'
187 graphs += '<ac:plain-text-body><![CDATA[\n'
188 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
189 '/plot/' + plotName + '/getPlot?index=' + index +\
190 '&width=500&height=300"' +\
191 'noborder="0" width="500" height="300" scrolling="yes" ' +\
192 'seamless="seamless"></iframe>\n'
193 graphs += ']]></ac:plain-text-body>\n'
194 graphs += '</ac:structured-macro>\n'
195 main.log.wiki(graphs)
196
197 main.step( "Creating ONOS package" )
198 # copy gen-partions file to ONOS
199 # NOTE: this assumes TestON and ONOS are on the same machine
200 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
201 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
202 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
203 main.ONOSbench.ip_address,
204 srcFile,
205 dstDir,
206 pwd=main.ONOSbench.pwd,
207 direction="from" )
208 packageResult = main.ONOSbench.onosPackage()
209 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
210 onpass="ONOS package successful",
211 onfail="ONOS package failed" )
212
213 main.step( "Installing ONOS package" )
214 onosInstallResult = main.TRUE
215 for node in main.nodes:
216 tmpResult = main.ONOSbench.onosInstall( options="-f",
217 node=node.ip_address )
218 onosInstallResult = onosInstallResult and tmpResult
219 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
220 onpass="ONOS install successful",
221 onfail="ONOS install failed" )
222 # clean up gen-partitions file
223 try:
224 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
225 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
226 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
227 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
228 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
229 str( main.ONOSbench.handle.before ) )
230 except ( pexpect.TIMEOUT, pexpect.EOF ):
231 main.log.exception( "ONOSbench: pexpect exception found:" +
232 main.ONOSbench.handle.before )
233 main.cleanup()
234 main.exit()
235
236 main.step( "Checking if ONOS is up yet" )
237 for i in range( 2 ):
238 onosIsupResult = main.TRUE
239 for node in main.nodes:
240 started = main.ONOSbench.isup( node.ip_address )
241 if not started:
242 main.log.error( node.name + " hasn't started" )
243 onosIsupResult = onosIsupResult and started
244 if onosIsupResult == main.TRUE:
245 break
246 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
247 onpass="ONOS startup successful",
248 onfail="ONOS startup failed" )
249
250 main.log.step( "Starting ONOS CLI sessions" )
251 cliResults = main.TRUE
252 threads = []
253 for i in range( main.numCtrls ):
254 t = main.Thread( target=main.CLIs[i].startOnosCli,
255 name="startOnosCli-" + str( i ),
256 args=[main.nodes[i].ip_address] )
257 threads.append( t )
258 t.start()
259
260 for t in threads:
261 t.join()
262 cliResults = cliResults and t.result
263 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
264 onpass="ONOS cli startup successful",
265 onfail="ONOS cli startup failed" )
266
267 # Create a list of active nodes for use when some nodes are stopped
268 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
269
270 if main.params[ 'tcpdump' ].lower() == "true":
271 main.step( "Start Packet Capture MN" )
272 main.Mininet2.startTcpdump(
273 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
274 + "-MN.pcap",
275 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
276 port=main.params[ 'MNtcpdump' ][ 'port' ] )
277
Jon Halla440e872016-03-31 15:15:50 -0700278 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -0700279 nodeResults = utilities.retry( main.HA.nodesCheck,
280 False,
281 args=[main.activeNodes],
282 attempts=5 )
Jon Halla440e872016-03-31 15:15:50 -0700283
Jon Hall41d39f12016-04-11 22:54:35 -0700284 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Halla440e872016-03-31 15:15:50 -0700285 onpass="Nodes check successful",
286 onfail="Nodes check NOT successful" )
287
288 if not nodeResults:
289 for cli in main.CLIs:
290 main.log.debug( "{} components not ACTIVE: \n{}".format(
291 cli.name,
292 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
293
Jon Hall6e709752016-02-01 13:38:46 -0800294 if cliResults == main.FALSE:
295 main.log.error( "Failed to start ONOS, stopping test" )
296 main.cleanup()
297 main.exit()
298
Jon Hall172b7ba2016-04-07 18:12:20 -0700299 main.step( "Activate apps defined in the params file" )
300 # get data from the params
301 apps = main.params.get( 'apps' )
302 if apps:
303 apps = apps.split(',')
304 main.log.warn( apps )
305 activateResult = True
306 for app in apps:
307 main.CLIs[ 0 ].app( app, "Activate" )
308 # TODO: check this worked
309 time.sleep( 10 ) # wait for apps to activate
310 for app in apps:
311 state = main.CLIs[ 0 ].appStatus( app )
312 if state == "ACTIVE":
313 activateResult = activeResult and True
314 else:
315 main.log.error( "{} is in {} state".format( app, state ) )
316 activeResult = False
317 utilities.assert_equals( expect=True,
318 actual=activateResult,
319 onpass="Successfully activated apps",
320 onfail="Failed to activate apps" )
321 else:
322 main.log.warn( "No apps were specified to be loaded after startup" )
323
324 main.step( "Set ONOS configurations" )
325 config = main.params.get( 'ONOS_Configuration' )
326 if config:
327 main.log.debug( config )
328 checkResult = main.TRUE
329 for component in config:
330 for setting in config[component]:
331 value = config[component][setting]
332 check = main.CLIs[ 0 ].setCfg( component, setting, value )
333 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
334 checkResult = check and checkResult
335 utilities.assert_equals( expect=main.TRUE,
336 actual=checkResult,
337 onpass="Successfully set config",
338 onfail="Failed to set config" )
339 else:
340 main.log.warn( "No configurations were specified to be changed after startup" )
341
Jon Hall9d2dcad2016-04-08 10:15:20 -0700342 main.step( "App Ids check" )
343 appCheck = main.TRUE
344 threads = []
345 for i in main.activeNodes:
346 t = main.Thread( target=main.CLIs[i].appToIDCheck,
347 name="appToIDCheck-" + str( i ),
348 args=[] )
349 threads.append( t )
350 t.start()
351
352 for t in threads:
353 t.join()
354 appCheck = appCheck and t.result
355 if appCheck != main.TRUE:
356 node = main.activeNodes[0]
357 main.log.warn( main.CLIs[node].apps() )
358 main.log.warn( main.CLIs[node].appIDs() )
359 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
360 onpass="App Ids seem to be correct",
361 onfail="Something is wrong with app Ids" )
362
Jon Hall6e709752016-02-01 13:38:46 -0800363 def CASE2( self, main ):
364 """
365 Assign devices to controllers
366 """
367 import re
368 assert main.numCtrls, "main.numCtrls not defined"
369 assert main, "main not defined"
370 assert utilities.assert_equals, "utilities.assert_equals not defined"
371 assert main.CLIs, "main.CLIs not defined"
372 assert main.nodes, "main.nodes not defined"
373 assert ONOS1Port, "ONOS1Port not defined"
374 assert ONOS2Port, "ONOS2Port not defined"
375 assert ONOS3Port, "ONOS3Port not defined"
376 assert ONOS4Port, "ONOS4Port not defined"
377 assert ONOS5Port, "ONOS5Port not defined"
378 assert ONOS6Port, "ONOS6Port not defined"
379 assert ONOS7Port, "ONOS7Port not defined"
380
381 main.case( "Assigning devices to controllers" )
382 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
383 "and check that an ONOS node becomes the " +\
384 "master of the device."
385 main.step( "Assign switches to controllers" )
386
387 ipList = []
388 for i in range( main.numCtrls ):
389 ipList.append( main.nodes[ i ].ip_address )
390 swList = []
391 for i in range( 1, 29 ):
392 swList.append( "s" + str( i ) )
393 main.Mininet1.assignSwController( sw=swList, ip=ipList )
394
395 mastershipCheck = main.TRUE
396 for i in range( 1, 29 ):
397 response = main.Mininet1.getSwController( "s" + str( i ) )
398 try:
399 main.log.info( str( response ) )
400 except Exception:
401 main.log.info( repr( response ) )
402 for node in main.nodes:
403 if re.search( "tcp:" + node.ip_address, response ):
404 mastershipCheck = mastershipCheck and main.TRUE
405 else:
406 main.log.error( "Error, node " + node.ip_address + " is " +
407 "not in the list of controllers s" +
408 str( i ) + " is connecting to." )
409 mastershipCheck = main.FALSE
410 utilities.assert_equals(
411 expect=main.TRUE,
412 actual=mastershipCheck,
413 onpass="Switch mastership assigned correctly",
414 onfail="Switches not assigned correctly to controllers" )
415
416 def CASE21( self, main ):
417 """
418 Assign mastership to controllers
419 """
420 import time
421 assert main.numCtrls, "main.numCtrls not defined"
422 assert main, "main not defined"
423 assert utilities.assert_equals, "utilities.assert_equals not defined"
424 assert main.CLIs, "main.CLIs not defined"
425 assert main.nodes, "main.nodes not defined"
426 assert ONOS1Port, "ONOS1Port not defined"
427 assert ONOS2Port, "ONOS2Port not defined"
428 assert ONOS3Port, "ONOS3Port not defined"
429 assert ONOS4Port, "ONOS4Port not defined"
430 assert ONOS5Port, "ONOS5Port not defined"
431 assert ONOS6Port, "ONOS6Port not defined"
432 assert ONOS7Port, "ONOS7Port not defined"
433
434 main.case( "Assigning Controller roles for switches" )
435 main.caseExplanation = "Check that ONOS is connected to each " +\
436 "device. Then manually assign" +\
437 " mastership to specific ONOS nodes using" +\
438 " 'device-role'"
439 main.step( "Assign mastership of switches to specific controllers" )
440 # Manually assign mastership to the controller we want
441 roleCall = main.TRUE
442
443 ipList = [ ]
444 deviceList = []
445 onosCli = main.CLIs[ main.activeNodes[0] ]
446 try:
447 # Assign mastership to specific controllers. This assignment was
448 # determined for a 7 node cluser, but will work with any sized
449 # cluster
450 for i in range( 1, 29 ): # switches 1 through 28
451 # set up correct variables:
452 if i == 1:
453 c = 0
454 ip = main.nodes[ c ].ip_address # ONOS1
455 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
456 elif i == 2:
457 c = 1 % main.numCtrls
458 ip = main.nodes[ c ].ip_address # ONOS2
459 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
460 elif i == 3:
461 c = 1 % main.numCtrls
462 ip = main.nodes[ c ].ip_address # ONOS2
463 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
464 elif i == 4:
465 c = 3 % main.numCtrls
466 ip = main.nodes[ c ].ip_address # ONOS4
467 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
468 elif i == 5:
469 c = 2 % main.numCtrls
470 ip = main.nodes[ c ].ip_address # ONOS3
471 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
472 elif i == 6:
473 c = 2 % main.numCtrls
474 ip = main.nodes[ c ].ip_address # ONOS3
475 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
476 elif i == 7:
477 c = 5 % main.numCtrls
478 ip = main.nodes[ c ].ip_address # ONOS6
479 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
480 elif i >= 8 and i <= 17:
481 c = 4 % main.numCtrls
482 ip = main.nodes[ c ].ip_address # ONOS5
483 dpid = '3' + str( i ).zfill( 3 )
484 deviceId = onosCli.getDevice( dpid ).get( 'id' )
485 elif i >= 18 and i <= 27:
486 c = 6 % main.numCtrls
487 ip = main.nodes[ c ].ip_address # ONOS7
488 dpid = '6' + str( i ).zfill( 3 )
489 deviceId = onosCli.getDevice( dpid ).get( 'id' )
490 elif i == 28:
491 c = 0
492 ip = main.nodes[ c ].ip_address # ONOS1
493 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
494 else:
495 main.log.error( "You didn't write an else statement for " +
496 "switch s" + str( i ) )
497 roleCall = main.FALSE
498 # Assign switch
499 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
500 # TODO: make this controller dynamic
501 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
502 ipList.append( ip )
503 deviceList.append( deviceId )
504 except ( AttributeError, AssertionError ):
505 main.log.exception( "Something is wrong with ONOS device view" )
506 main.log.info( onosCli.devices() )
507 utilities.assert_equals(
508 expect=main.TRUE,
509 actual=roleCall,
510 onpass="Re-assigned switch mastership to designated controller",
511 onfail="Something wrong with deviceRole calls" )
512
513 main.step( "Check mastership was correctly assigned" )
514 roleCheck = main.TRUE
515 # NOTE: This is due to the fact that device mastership change is not
516 # atomic and is actually a multi step process
517 time.sleep( 5 )
518 for i in range( len( ipList ) ):
519 ip = ipList[i]
520 deviceId = deviceList[i]
521 # Check assignment
522 master = onosCli.getRole( deviceId ).get( 'master' )
523 if ip in master:
524 roleCheck = roleCheck and main.TRUE
525 else:
526 roleCheck = roleCheck and main.FALSE
527 main.log.error( "Error, controller " + ip + " is not" +
528 " master " + "of device " +
529 str( deviceId ) + ". Master is " +
530 repr( master ) + "." )
531 utilities.assert_equals(
532 expect=main.TRUE,
533 actual=roleCheck,
534 onpass="Switches were successfully reassigned to designated " +
535 "controller",
536 onfail="Switches were not successfully reassigned" )
537
538 def CASE3( self, main ):
539 """
540 Assign intents
541 """
542 import time
543 import json
544 assert main.numCtrls, "main.numCtrls not defined"
545 assert main, "main not defined"
546 assert utilities.assert_equals, "utilities.assert_equals not defined"
547 assert main.CLIs, "main.CLIs not defined"
548 assert main.nodes, "main.nodes not defined"
549 main.case( "Adding host Intents" )
550 main.caseExplanation = "Discover hosts by using pingall then " +\
551 "assign predetermined host-to-host intents." +\
552 " After installation, check that the intent" +\
553 " is distributed to all nodes and the state" +\
554 " is INSTALLED"
555
556 # install onos-app-fwd
557 main.step( "Install reactive forwarding app" )
558 onosCli = main.CLIs[ main.activeNodes[0] ]
559 installResults = onosCli.activateApp( "org.onosproject.fwd" )
560 utilities.assert_equals( expect=main.TRUE, actual=installResults,
561 onpass="Install fwd successful",
562 onfail="Install fwd failed" )
563
564 main.step( "Check app ids" )
565 appCheck = main.TRUE
566 threads = []
567 for i in main.activeNodes:
568 t = main.Thread( target=main.CLIs[i].appToIDCheck,
569 name="appToIDCheck-" + str( i ),
570 args=[] )
571 threads.append( t )
572 t.start()
573
574 for t in threads:
575 t.join()
576 appCheck = appCheck and t.result
577 if appCheck != main.TRUE:
578 main.log.warn( onosCli.apps() )
579 main.log.warn( onosCli.appIDs() )
580 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
581 onpass="App Ids seem to be correct",
582 onfail="Something is wrong with app Ids" )
583
584 main.step( "Discovering Hosts( Via pingall for now )" )
585 # FIXME: Once we have a host discovery mechanism, use that instead
586 # REACTIVE FWD test
587 pingResult = main.FALSE
588 passMsg = "Reactive Pingall test passed"
589 time1 = time.time()
590 pingResult = main.Mininet1.pingall()
591 time2 = time.time()
592 if not pingResult:
593 main.log.warn("First pingall failed. Trying again...")
594 pingResult = main.Mininet1.pingall()
595 passMsg += " on the second try"
596 utilities.assert_equals(
597 expect=main.TRUE,
598 actual=pingResult,
599 onpass= passMsg,
600 onfail="Reactive Pingall failed, " +
601 "one or more ping pairs failed" )
602 main.log.info( "Time for pingall: %2f seconds" %
603 ( time2 - time1 ) )
604 # timeout for fwd flows
605 time.sleep( 11 )
606 # uninstall onos-app-fwd
607 main.step( "Uninstall reactive forwarding app" )
608 node = main.activeNodes[0]
609 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
610 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
611 onpass="Uninstall fwd successful",
612 onfail="Uninstall fwd failed" )
613
614 main.step( "Check app ids" )
615 threads = []
616 appCheck2 = main.TRUE
617 for i in main.activeNodes:
618 t = main.Thread( target=main.CLIs[i].appToIDCheck,
619 name="appToIDCheck-" + str( i ),
620 args=[] )
621 threads.append( t )
622 t.start()
623
624 for t in threads:
625 t.join()
626 appCheck2 = appCheck2 and t.result
627 if appCheck2 != main.TRUE:
628 node = main.activeNodes[0]
629 main.log.warn( main.CLIs[node].apps() )
630 main.log.warn( main.CLIs[node].appIDs() )
631 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
632 onpass="App Ids seem to be correct",
633 onfail="Something is wrong with app Ids" )
634
635 main.step( "Add host intents via cli" )
636 intentIds = []
637 # TODO: move the host numbers to params
638 # Maybe look at all the paths we ping?
639 intentAddResult = True
640 hostResult = main.TRUE
641 for i in range( 8, 18 ):
642 main.log.info( "Adding host intent between h" + str( i ) +
643 " and h" + str( i + 10 ) )
644 host1 = "00:00:00:00:00:" + \
645 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
646 host2 = "00:00:00:00:00:" + \
647 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
648 # NOTE: getHost can return None
649 host1Dict = onosCli.getHost( host1 )
650 host2Dict = onosCli.getHost( host2 )
651 host1Id = None
652 host2Id = None
653 if host1Dict and host2Dict:
654 host1Id = host1Dict.get( 'id', None )
655 host2Id = host2Dict.get( 'id', None )
656 if host1Id and host2Id:
657 nodeNum = ( i % len( main.activeNodes ) )
658 node = main.activeNodes[nodeNum]
659 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
660 if tmpId:
661 main.log.info( "Added intent with id: " + tmpId )
662 intentIds.append( tmpId )
663 else:
664 main.log.error( "addHostIntent returned: " +
665 repr( tmpId ) )
666 else:
667 main.log.error( "Error, getHost() failed for h" + str( i ) +
668 " and/or h" + str( i + 10 ) )
669 node = main.activeNodes[0]
670 hosts = main.CLIs[node].hosts()
671 main.log.warn( "Hosts output: " )
672 try:
673 main.log.warn( json.dumps( json.loads( hosts ),
674 sort_keys=True,
675 indent=4,
676 separators=( ',', ': ' ) ) )
677 except ( ValueError, TypeError ):
678 main.log.warn( repr( hosts ) )
679 hostResult = main.FALSE
680 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
681 onpass="Found a host id for each host",
682 onfail="Error looking up host ids" )
683
684 intentStart = time.time()
685 onosIds = onosCli.getAllIntentsId()
686 main.log.info( "Submitted intents: " + str( intentIds ) )
687 main.log.info( "Intents in ONOS: " + str( onosIds ) )
688 for intent in intentIds:
689 if intent in onosIds:
690 pass # intent submitted is in onos
691 else:
692 intentAddResult = False
693 if intentAddResult:
694 intentStop = time.time()
695 else:
696 intentStop = None
697 # Print the intent states
698 intents = onosCli.intents()
699 intentStates = []
700 installedCheck = True
701 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
702 count = 0
703 try:
704 for intent in json.loads( intents ):
705 state = intent.get( 'state', None )
706 if "INSTALLED" not in state:
707 installedCheck = False
708 intentId = intent.get( 'id', None )
709 intentStates.append( ( intentId, state ) )
710 except ( ValueError, TypeError ):
711 main.log.exception( "Error parsing intents" )
712 # add submitted intents not in the store
713 tmplist = [ i for i, s in intentStates ]
714 missingIntents = False
715 for i in intentIds:
716 if i not in tmplist:
717 intentStates.append( ( i, " - " ) )
718 missingIntents = True
719 intentStates.sort()
720 for i, s in intentStates:
721 count += 1
722 main.log.info( "%-6s%-15s%-15s" %
723 ( str( count ), str( i ), str( s ) ) )
724 leaders = onosCli.leaders()
725 try:
726 missing = False
727 if leaders:
728 parsedLeaders = json.loads( leaders )
729 main.log.warn( json.dumps( parsedLeaders,
730 sort_keys=True,
731 indent=4,
732 separators=( ',', ': ' ) ) )
733 # check for all intent partitions
734 topics = []
735 for i in range( 14 ):
736 topics.append( "intent-partition-" + str( i ) )
737 main.log.debug( topics )
738 ONOStopics = [ j['topic'] for j in parsedLeaders ]
739 for topic in topics:
740 if topic not in ONOStopics:
741 main.log.error( "Error: " + topic +
742 " not in leaders" )
743 missing = True
744 else:
745 main.log.error( "leaders() returned None" )
746 except ( ValueError, TypeError ):
747 main.log.exception( "Error parsing leaders" )
748 main.log.error( repr( leaders ) )
749 # Check all nodes
750 if missing:
751 for i in main.activeNodes:
752 response = main.CLIs[i].leaders( jsonFormat=False)
753 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
754 str( response ) )
755
756 partitions = onosCli.partitions()
757 try:
758 if partitions :
759 parsedPartitions = json.loads( partitions )
760 main.log.warn( json.dumps( parsedPartitions,
761 sort_keys=True,
762 indent=4,
763 separators=( ',', ': ' ) ) )
764 # TODO check for a leader in all paritions
765 # TODO check for consistency among nodes
766 else:
767 main.log.error( "partitions() returned None" )
768 except ( ValueError, TypeError ):
769 main.log.exception( "Error parsing partitions" )
770 main.log.error( repr( partitions ) )
771 pendingMap = onosCli.pendingMap()
772 try:
773 if pendingMap :
774 parsedPending = json.loads( pendingMap )
775 main.log.warn( json.dumps( parsedPending,
776 sort_keys=True,
777 indent=4,
778 separators=( ',', ': ' ) ) )
779 # TODO check something here?
780 else:
781 main.log.error( "pendingMap() returned None" )
782 except ( ValueError, TypeError ):
783 main.log.exception( "Error parsing pending map" )
784 main.log.error( repr( pendingMap ) )
785
786 intentAddResult = bool( intentAddResult and not missingIntents and
787 installedCheck )
788 if not intentAddResult:
789 main.log.error( "Error in pushing host intents to ONOS" )
790
791 main.step( "Intent Anti-Entropy dispersion" )
792 for j in range(100):
793 correct = True
794 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
795 for i in main.activeNodes:
796 onosIds = []
797 ids = main.CLIs[i].getAllIntentsId()
798 onosIds.append( ids )
799 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
800 str( sorted( onosIds ) ) )
801 if sorted( ids ) != sorted( intentIds ):
802 main.log.warn( "Set of intent IDs doesn't match" )
803 correct = False
804 break
805 else:
806 intents = json.loads( main.CLIs[i].intents() )
807 for intent in intents:
808 if intent[ 'state' ] != "INSTALLED":
809 main.log.warn( "Intent " + intent[ 'id' ] +
810 " is " + intent[ 'state' ] )
811 correct = False
812 break
813 if correct:
814 break
815 else:
816 time.sleep(1)
817 if not intentStop:
818 intentStop = time.time()
819 global gossipTime
820 gossipTime = intentStop - intentStart
821 main.log.info( "It took about " + str( gossipTime ) +
822 " seconds for all intents to appear in each node" )
823 gossipPeriod = int( main.params['timers']['gossip'] )
824 maxGossipTime = gossipPeriod * len( main.activeNodes )
825 utilities.assert_greater_equals(
826 expect=maxGossipTime, actual=gossipTime,
827 onpass="ECM anti-entropy for intents worked within " +
828 "expected time",
829 onfail="Intent ECM anti-entropy took too long. " +
830 "Expected time:{}, Actual time:{}".format( maxGossipTime,
831 gossipTime ) )
832 if gossipTime <= maxGossipTime:
833 intentAddResult = True
834
835 if not intentAddResult or "key" in pendingMap:
836 import time
837 installedCheck = True
838 main.log.info( "Sleeping 60 seconds to see if intents are found" )
839 time.sleep( 60 )
840 onosIds = onosCli.getAllIntentsId()
841 main.log.info( "Submitted intents: " + str( intentIds ) )
842 main.log.info( "Intents in ONOS: " + str( onosIds ) )
843 # Print the intent states
844 intents = onosCli.intents()
845 intentStates = []
846 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
847 count = 0
848 try:
849 for intent in json.loads( intents ):
850 # Iter through intents of a node
851 state = intent.get( 'state', None )
852 if "INSTALLED" not in state:
853 installedCheck = False
854 intentId = intent.get( 'id', None )
855 intentStates.append( ( intentId, state ) )
856 except ( ValueError, TypeError ):
857 main.log.exception( "Error parsing intents" )
858 # add submitted intents not in the store
859 tmplist = [ i for i, s in intentStates ]
860 for i in intentIds:
861 if i not in tmplist:
862 intentStates.append( ( i, " - " ) )
863 intentStates.sort()
864 for i, s in intentStates:
865 count += 1
866 main.log.info( "%-6s%-15s%-15s" %
867 ( str( count ), str( i ), str( s ) ) )
868 leaders = onosCli.leaders()
869 try:
870 missing = False
871 if leaders:
872 parsedLeaders = json.loads( leaders )
873 main.log.warn( json.dumps( parsedLeaders,
874 sort_keys=True,
875 indent=4,
876 separators=( ',', ': ' ) ) )
877 # check for all intent partitions
878 # check for election
879 topics = []
880 for i in range( 14 ):
881 topics.append( "intent-partition-" + str( i ) )
882 # FIXME: this should only be after we start the app
883 topics.append( "org.onosproject.election" )
884 main.log.debug( topics )
885 ONOStopics = [ j['topic'] for j in parsedLeaders ]
886 for topic in topics:
887 if topic not in ONOStopics:
888 main.log.error( "Error: " + topic +
889 " not in leaders" )
890 missing = True
891 else:
892 main.log.error( "leaders() returned None" )
893 except ( ValueError, TypeError ):
894 main.log.exception( "Error parsing leaders" )
895 main.log.error( repr( leaders ) )
896 # Check all nodes
897 if missing:
898 for i in main.activeNodes:
899 node = main.CLIs[i]
900 response = node.leaders( jsonFormat=False)
901 main.log.warn( str( node.name ) + " leaders output: \n" +
902 str( response ) )
903
904 partitions = onosCli.partitions()
905 try:
906 if partitions :
907 parsedPartitions = json.loads( partitions )
908 main.log.warn( json.dumps( parsedPartitions,
909 sort_keys=True,
910 indent=4,
911 separators=( ',', ': ' ) ) )
912 # TODO check for a leader in all paritions
913 # TODO check for consistency among nodes
914 else:
915 main.log.error( "partitions() returned None" )
916 except ( ValueError, TypeError ):
917 main.log.exception( "Error parsing partitions" )
918 main.log.error( repr( partitions ) )
919 pendingMap = onosCli.pendingMap()
920 try:
921 if pendingMap :
922 parsedPending = json.loads( pendingMap )
923 main.log.warn( json.dumps( parsedPending,
924 sort_keys=True,
925 indent=4,
926 separators=( ',', ': ' ) ) )
927 # TODO check something here?
928 else:
929 main.log.error( "pendingMap() returned None" )
930 except ( ValueError, TypeError ):
931 main.log.exception( "Error parsing pending map" )
932 main.log.error( repr( pendingMap ) )
933
    def CASE4( self, main ):
        """
        Ping across added host intents

        Waits (up to ~40s) for all intents to reach INSTALLED, pings each
        h<i> -> h<i+10> pair for i in 8..17, checks topic leadership, and
        dumps partitions / pending map. If intents were not all INSTALLED,
        waits another 60s, re-dumps diagnostic state, and pings again.
        Results are reported through utilities.assert_equals.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # Use the first active node's CLI for all single-node queries below
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll intent states once per second, up to 40 times
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                # NOTE(review): a parse failure leaves installedCheck True
                # for this iteration, ending the retry loop early
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host pairs h8->h18 .. h17->h27 correspond to the added intents
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                # Expect 14 intent partitions as leadership topics
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        # NOTE(review): this dumps per-node leader output on SUCCESS
        # (topicCheck true); the similar block in other cases dumps on
        # failure — confirm which was intended
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If intents never settled, give them another minute, re-dump all
        # diagnostic state, and retry the pings once
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Dump per-node leader output when a topic was missing
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
    def CASE5( self, main ):
        """
        Reading state of ONOS

        Gathers cluster-wide state (mastership, intents, flows, OF tables,
        topology) from every active node, checks cross-node consistency,
        stores snapshots in the module-level globals mastershipState,
        intentState, flowState and flows for later comparison, starts
        long-running background pings, and compares the ONOS topology
        view against Mininet.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents,flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        # Snapshot globals read by later cases
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query each active node in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Validate that every node returned usable roles output
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes must report byte-identical roles output
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Inconsistent: pretty-print each node's view for debugging
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            # Consistent: save the snapshot for later comparison
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE(review): sorted() here sorts the characters of the raw JSON
        # strings, so this compares content ignoring ordering of characters
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent        ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep(30)
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[i] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        # Only flow COUNT is compared; flow contents legitimately differ
        # NOTE(review): len( None ) raises TypeError if any node failed
        # above — assumes flowsResults gated earlier; confirm
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        global flows
        flows = []
        # Topology has switches s1..s28; dump each switch's OF1.3 table
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        # Background pings keep dataplane traffic flowing during failures;
        # source/target pairs come from the test's params file
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source1' ],
            target=main.params[ 'PING' ][ 'target1' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source2' ],
            target=main.params[ 'PING' ][ 'target2' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source3' ],
            target=main.params[ 'PING' ][ 'target3' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source4' ],
            target=main.params[ 'PING' ][ 'target4' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source5' ],
            target=main.params[ 'PING' ][ 'target5' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source6' ],
            target=main.params[ 'PING' ][ 'target6' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source7' ],
            target=main.params[ 'PING' ][ 'target7' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source8' ],
            target=main.params[ 'PING' ][ 'target8' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source9' ],
            target=main.params[ 'PING' ][ 'target9' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source10' ],
            target=main.params[ 'PING' ][ 'target10' ],
            pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
        # Fan out devices/hosts/ports/links/clusters queries per node
        devices = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].devices,
                             name="devices-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            devices.append( t.result )
        hosts = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].hosts,
                             name="hosts-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            try:
                hosts.append( json.loads( t.result ) )
            except ( ValueError, TypeError ):
                # FIXME: better handling of this, print which node
                #        Maybe use thread name?
                main.log.exception( "Error parsing json output of hosts" )
                main.log.warn( repr( t.result ) )
                hosts.append( None )

        ports = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].ports,
                             name="ports-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ports.append( t.result )
        links = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].links,
                             name="links-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            links.append( t.result )
        clusters = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].clusters,
                             name="clusters-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            clusters.append( t.result )
        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Host view is consistent across ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Each host has an IP address" )
        ipResult = main.TRUE
        for controller in range( 0, len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ]:
                for host in hosts[ controller ]:
                    if not host.get( 'ipAddresses', [ ] ):
                        main.log.error( "Error with host ips on controller" +
                                        controllerStr + ": " + str( host ) )
                        ipResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=ipResult,
            onpass="The ips of the hosts aren't empty",
            onfail="The ip of at least one host is missing" )

        # Strongly connected clusters of devices
        main.step( "Cluster view is consistent across ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE

            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if consistentClustersResult != main.TRUE:
            main.log.debug( clusters )
        # there should always only be one cluster
        main.step( "Cluster view correct across ONOS nodes" )
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        main.step( "Comparing ONOS topology to MN" )
        devicesResults = main.TRUE
        linksResults = main.TRUE
        hostsResults = main.TRUE
        mnSwitches = main.Mininet1.getSwitches()
        mnLinks = main.Mininet1.getLinks()
        mnHosts = main.Mininet1.getHosts()
        # NOTE(review): this loop iterates node IDs but then indexes
        # main.activeNodes and the result lists with that ID — correct only
        # when activeNodes == [ 0, 1, ..., n-1 ]; confirm after a partition
        for controller in main.activeNodes:
            controllerStr = str( main.activeNodes[controller] + 1 )
            if devices[ controller ] and ports[ controller ] and\
                    "Error" not in devices[ controller ] and\
                    "Error" not in ports[ controller ]:
                currentDevicesResult = main.Mininet1.compareSwitches(
                        mnSwitches,
                        json.loads( devices[ controller ] ),
                        json.loads( ports[ controller ] ) )
            else:
                currentDevicesResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentDevicesResult,
                                     onpass="ONOS" + controllerStr +
                                     " Switches view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " Switches view is incorrect" )
            if links[ controller ] and "Error" not in links[ controller ]:
                currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
            else:
                currentLinksResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentLinksResult,
                                     onpass="ONOS" + controllerStr +
                                     " links view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " links view is incorrect" )

            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
            else:
                currentHostsResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentHostsResult,
                                     onpass="ONOS" + controllerStr +
                                     " hosts exist in Mininet",
                                     onfail="ONOS" + controllerStr +
                                     " hosts don't match Mininet" )

            devicesResults = devicesResults and currentDevicesResult
            linksResults = linksResults and currentLinksResult
            hostsResults = hostsResults and currentHostsResult

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )
1779 def CASE61( self, main ):
1780 """
1781 The Failure case.
1782 """
1783 import math
1784 assert main.numCtrls, "main.numCtrls not defined"
1785 assert main, "main not defined"
1786 assert utilities.assert_equals, "utilities.assert_equals not defined"
1787 assert main.CLIs, "main.CLIs not defined"
1788 assert main.nodes, "main.nodes not defined"
1789 main.case( "Partition ONOS nodes into two distinct partitions" )
1790
1791 main.step( "Checking ONOS Logs for errors" )
1792 for node in main.nodes:
1793 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1794 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1795
1796 n = len( main.nodes ) # Number of nodes
1797 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1798 main.partition = [ 0 ] # ONOS node to partition, listed by index in main.nodes
1799 if n > 3:
1800 main.partition.append( p - 1 )
1801 # NOTE: This only works for cluster sizes of 3,5, or 7.
1802
1803 main.step( "Partitioning ONOS nodes" )
1804 nodeList = [ str( i + 1 ) for i in main.partition ]
1805 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1806 partitionResults = main.TRUE
1807 for i in range( 0, n ):
1808 this = main.nodes[i]
1809 if i not in main.partition:
1810 for j in main.partition:
1811 foe = main.nodes[j]
1812 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1813 #CMD HERE
1814 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1815 this.handle.sendline( cmdStr )
1816 this.handle.expect( "\$" )
1817 main.log.debug( this.handle.before )
1818 else:
1819 for j in range( 0, n ):
1820 if j not in main.partition:
1821 foe = main.nodes[j]
1822 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1823 #CMD HERE
1824 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1825 this.handle.sendline( cmdStr )
1826 this.handle.expect( "\$" )
1827 main.log.debug( this.handle.before )
1828 main.activeNodes.remove( i )
1829 # NOTE: When dynamic clustering is finished, we need to start checking
1830 # main.partion nodes still work when partitioned
1831 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1832 onpass="Firewall rules set successfully",
1833 onfail="Error setting firewall rules" )
1834
1835 main.log.step( "Sleeping 60 seconds" )
1836 time.sleep( 60 )
1837
1838 def CASE62( self, main ):
1839 """
1840 Healing Partition
1841 """
1842 import time
1843 assert main.numCtrls, "main.numCtrls not defined"
1844 assert main, "main not defined"
1845 assert utilities.assert_equals, "utilities.assert_equals not defined"
1846 assert main.CLIs, "main.CLIs not defined"
1847 assert main.nodes, "main.nodes not defined"
1848 assert main.partition, "main.partition not defined"
1849 main.case( "Healing Partition" )
1850
1851 main.step( "Deleteing firewall rules" )
1852 healResults = main.TRUE
1853 for node in main.nodes:
1854 cmdStr = "sudo iptables -F"
1855 node.handle.sendline( cmdStr )
1856 node.handle.expect( "\$" )
1857 main.log.debug( node.handle.before )
1858 utilities.assert_equals( expect=main.TRUE, actual=healResults,
1859 onpass="Firewall rules removed",
1860 onfail="Error removing firewall rules" )
1861
1862 for node in main.partition:
1863 main.activeNodes.append( node )
1864 main.activeNodes.sort()
1865 try:
1866 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1867 "List of active nodes has duplicates, this likely indicates something was run out of order"
1868 except AssertionError:
1869 main.log.exception( "" )
1870 main.cleanup()
1871 main.exit()
1872
    def CASE7( self, main ):
        """
        Check state after ONOS failure

        Verifies controller state after the network partition:
          * every device has a master
          * device roles and intents are consistent across active nodes
          * intents and switch flow tables match the pre-failure state
          * leadership election still works and the leader is not one of
            the partitioned nodes
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.partition is set by CASE61; default to an empty list so this
        # case can also run without a preceding partition
        try:
            main.partition
        except AttributeError:
            main.partition = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query each active node in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output is empty or contains "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes should report the identical roles output as node 0
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                # NOTE(review): two positional args to log.warn here — the
                # json dump may be dropped by the logger; confirm intended
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                             "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "  ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # one row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # NOTE(review): 'intentState' is not assigned in this method; it is
        # presumably saved by CASE5 — confirm it is actually in scope here,
        # otherwise this always takes the NameError branch
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before failure" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same count: fall back to element-wise containment check
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            # NOTE(review): 'flows' is not assigned in this method; it is
            # presumably recorded before the failure — confirm it is in
            # scope when this case runs
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes that were partitioned; the elected leader must
        # not be one of them
        partitioned = []
        for i in main.partition:
            partitioned.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in partitioned:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was partitioned" )
                leaderResult = main.FALSE
        # Every active node must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
        # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2195
    def CASE8( self, main ):
        """
        Compare topo

        Polls devices, hosts, ports, links and clusters from every active
        ONOS node (retrying for up to ~60s / 3 attempts while the gossip
        protocol converges) and compares each node's view against the
        Mininet topology, host attachment points, and the other nodes'
        views. Finally runs a node-health check on the cluster.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # --- Fetch devices from every active node in parallel ---
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            # --- Fetch hosts (parsed as JSON) ---
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host should have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            # --- Fetch ports ---
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            # --- Fetch links ---
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            # --- Fetch SCC clusters ---
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query failed on every node there is nothing to compare
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            # Compare each node's view against Mininet
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                   "Error" not in devices[ controller ] and\
                   "Error" not in ports[ controller ]:

                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ) as e:
                        # NOTE(review): currentDevicesResult is left unset on
                        # this path — confirm a prior iteration's value (or a
                        # NameError) is acceptable here
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
            utilities.assert_equals( expect=True,
                                     actual=topoResult,
                                     onpass="ONOS topology matches Mininet",
                                     onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        # NOTE(review): if parsing succeeds but numClusters != 1,
        # clusterResults is never assigned before its use below — confirm
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # Also require convergence within two polling attempts
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
               " seconds for the test to make all the cli calls to fetch " +\
               "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -08002611
2612 def CASE9( self, main ):
2613 """
2614 Link s3-s28 down
2615 """
2616 import time
2617 assert main.numCtrls, "main.numCtrls not defined"
2618 assert main, "main not defined"
2619 assert utilities.assert_equals, "utilities.assert_equals not defined"
2620 assert main.CLIs, "main.CLIs not defined"
2621 assert main.nodes, "main.nodes not defined"
2622 # NOTE: You should probably run a topology check after this
2623
2624 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2625
2626 description = "Turn off a link to ensure that Link Discovery " +\
2627 "is working properly"
2628 main.case( description )
2629
2630 main.step( "Kill Link between s3 and s28" )
2631 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2632 main.log.info( "Waiting " + str( linkSleep ) +
2633 " seconds for link down to be discovered" )
2634 time.sleep( linkSleep )
2635 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2636 onpass="Link down successful",
2637 onfail="Failed to bring link down" )
2638 # TODO do some sort of check here
2639
2640 def CASE10( self, main ):
2641 """
2642 Link s3-s28 up
2643 """
2644 import time
2645 assert main.numCtrls, "main.numCtrls not defined"
2646 assert main, "main not defined"
2647 assert utilities.assert_equals, "utilities.assert_equals not defined"
2648 assert main.CLIs, "main.CLIs not defined"
2649 assert main.nodes, "main.nodes not defined"
2650 # NOTE: You should probably run a topology check after this
2651
2652 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2653
2654 description = "Restore a link to ensure that Link Discovery is " + \
2655 "working properly"
2656 main.case( description )
2657
2658 main.step( "Bring link between s3 and s28 back up" )
2659 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2660 main.log.info( "Waiting " + str( linkSleep ) +
2661 " seconds for link up to be discovered" )
2662 time.sleep( linkSleep )
2663 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2664 onpass="Link up successful",
2665 onfail="Failed to bring link up" )
2666 # TODO do some sort of check here
2667
2668 def CASE11( self, main ):
2669 """
2670 Switch Down
2671 """
2672 # NOTE: You should probably run a topology check after this
2673 import time
2674 assert main.numCtrls, "main.numCtrls not defined"
2675 assert main, "main not defined"
2676 assert utilities.assert_equals, "utilities.assert_equals not defined"
2677 assert main.CLIs, "main.CLIs not defined"
2678 assert main.nodes, "main.nodes not defined"
2679
2680 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2681
2682 description = "Killing a switch to ensure it is discovered correctly"
2683 onosCli = main.CLIs[ main.activeNodes[0] ]
2684 main.case( description )
2685 switch = main.params[ 'kill' ][ 'switch' ]
2686 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2687
2688 # TODO: Make this switch parameterizable
2689 main.step( "Kill " + switch )
2690 main.log.info( "Deleting " + switch )
2691 main.Mininet1.delSwitch( switch )
2692 main.log.info( "Waiting " + str( switchSleep ) +
2693 " seconds for switch down to be discovered" )
2694 time.sleep( switchSleep )
2695 device = onosCli.getDevice( dpid=switchDPID )
2696 # Peek at the deleted switch
2697 main.log.warn( str( device ) )
2698 result = main.FALSE
2699 if device and device[ 'available' ] is False:
2700 result = main.TRUE
2701 utilities.assert_equals( expect=main.TRUE, actual=result,
2702 onpass="Kill switch successful",
2703 onfail="Failed to kill switch?" )
2704
2705 def CASE12( self, main ):
2706 """
2707 Switch Up
2708 """
2709 # NOTE: You should probably run a topology check after this
2710 import time
2711 assert main.numCtrls, "main.numCtrls not defined"
2712 assert main, "main not defined"
2713 assert utilities.assert_equals, "utilities.assert_equals not defined"
2714 assert main.CLIs, "main.CLIs not defined"
2715 assert main.nodes, "main.nodes not defined"
2716 assert ONOS1Port, "ONOS1Port not defined"
2717 assert ONOS2Port, "ONOS2Port not defined"
2718 assert ONOS3Port, "ONOS3Port not defined"
2719 assert ONOS4Port, "ONOS4Port not defined"
2720 assert ONOS5Port, "ONOS5Port not defined"
2721 assert ONOS6Port, "ONOS6Port not defined"
2722 assert ONOS7Port, "ONOS7Port not defined"
2723
2724 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2725 switch = main.params[ 'kill' ][ 'switch' ]
2726 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2727 links = main.params[ 'kill' ][ 'links' ].split()
2728 onosCli = main.CLIs[ main.activeNodes[0] ]
2729 description = "Adding a switch to ensure it is discovered correctly"
2730 main.case( description )
2731
2732 main.step( "Add back " + switch )
2733 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2734 for peer in links:
2735 main.Mininet1.addLink( switch, peer )
2736 ipList = [ node.ip_address for node in main.nodes ]
2737 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2738 main.log.info( "Waiting " + str( switchSleep ) +
2739 " seconds for switch up to be discovered" )
2740 time.sleep( switchSleep )
2741 device = onosCli.getDevice( dpid=switchDPID )
2742 # Peek at the deleted switch
2743 main.log.warn( str( device ) )
2744 result = main.FALSE
2745 if device and device[ 'available' ]:
2746 result = main.TRUE
2747 utilities.assert_equals( expect=main.TRUE, actual=result,
2748 onpass="add switch successful",
2749 onfail="Failed to add switch?" )
2750
2751 def CASE13( self, main ):
2752 """
2753 Clean up
2754 """
2755 import os
2756 import time
2757 assert main.numCtrls, "main.numCtrls not defined"
2758 assert main, "main not defined"
2759 assert utilities.assert_equals, "utilities.assert_equals not defined"
2760 assert main.CLIs, "main.CLIs not defined"
2761 assert main.nodes, "main.nodes not defined"
2762
2763 # printing colors to terminal
2764 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2765 'blue': '\033[94m', 'green': '\033[92m',
2766 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2767 main.case( "Test Cleanup" )
2768 main.step( "Killing tcpdumps" )
2769 main.Mininet2.stopTcpdump()
2770
2771 testname = main.TEST
2772 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2773 main.step( "Copying MN pcap and ONOS log files to test station" )
2774 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2775 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2776 # NOTE: MN Pcap file is being saved to logdir.
2777 # We scp this file as MN and TestON aren't necessarily the same vm
2778
2779 # FIXME: To be replaced with a Jenkin's post script
2780 # TODO: Load these from params
2781 # NOTE: must end in /
2782 logFolder = "/opt/onos/log/"
2783 logFiles = [ "karaf.log", "karaf.log.1" ]
2784 # NOTE: must end in /
2785 for f in logFiles:
2786 for node in main.nodes:
2787 dstName = main.logdir + "/" + node.name + "-" + f
2788 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2789 logFolder + f, dstName )
2790 # std*.log's
2791 # NOTE: must end in /
2792 logFolder = "/opt/onos/var/"
2793 logFiles = [ "stderr.log", "stdout.log" ]
2794 # NOTE: must end in /
2795 for f in logFiles:
2796 for node in main.nodes:
2797 dstName = main.logdir + "/" + node.name + "-" + f
2798 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2799 logFolder + f, dstName )
2800 else:
2801 main.log.debug( "skipping saving log files" )
2802
2803 main.step( "Stopping Mininet" )
2804 mnResult = main.Mininet1.stopNet()
2805 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2806 onpass="Mininet stopped",
2807 onfail="MN cleanup NOT successful" )
2808
2809 main.step( "Checking ONOS Logs for errors" )
2810 for node in main.nodes:
2811 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2812 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2813
2814 try:
2815 timerLog = open( main.logdir + "/Timers.csv", 'w')
2816 # Overwrite with empty line and close
2817 labels = "Gossip Intents"
2818 data = str( gossipTime )
2819 timerLog.write( labels + "\n" + data )
2820 timerLog.close()
2821 except NameError, e:
2822 main.log.exception(e)
2823
2824 def CASE14( self, main ):
2825 """
2826 start election app on all onos nodes
2827 """
2828 assert main.numCtrls, "main.numCtrls not defined"
2829 assert main, "main not defined"
2830 assert utilities.assert_equals, "utilities.assert_equals not defined"
2831 assert main.CLIs, "main.CLIs not defined"
2832 assert main.nodes, "main.nodes not defined"
2833
2834 main.case("Start Leadership Election app")
2835 main.step( "Install leadership election app" )
2836 onosCli = main.CLIs[ main.activeNodes[0] ]
2837 appResult = onosCli.activateApp( "org.onosproject.election" )
2838 utilities.assert_equals(
2839 expect=main.TRUE,
2840 actual=appResult,
2841 onpass="Election app installed",
2842 onfail="Something went wrong with installing Leadership election" )
2843
2844 main.step( "Run for election on each node" )
2845 leaderResult = main.TRUE
2846 leaders = []
2847 for i in main.activeNodes:
2848 main.CLIs[i].electionTestRun()
2849 for i in main.activeNodes:
2850 cli = main.CLIs[i]
2851 leader = cli.electionTestLeader()
2852 if leader is None or leader == main.FALSE:
2853 main.log.error( cli.name + ": Leader for the election app " +
2854 "should be an ONOS node, instead got '" +
2855 str( leader ) + "'" )
2856 leaderResult = main.FALSE
2857 leaders.append( leader )
2858 utilities.assert_equals(
2859 expect=main.TRUE,
2860 actual=leaderResult,
2861 onpass="Successfully ran for leadership",
2862 onfail="Failed to run for leadership" )
2863
2864 main.step( "Check that each node shows the same leader" )
2865 sameLeader = main.TRUE
2866 if len( set( leaders ) ) != 1:
2867 sameLeader = main.FALSE
2868 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
2869 str( leaders ) )
2870 utilities.assert_equals(
2871 expect=main.TRUE,
2872 actual=sameLeader,
2873 onpass="Leadership is consistent for the election topic",
2874 onfail="Nodes have different leaders" )
2875
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal, and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        # Without the election app the remaining steps cannot run
        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        # consistentLeaderboards returns ( sameBool, list of leaderboards )
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # first entry of the (agreed-upon) leaderboard is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no break means leader not found
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' is the CLI's marker for "no leader elected"
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            else:
                newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        # NOTE(review): when expectNoLeader, newLeader was left as None above
        # (never assigned the string 'none'), so this comparison looks like it
        # always falls through to the failure branch — confirm intended.
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Parameterize
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        # NOTE(review): if consistentLeaderboards fails here, reRunLeaders[0]
        # may raise rather than be falsy — verify its failure return value.
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3050
3051 def CASE16( self, main ):
3052 """
3053 Install Distributed Primitives app
3054 """
3055 import time
3056 assert main.numCtrls, "main.numCtrls not defined"
3057 assert main, "main not defined"
3058 assert utilities.assert_equals, "utilities.assert_equals not defined"
3059 assert main.CLIs, "main.CLIs not defined"
3060 assert main.nodes, "main.nodes not defined"
3061
3062 # Variables for the distributed primitives tests
3063 global pCounterName
Jon Hall6e709752016-02-01 13:38:46 -08003064 global pCounterValue
Jon Hall6e709752016-02-01 13:38:46 -08003065 global onosSet
3066 global onosSetName
3067 pCounterName = "TestON-Partitions"
Jon Hall6e709752016-02-01 13:38:46 -08003068 pCounterValue = 0
Jon Hall6e709752016-02-01 13:38:46 -08003069 onosSet = set([])
3070 onosSetName = "TestON-set"
3071
3072 description = "Install Primitives app"
3073 main.case( description )
3074 main.step( "Install Primitives app" )
3075 appName = "org.onosproject.distributedprimitives"
3076 node = main.activeNodes[0]
3077 appResults = main.CLIs[node].activateApp( appName )
3078 utilities.assert_equals( expect=main.TRUE,
3079 actual=appResults,
3080 onpass="Primitives app activated",
3081 onfail="Primitives app not activated" )
3082 time.sleep( 5 ) # To allow all nodes to activate
3083
3084 def CASE17( self, main ):
3085 """
3086 Check for basic functionality with distributed primitives
3087 """
3088 # Make sure variables are defined/set
3089 assert main.numCtrls, "main.numCtrls not defined"
3090 assert main, "main not defined"
3091 assert utilities.assert_equals, "utilities.assert_equals not defined"
3092 assert main.CLIs, "main.CLIs not defined"
3093 assert main.nodes, "main.nodes not defined"
3094 assert pCounterName, "pCounterName not defined"
Jon Hall6e709752016-02-01 13:38:46 -08003095 assert onosSetName, "onosSetName not defined"
3096 # NOTE: assert fails if value is 0/None/Empty/False
3097 try:
3098 pCounterValue
3099 except NameError:
3100 main.log.error( "pCounterValue not defined, setting to 0" )
3101 pCounterValue = 0
3102 try:
Jon Hall6e709752016-02-01 13:38:46 -08003103 onosSet
3104 except NameError:
3105 main.log.error( "onosSet not defined, setting to empty Set" )
3106 onosSet = set([])
3107 # Variables for the distributed primitives tests. These are local only
3108 addValue = "a"
3109 addAllValue = "a b c d e f"
3110 retainValue = "c d e f"
3111
3112 description = "Check for basic functionality with distributed " +\
3113 "primitives"
3114 main.case( description )
3115 main.caseExplanation = "Test the methods of the distributed " +\
3116 "primitives (counters and sets) throught the cli"
3117 # DISTRIBUTED ATOMIC COUNTERS
3118 # Partitioned counters
3119 main.step( "Increment then get a default counter on each node" )
3120 pCounters = []
3121 threads = []
3122 addedPValues = []
3123 for i in main.activeNodes:
3124 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3125 name="counterAddAndGet-" + str( i ),
3126 args=[ pCounterName ] )
3127 pCounterValue += 1
3128 addedPValues.append( pCounterValue )
3129 threads.append( t )
3130 t.start()
3131
3132 for t in threads:
3133 t.join()
3134 pCounters.append( t.result )
3135 # Check that counter incremented numController times
3136 pCounterResults = True
3137 for i in addedPValues:
3138 tmpResult = i in pCounters
3139 pCounterResults = pCounterResults and tmpResult
3140 if not tmpResult:
3141 main.log.error( str( i ) + " is not in partitioned "
3142 "counter incremented results" )
3143 utilities.assert_equals( expect=True,
3144 actual=pCounterResults,
3145 onpass="Default counter incremented",
3146 onfail="Error incrementing default" +
3147 " counter" )
3148
3149 main.step( "Get then Increment a default counter on each node" )
3150 pCounters = []
3151 threads = []
3152 addedPValues = []
3153 for i in main.activeNodes:
3154 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3155 name="counterGetAndAdd-" + str( i ),
3156 args=[ pCounterName ] )
3157 addedPValues.append( pCounterValue )
3158 pCounterValue += 1
3159 threads.append( t )
3160 t.start()
3161
3162 for t in threads:
3163 t.join()
3164 pCounters.append( t.result )
3165 # Check that counter incremented numController times
3166 pCounterResults = True
3167 for i in addedPValues:
3168 tmpResult = i in pCounters
3169 pCounterResults = pCounterResults and tmpResult
3170 if not tmpResult:
3171 main.log.error( str( i ) + " is not in partitioned "
3172 "counter incremented results" )
3173 utilities.assert_equals( expect=True,
3174 actual=pCounterResults,
3175 onpass="Default counter incremented",
3176 onfail="Error incrementing default" +
3177 " counter" )
3178
3179 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003180 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003181 utilities.assert_equals( expect=main.TRUE,
3182 actual=incrementCheck,
3183 onpass="Added counters are correct",
3184 onfail="Added counters are incorrect" )
3185
3186 main.step( "Add -8 to then get a default counter on each node" )
3187 pCounters = []
3188 threads = []
3189 addedPValues = []
3190 for i in main.activeNodes:
3191 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3192 name="counterIncrement-" + str( i ),
3193 args=[ pCounterName ],
3194 kwargs={ "delta": -8 } )
3195 pCounterValue += -8
3196 addedPValues.append( pCounterValue )
3197 threads.append( t )
3198 t.start()
3199
3200 for t in threads:
3201 t.join()
3202 pCounters.append( t.result )
3203 # Check that counter incremented numController times
3204 pCounterResults = True
3205 for i in addedPValues:
3206 tmpResult = i in pCounters
3207 pCounterResults = pCounterResults and tmpResult
3208 if not tmpResult:
3209 main.log.error( str( i ) + " is not in partitioned "
3210 "counter incremented results" )
3211 utilities.assert_equals( expect=True,
3212 actual=pCounterResults,
3213 onpass="Default counter incremented",
3214 onfail="Error incrementing default" +
3215 " counter" )
3216
3217 main.step( "Add 5 to then get a default counter on each node" )
3218 pCounters = []
3219 threads = []
3220 addedPValues = []
3221 for i in main.activeNodes:
3222 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3223 name="counterIncrement-" + str( i ),
3224 args=[ pCounterName ],
3225 kwargs={ "delta": 5 } )
3226 pCounterValue += 5
3227 addedPValues.append( pCounterValue )
3228 threads.append( t )
3229 t.start()
3230
3231 for t in threads:
3232 t.join()
3233 pCounters.append( t.result )
3234 # Check that counter incremented numController times
3235 pCounterResults = True
3236 for i in addedPValues:
3237 tmpResult = i in pCounters
3238 pCounterResults = pCounterResults and tmpResult
3239 if not tmpResult:
3240 main.log.error( str( i ) + " is not in partitioned "
3241 "counter incremented results" )
3242 utilities.assert_equals( expect=True,
3243 actual=pCounterResults,
3244 onpass="Default counter incremented",
3245 onfail="Error incrementing default" +
3246 " counter" )
3247
3248 main.step( "Get then add 5 to a default counter on each node" )
3249 pCounters = []
3250 threads = []
3251 addedPValues = []
3252 for i in main.activeNodes:
3253 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3254 name="counterIncrement-" + str( i ),
3255 args=[ pCounterName ],
3256 kwargs={ "delta": 5 } )
3257 addedPValues.append( pCounterValue )
3258 pCounterValue += 5
3259 threads.append( t )
3260 t.start()
3261
3262 for t in threads:
3263 t.join()
3264 pCounters.append( t.result )
3265 # Check that counter incremented numController times
3266 pCounterResults = True
3267 for i in addedPValues:
3268 tmpResult = i in pCounters
3269 pCounterResults = pCounterResults and tmpResult
3270 if not tmpResult:
3271 main.log.error( str( i ) + " is not in partitioned "
3272 "counter incremented results" )
3273 utilities.assert_equals( expect=True,
3274 actual=pCounterResults,
3275 onpass="Default counter incremented",
3276 onfail="Error incrementing default" +
3277 " counter" )
3278
3279 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003280 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003281 utilities.assert_equals( expect=main.TRUE,
3282 actual=incrementCheck,
3283 onpass="Added counters are correct",
3284 onfail="Added counters are incorrect" )
3285
Jon Hall6e709752016-02-01 13:38:46 -08003286 # DISTRIBUTED SETS
3287 main.step( "Distributed Set get" )
3288 size = len( onosSet )
3289 getResponses = []
3290 threads = []
3291 for i in main.activeNodes:
3292 t = main.Thread( target=main.CLIs[i].setTestGet,
3293 name="setTestGet-" + str( i ),
3294 args=[ onosSetName ] )
3295 threads.append( t )
3296 t.start()
3297 for t in threads:
3298 t.join()
3299 getResponses.append( t.result )
3300
3301 getResults = main.TRUE
3302 for i in range( len( main.activeNodes ) ):
3303 node = str( main.activeNodes[i] + 1 )
3304 if isinstance( getResponses[ i ], list):
3305 current = set( getResponses[ i ] )
3306 if len( current ) == len( getResponses[ i ] ):
3307 # no repeats
3308 if onosSet != current:
3309 main.log.error( "ONOS" + node +
3310 " has incorrect view" +
3311 " of set " + onosSetName + ":\n" +
3312 str( getResponses[ i ] ) )
3313 main.log.debug( "Expected: " + str( onosSet ) )
3314 main.log.debug( "Actual: " + str( current ) )
3315 getResults = main.FALSE
3316 else:
3317 # error, set is not a set
3318 main.log.error( "ONOS" + node +
3319 " has repeat elements in" +
3320 " set " + onosSetName + ":\n" +
3321 str( getResponses[ i ] ) )
3322 getResults = main.FALSE
3323 elif getResponses[ i ] == main.ERROR:
3324 getResults = main.FALSE
3325 utilities.assert_equals( expect=main.TRUE,
3326 actual=getResults,
3327 onpass="Set elements are correct",
3328 onfail="Set elements are incorrect" )
3329
3330 main.step( "Distributed Set size" )
3331 sizeResponses = []
3332 threads = []
3333 for i in main.activeNodes:
3334 t = main.Thread( target=main.CLIs[i].setTestSize,
3335 name="setTestSize-" + str( i ),
3336 args=[ onosSetName ] )
3337 threads.append( t )
3338 t.start()
3339 for t in threads:
3340 t.join()
3341 sizeResponses.append( t.result )
3342
3343 sizeResults = main.TRUE
3344 for i in range( len( main.activeNodes ) ):
3345 node = str( main.activeNodes[i] + 1 )
3346 if size != sizeResponses[ i ]:
3347 sizeResults = main.FALSE
3348 main.log.error( "ONOS" + node +
3349 " expected a size of " + str( size ) +
3350 " for set " + onosSetName +
3351 " but got " + str( sizeResponses[ i ] ) )
3352 utilities.assert_equals( expect=main.TRUE,
3353 actual=sizeResults,
3354 onpass="Set sizes are correct",
3355 onfail="Set sizes are incorrect" )
3356
3357 main.step( "Distributed Set add()" )
3358 onosSet.add( addValue )
3359 addResponses = []
3360 threads = []
3361 for i in main.activeNodes:
3362 t = main.Thread( target=main.CLIs[i].setTestAdd,
3363 name="setTestAdd-" + str( i ),
3364 args=[ onosSetName, addValue ] )
3365 threads.append( t )
3366 t.start()
3367 for t in threads:
3368 t.join()
3369 addResponses.append( t.result )
3370
3371 # main.TRUE = successfully changed the set
3372 # main.FALSE = action resulted in no change in set
3373 # main.ERROR - Some error in executing the function
3374 addResults = main.TRUE
3375 for i in range( len( main.activeNodes ) ):
3376 if addResponses[ i ] == main.TRUE:
3377 # All is well
3378 pass
3379 elif addResponses[ i ] == main.FALSE:
3380 # Already in set, probably fine
3381 pass
3382 elif addResponses[ i ] == main.ERROR:
3383 # Error in execution
3384 addResults = main.FALSE
3385 else:
3386 # unexpected result
3387 addResults = main.FALSE
3388 if addResults != main.TRUE:
3389 main.log.error( "Error executing set add" )
3390
3391 # Check if set is still correct
3392 size = len( onosSet )
3393 getResponses = []
3394 threads = []
3395 for i in main.activeNodes:
3396 t = main.Thread( target=main.CLIs[i].setTestGet,
3397 name="setTestGet-" + str( i ),
3398 args=[ onosSetName ] )
3399 threads.append( t )
3400 t.start()
3401 for t in threads:
3402 t.join()
3403 getResponses.append( t.result )
3404 getResults = main.TRUE
3405 for i in range( len( main.activeNodes ) ):
3406 node = str( main.activeNodes[i] + 1 )
3407 if isinstance( getResponses[ i ], list):
3408 current = set( getResponses[ i ] )
3409 if len( current ) == len( getResponses[ i ] ):
3410 # no repeats
3411 if onosSet != current:
3412 main.log.error( "ONOS" + node + " has incorrect view" +
3413 " of set " + onosSetName + ":\n" +
3414 str( getResponses[ i ] ) )
3415 main.log.debug( "Expected: " + str( onosSet ) )
3416 main.log.debug( "Actual: " + str( current ) )
3417 getResults = main.FALSE
3418 else:
3419 # error, set is not a set
3420 main.log.error( "ONOS" + node + " has repeat elements in" +
3421 " set " + onosSetName + ":\n" +
3422 str( getResponses[ i ] ) )
3423 getResults = main.FALSE
3424 elif getResponses[ i ] == main.ERROR:
3425 getResults = main.FALSE
3426 sizeResponses = []
3427 threads = []
3428 for i in main.activeNodes:
3429 t = main.Thread( target=main.CLIs[i].setTestSize,
3430 name="setTestSize-" + str( i ),
3431 args=[ onosSetName ] )
3432 threads.append( t )
3433 t.start()
3434 for t in threads:
3435 t.join()
3436 sizeResponses.append( t.result )
3437 sizeResults = main.TRUE
3438 for i in range( len( main.activeNodes ) ):
3439 node = str( main.activeNodes[i] + 1 )
3440 if size != sizeResponses[ i ]:
3441 sizeResults = main.FALSE
3442 main.log.error( "ONOS" + node +
3443 " expected a size of " + str( size ) +
3444 " for set " + onosSetName +
3445 " but got " + str( sizeResponses[ i ] ) )
3446 addResults = addResults and getResults and sizeResults
3447 utilities.assert_equals( expect=main.TRUE,
3448 actual=addResults,
3449 onpass="Set add correct",
3450 onfail="Set add was incorrect" )
3451
3452 main.step( "Distributed Set addAll()" )
3453 onosSet.update( addAllValue.split() )
3454 addResponses = []
3455 threads = []
3456 for i in main.activeNodes:
3457 t = main.Thread( target=main.CLIs[i].setTestAdd,
3458 name="setTestAddAll-" + str( i ),
3459 args=[ onosSetName, addAllValue ] )
3460 threads.append( t )
3461 t.start()
3462 for t in threads:
3463 t.join()
3464 addResponses.append( t.result )
3465
3466 # main.TRUE = successfully changed the set
3467 # main.FALSE = action resulted in no change in set
3468 # main.ERROR - Some error in executing the function
3469 addAllResults = main.TRUE
3470 for i in range( len( main.activeNodes ) ):
3471 if addResponses[ i ] == main.TRUE:
3472 # All is well
3473 pass
3474 elif addResponses[ i ] == main.FALSE:
3475 # Already in set, probably fine
3476 pass
3477 elif addResponses[ i ] == main.ERROR:
3478 # Error in execution
3479 addAllResults = main.FALSE
3480 else:
3481 # unexpected result
3482 addAllResults = main.FALSE
3483 if addAllResults != main.TRUE:
3484 main.log.error( "Error executing set addAll" )
3485
3486 # Check if set is still correct
3487 size = len( onosSet )
3488 getResponses = []
3489 threads = []
3490 for i in main.activeNodes:
3491 t = main.Thread( target=main.CLIs[i].setTestGet,
3492 name="setTestGet-" + str( i ),
3493 args=[ onosSetName ] )
3494 threads.append( t )
3495 t.start()
3496 for t in threads:
3497 t.join()
3498 getResponses.append( t.result )
3499 getResults = main.TRUE
3500 for i in range( len( main.activeNodes ) ):
3501 node = str( main.activeNodes[i] + 1 )
3502 if isinstance( getResponses[ i ], list):
3503 current = set( getResponses[ i ] )
3504 if len( current ) == len( getResponses[ i ] ):
3505 # no repeats
3506 if onosSet != current:
3507 main.log.error( "ONOS" + node +
3508 " has incorrect view" +
3509 " of set " + onosSetName + ":\n" +
3510 str( getResponses[ i ] ) )
3511 main.log.debug( "Expected: " + str( onosSet ) )
3512 main.log.debug( "Actual: " + str( current ) )
3513 getResults = main.FALSE
3514 else:
3515 # error, set is not a set
3516 main.log.error( "ONOS" + node +
3517 " has repeat elements in" +
3518 " set " + onosSetName + ":\n" +
3519 str( getResponses[ i ] ) )
3520 getResults = main.FALSE
3521 elif getResponses[ i ] == main.ERROR:
3522 getResults = main.FALSE
3523 sizeResponses = []
3524 threads = []
3525 for i in main.activeNodes:
3526 t = main.Thread( target=main.CLIs[i].setTestSize,
3527 name="setTestSize-" + str( i ),
3528 args=[ onosSetName ] )
3529 threads.append( t )
3530 t.start()
3531 for t in threads:
3532 t.join()
3533 sizeResponses.append( t.result )
3534 sizeResults = main.TRUE
3535 for i in range( len( main.activeNodes ) ):
3536 node = str( main.activeNodes[i] + 1 )
3537 if size != sizeResponses[ i ]:
3538 sizeResults = main.FALSE
3539 main.log.error( "ONOS" + node +
3540 " expected a size of " + str( size ) +
3541 " for set " + onosSetName +
3542 " but got " + str( sizeResponses[ i ] ) )
3543 addAllResults = addAllResults and getResults and sizeResults
3544 utilities.assert_equals( expect=main.TRUE,
3545 actual=addAllResults,
3546 onpass="Set addAll correct",
3547 onfail="Set addAll was incorrect" )
3548
3549 main.step( "Distributed Set contains()" )
3550 containsResponses = []
3551 threads = []
3552 for i in main.activeNodes:
3553 t = main.Thread( target=main.CLIs[i].setTestGet,
3554 name="setContains-" + str( i ),
3555 args=[ onosSetName ],
3556 kwargs={ "values": addValue } )
3557 threads.append( t )
3558 t.start()
3559 for t in threads:
3560 t.join()
3561 # NOTE: This is the tuple
3562 containsResponses.append( t.result )
3563
3564 containsResults = main.TRUE
3565 for i in range( len( main.activeNodes ) ):
3566 if containsResponses[ i ] == main.ERROR:
3567 containsResults = main.FALSE
3568 else:
3569 containsResults = containsResults and\
3570 containsResponses[ i ][ 1 ]
3571 utilities.assert_equals( expect=main.TRUE,
3572 actual=containsResults,
3573 onpass="Set contains is functional",
3574 onfail="Set contains failed" )
3575
3576 main.step( "Distributed Set containsAll()" )
3577 containsAllResponses = []
3578 threads = []
3579 for i in main.activeNodes:
3580 t = main.Thread( target=main.CLIs[i].setTestGet,
3581 name="setContainsAll-" + str( i ),
3582 args=[ onosSetName ],
3583 kwargs={ "values": addAllValue } )
3584 threads.append( t )
3585 t.start()
3586 for t in threads:
3587 t.join()
3588 # NOTE: This is the tuple
3589 containsAllResponses.append( t.result )
3590
3591 containsAllResults = main.TRUE
3592 for i in range( len( main.activeNodes ) ):
3593 if containsResponses[ i ] == main.ERROR:
3594 containsResults = main.FALSE
3595 else:
3596 containsResults = containsResults and\
3597 containsResponses[ i ][ 1 ]
3598 utilities.assert_equals( expect=main.TRUE,
3599 actual=containsAllResults,
3600 onpass="Set containsAll is functional",
3601 onfail="Set containsAll failed" )
3602
        main.step( "Distributed Set remove()" )
        # Issue the remove from every active node; presumably only one of
        # the concurrent removes actually changes the set — TODO confirm.
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # ONOS node labels are 1-based; activeNodes indices are 0-based
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify that every node reports the expected set size.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the remove, get, and size checks all passed
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3699
        main.step( "Distributed Set removeAll()" )
        # Issue the removeAll from every active node concurrently; update
        # the local reference set to match the expected end state.
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # ONOS node labels are 1-based; activeNodes indices are 0-based
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify that every node reports the expected set size.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the removeAll, get, and size checks all passed
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3799
        main.step( "Distributed Set addAll()" )
        # Re-add all of the addAll values from every active node and update
        # the local reference set to match the expected end state.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # ONOS node labels are 1-based; activeNodes indices are 0-based
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify that every node reports the expected set size.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the add, get, and size checks all passed
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3896
        main.step( "Distributed Set clear()" )
        # Clear the distributed set from every active node; the CLI's
        # "clear" kwarg makes setTestRemove empty the set instead of
        # removing specific values.
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # ONOS node labels are 1-based; activeNodes indices are 0-based
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify that every node reports the expected set size.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the clear, get, and size checks all passed
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
3994
        main.step( "Distributed Set addAll()" )
        # Repopulate the set after the clear() step, again issuing the add
        # from every active node, and update the local reference set.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # ONOS node labels are 1-based; activeNodes indices are 0-based
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify that every node reports the expected set size.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the add, get, and size checks all passed
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4091
        main.step( "Distributed Set retain()" )
        # Retain only the retainValue elements; the CLI's "retain" kwarg
        # makes setTestRemove behave like Java's Set.retainAll().
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # ONOS node labels are 1-based; activeNodes indices are 0-based
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Verify that every node reports the expected set size.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if the retain, get, and size checks all passed
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4188
4189 # Transactional maps
4190 main.step( "Partitioned Transactional maps put" )
4191 tMapValue = "Testing"
4192 numKeys = 100
4193 putResult = True
4194 node = main.activeNodes[0]
4195 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4196 if putResponses and len( putResponses ) == 100:
4197 for i in putResponses:
4198 if putResponses[ i ][ 'value' ] != tMapValue:
4199 putResult = False
4200 else:
4201 putResult = False
4202 if not putResult:
4203 main.log.debug( "Put response values: " + str( putResponses ) )
4204 utilities.assert_equals( expect=True,
4205 actual=putResult,
4206 onpass="Partitioned Transactional Map put successful",
4207 onfail="Partitioned Transactional Map put values are incorrect" )
4208
4209 main.step( "Partitioned Transactional maps get" )
4210 getCheck = True
4211 for n in range( 1, numKeys + 1 ):
4212 getResponses = []
4213 threads = []
4214 valueCheck = True
4215 for i in main.activeNodes:
4216 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4217 name="TMap-get-" + str( i ),
4218 args=[ "Key" + str( n ) ] )
4219 threads.append( t )
4220 t.start()
4221 for t in threads:
4222 t.join()
4223 getResponses.append( t.result )
4224 for node in getResponses:
4225 if node != tMapValue:
4226 valueCheck = False
4227 if not valueCheck:
4228 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4229 main.log.warn( getResponses )
4230 getCheck = getCheck and valueCheck
4231 utilities.assert_equals( expect=True,
4232 actual=getCheck,
4233 onpass="Partitioned Transactional Map get values were correct",
4234 onfail="Partitioned Transactional Map values incorrect" )