blob: 47fb895f2961dd939366a1e566538dc700e0172f [file] [log] [blame]
Jon Hall6e709752016-02-01 13:38:46 -08001"""
2Description: This test is to determine if ONOS can handle
3 a full network partition
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
14CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
28class HAfullNetPartition:
29
    def __init__( self ):
        """Initialize the test class.

        TestON instantiates the class with no arguments; ``default`` is the
        placeholder attribute the framework expects on every test class.
        """
        self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
51 import imp
52 import pexpect
53 import time
Jon Halla440e872016-03-31 15:15:50 -070054 import json
Jon Hall6e709752016-02-01 13:38:46 -080055 main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
56 "initialization" )
57 main.case( "Setting up test environment" )
58 main.caseExplanation = "Setup the test environment including " +\
59 "installing ONOS, starting Mininet and ONOS" +\
60 "cli sessions."
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
69 main.numCtrls = int( main.params[ 'num_controllers' ] )
70 if main.ONOSbench.maxNodes:
71 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
74 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070081 # These are for csv plotting in jenkins
82 global labels
83 global data
84 labels = []
85 data = []
Jon Hall6e709752016-02-01 13:38:46 -080086
87 # FIXME: just get controller port from params?
88 # TODO: do we really need all these?
89 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
90 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
91 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
92 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
93 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
94 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
95 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
96
97 try:
Jon Halla440e872016-03-31 15:15:50 -070098 from tests.HAsanity.dependencies.Counters import Counters
99 main.Counters = Counters()
Jon Hall6e709752016-02-01 13:38:46 -0800100 except Exception as e:
101 main.log.exception( e )
102 main.cleanup()
103 main.exit()
104
105 main.CLIs = []
106 main.nodes = []
107 ipList = []
108 for i in range( 1, main.numCtrls + 1 ):
109 try:
110 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
111 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
112 ipList.append( main.nodes[ -1 ].ip_address )
113 except AttributeError:
114 break
115
116 main.step( "Create cell file" )
117 cellAppString = main.params[ 'ENV' ][ 'appString' ]
118 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
119 main.Mininet1.ip_address,
120 cellAppString, ipList )
121 main.step( "Applying cell variable to environment" )
122 cellResult = main.ONOSbench.setCell( cellName )
123 verifyResult = main.ONOSbench.verifyCell()
124
125 # FIXME:this is short term fix
126 main.log.info( "Removing raft logs" )
127 main.ONOSbench.onosRemoveRaftLogs()
128
129 main.log.info( "Uninstalling ONOS" )
130 for node in main.nodes:
131 main.ONOSbench.onosUninstall( node.ip_address )
132
133 # Make sure ONOS is DEAD
134 main.log.info( "Killing any ONOS processes" )
135 killResults = main.TRUE
136 for node in main.nodes:
137 killed = main.ONOSbench.onosKill( node.ip_address )
138 killResults = killResults and killed
139
140 cleanInstallResult = main.TRUE
141 gitPullResult = main.TRUE
142
143 main.step( "Starting Mininet" )
144 # scp topo file to mininet
145 # TODO: move to params?
146 topoName = "obelisk.py"
147 filePath = main.ONOSbench.home + "/tools/test/topos/"
148 main.ONOSbench.scp( main.Mininet1,
149 filePath + topoName,
150 main.Mininet1.home,
151 direction="to" )
152 mnResult = main.Mininet1.startNet( )
153 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
154 onpass="Mininet Started",
155 onfail="Error starting Mininet" )
156
157 main.step( "Git checkout and pull " + gitBranch )
158 if PULLCODE:
159 main.ONOSbench.gitCheckout( gitBranch )
160 gitPullResult = main.ONOSbench.gitPull()
161 # values of 1 or 3 are good
162 utilities.assert_lesser( expect=0, actual=gitPullResult,
163 onpass="Git pull successful",
164 onfail="Git pull failed" )
165 main.ONOSbench.getVersion( report=True )
166
167 main.step( "Using mvn clean install" )
168 cleanInstallResult = main.TRUE
169 if PULLCODE and gitPullResult == main.TRUE:
170 cleanInstallResult = main.ONOSbench.cleanInstall()
171 else:
172 main.log.warn( "Did not pull new code so skipping mvn " +
173 "clean install" )
174 utilities.assert_equals( expect=main.TRUE,
175 actual=cleanInstallResult,
176 onpass="MCI successful",
177 onfail="MCI failed" )
178 # GRAPHS
179 # NOTE: important params here:
180 # job = name of Jenkins job
181 # Plot Name = Plot-HA, only can be used if multiple plots
182 # index = The number of the graph under plot name
183 job = "HAfullNetPartition"
184 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700185 index = "1"
Jon Hall6e709752016-02-01 13:38:46 -0800186 graphs = '<ac:structured-macro ac:name="html">\n'
187 graphs += '<ac:plain-text-body><![CDATA[\n'
188 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
189 '/plot/' + plotName + '/getPlot?index=' + index +\
190 '&width=500&height=300"' +\
191 'noborder="0" width="500" height="300" scrolling="yes" ' +\
192 'seamless="seamless"></iframe>\n'
193 graphs += ']]></ac:plain-text-body>\n'
194 graphs += '</ac:structured-macro>\n'
195 main.log.wiki(graphs)
196
197 main.step( "Creating ONOS package" )
198 # copy gen-partions file to ONOS
199 # NOTE: this assumes TestON and ONOS are on the same machine
200 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
201 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
202 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
203 main.ONOSbench.ip_address,
204 srcFile,
205 dstDir,
206 pwd=main.ONOSbench.pwd,
207 direction="from" )
208 packageResult = main.ONOSbench.onosPackage()
209 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
210 onpass="ONOS package successful",
211 onfail="ONOS package failed" )
212
213 main.step( "Installing ONOS package" )
214 onosInstallResult = main.TRUE
215 for node in main.nodes:
216 tmpResult = main.ONOSbench.onosInstall( options="-f",
217 node=node.ip_address )
218 onosInstallResult = onosInstallResult and tmpResult
219 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
220 onpass="ONOS install successful",
221 onfail="ONOS install failed" )
222 # clean up gen-partitions file
223 try:
224 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
225 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
226 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
227 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
228 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
229 str( main.ONOSbench.handle.before ) )
230 except ( pexpect.TIMEOUT, pexpect.EOF ):
231 main.log.exception( "ONOSbench: pexpect exception found:" +
232 main.ONOSbench.handle.before )
233 main.cleanup()
234 main.exit()
235
236 main.step( "Checking if ONOS is up yet" )
237 for i in range( 2 ):
238 onosIsupResult = main.TRUE
239 for node in main.nodes:
240 started = main.ONOSbench.isup( node.ip_address )
241 if not started:
242 main.log.error( node.name + " hasn't started" )
243 onosIsupResult = onosIsupResult and started
244 if onosIsupResult == main.TRUE:
245 break
246 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
247 onpass="ONOS startup successful",
248 onfail="ONOS startup failed" )
249
250 main.log.step( "Starting ONOS CLI sessions" )
251 cliResults = main.TRUE
252 threads = []
253 for i in range( main.numCtrls ):
254 t = main.Thread( target=main.CLIs[i].startOnosCli,
255 name="startOnosCli-" + str( i ),
256 args=[main.nodes[i].ip_address] )
257 threads.append( t )
258 t.start()
259
260 for t in threads:
261 t.join()
262 cliResults = cliResults and t.result
263 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
264 onpass="ONOS cli startup successful",
265 onfail="ONOS cli startup failed" )
266
267 # Create a list of active nodes for use when some nodes are stopped
268 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
269
270 if main.params[ 'tcpdump' ].lower() == "true":
271 main.step( "Start Packet Capture MN" )
272 main.Mininet2.startTcpdump(
273 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
274 + "-MN.pcap",
275 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
276 port=main.params[ 'MNtcpdump' ][ 'port' ] )
277
278 main.step( "App Ids check" )
Jon Hall6e709752016-02-01 13:38:46 -0800279 appCheck = main.TRUE
280 threads = []
281 for i in main.activeNodes:
282 t = main.Thread( target=main.CLIs[i].appToIDCheck,
283 name="appToIDCheck-" + str( i ),
284 args=[] )
285 threads.append( t )
286 t.start()
287
288 for t in threads:
289 t.join()
290 appCheck = appCheck and t.result
291 if appCheck != main.TRUE:
292 node = main.activeNodes[0]
293 main.log.warn( main.CLIs[node].apps() )
294 main.log.warn( main.CLIs[node].appIDs() )
295 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
296 onpass="App Ids seem to be correct",
297 onfail="Something is wrong with app Ids" )
298
Jon Halla440e872016-03-31 15:15:50 -0700299 main.step( "Checking ONOS nodes" )
300 nodesOutput = []
301 nodeResults = main.TRUE
302 threads = []
303 for i in main.activeNodes:
304 t = main.Thread( target=main.CLIs[i].nodes,
305 name="nodes-" + str( i ),
306 args=[ ] )
307 threads.append( t )
308 t.start()
309
310 for t in threads:
311 t.join()
312 nodesOutput.append( t.result )
313 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
314 ips.sort()
315 for i in nodesOutput:
316 try:
317 current = json.loads( i )
318 activeIps = []
319 currentResult = main.FALSE
320 for node in current:
321 if node['state'] == 'READY':
322 activeIps.append( node['ip'] )
323 activeIps.sort()
324 if ips == activeIps:
325 currentResult = main.TRUE
326 except ( ValueError, TypeError ):
327 main.log.error( "Error parsing nodes output" )
328 main.log.warn( repr( i ) )
329 currentResult = main.FALSE
330 nodeResults = nodeResults and currentResult
331 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
332 onpass="Nodes check successful",
333 onfail="Nodes check NOT successful" )
334
335 if not nodeResults:
336 for cli in main.CLIs:
337 main.log.debug( "{} components not ACTIVE: \n{}".format(
338 cli.name,
339 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
340
Jon Hall6e709752016-02-01 13:38:46 -0800341 if cliResults == main.FALSE:
342 main.log.error( "Failed to start ONOS, stopping test" )
343 main.cleanup()
344 main.exit()
345
Jon Hall172b7ba2016-04-07 18:12:20 -0700346 main.step( "Activate apps defined in the params file" )
347 # get data from the params
348 apps = main.params.get( 'apps' )
349 if apps:
350 apps = apps.split(',')
351 main.log.warn( apps )
352 activateResult = True
353 for app in apps:
354 main.CLIs[ 0 ].app( app, "Activate" )
355 # TODO: check this worked
356 time.sleep( 10 ) # wait for apps to activate
357 for app in apps:
358 state = main.CLIs[ 0 ].appStatus( app )
359 if state == "ACTIVE":
360 activateResult = activeResult and True
361 else:
362 main.log.error( "{} is in {} state".format( app, state ) )
363 activeResult = False
364 utilities.assert_equals( expect=True,
365 actual=activateResult,
366 onpass="Successfully activated apps",
367 onfail="Failed to activate apps" )
368 else:
369 main.log.warn( "No apps were specified to be loaded after startup" )
370
371 main.step( "Set ONOS configurations" )
372 config = main.params.get( 'ONOS_Configuration' )
373 if config:
374 main.log.debug( config )
375 checkResult = main.TRUE
376 for component in config:
377 for setting in config[component]:
378 value = config[component][setting]
379 check = main.CLIs[ 0 ].setCfg( component, setting, value )
380 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
381 checkResult = check and checkResult
382 utilities.assert_equals( expect=main.TRUE,
383 actual=checkResult,
384 onpass="Successfully set config",
385 onfail="Failed to set config" )
386 else:
387 main.log.warn( "No configurations were specified to be changed after startup" )
388
Jon Hall6e709752016-02-01 13:38:46 -0800389 def CASE2( self, main ):
390 """
391 Assign devices to controllers
392 """
393 import re
394 assert main.numCtrls, "main.numCtrls not defined"
395 assert main, "main not defined"
396 assert utilities.assert_equals, "utilities.assert_equals not defined"
397 assert main.CLIs, "main.CLIs not defined"
398 assert main.nodes, "main.nodes not defined"
399 assert ONOS1Port, "ONOS1Port not defined"
400 assert ONOS2Port, "ONOS2Port not defined"
401 assert ONOS3Port, "ONOS3Port not defined"
402 assert ONOS4Port, "ONOS4Port not defined"
403 assert ONOS5Port, "ONOS5Port not defined"
404 assert ONOS6Port, "ONOS6Port not defined"
405 assert ONOS7Port, "ONOS7Port not defined"
406
407 main.case( "Assigning devices to controllers" )
408 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
409 "and check that an ONOS node becomes the " +\
410 "master of the device."
411 main.step( "Assign switches to controllers" )
412
413 ipList = []
414 for i in range( main.numCtrls ):
415 ipList.append( main.nodes[ i ].ip_address )
416 swList = []
417 for i in range( 1, 29 ):
418 swList.append( "s" + str( i ) )
419 main.Mininet1.assignSwController( sw=swList, ip=ipList )
420
421 mastershipCheck = main.TRUE
422 for i in range( 1, 29 ):
423 response = main.Mininet1.getSwController( "s" + str( i ) )
424 try:
425 main.log.info( str( response ) )
426 except Exception:
427 main.log.info( repr( response ) )
428 for node in main.nodes:
429 if re.search( "tcp:" + node.ip_address, response ):
430 mastershipCheck = mastershipCheck and main.TRUE
431 else:
432 main.log.error( "Error, node " + node.ip_address + " is " +
433 "not in the list of controllers s" +
434 str( i ) + " is connecting to." )
435 mastershipCheck = main.FALSE
436 utilities.assert_equals(
437 expect=main.TRUE,
438 actual=mastershipCheck,
439 onpass="Switch mastership assigned correctly",
440 onfail="Switches not assigned correctly to controllers" )
441
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually sets the master ONOS node for each of the 28 obelisk-topology
        switches with 'device-role', then re-reads the roles to confirm the
        reassignment took effect.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                                "device. Then manually assign" +\
                                " mastership to specific ONOS nodes using" +\
                                " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI calls go through the first active node's session
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster (the `% main.numCtrls` wraps indices for smaller ones)
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = controller index, ip = its address,
                # deviceId = ONOS id looked up from the switch's dpid suffix
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    # Unreachable for 1..28; guards future range changes
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                # getDevice returning None raises AttributeError; a missing
                # id trips this assert — both are caught below
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        # Re-read each device's role; it should match the ip we assigned above
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
563
564 def CASE3( self, main ):
565 """
566 Assign intents
567 """
568 import time
569 import json
570 assert main.numCtrls, "main.numCtrls not defined"
571 assert main, "main not defined"
572 assert utilities.assert_equals, "utilities.assert_equals not defined"
573 assert main.CLIs, "main.CLIs not defined"
574 assert main.nodes, "main.nodes not defined"
575 main.case( "Adding host Intents" )
576 main.caseExplanation = "Discover hosts by using pingall then " +\
577 "assign predetermined host-to-host intents." +\
578 " After installation, check that the intent" +\
579 " is distributed to all nodes and the state" +\
580 " is INSTALLED"
581
582 # install onos-app-fwd
583 main.step( "Install reactive forwarding app" )
584 onosCli = main.CLIs[ main.activeNodes[0] ]
585 installResults = onosCli.activateApp( "org.onosproject.fwd" )
586 utilities.assert_equals( expect=main.TRUE, actual=installResults,
587 onpass="Install fwd successful",
588 onfail="Install fwd failed" )
589
590 main.step( "Check app ids" )
591 appCheck = main.TRUE
592 threads = []
593 for i in main.activeNodes:
594 t = main.Thread( target=main.CLIs[i].appToIDCheck,
595 name="appToIDCheck-" + str( i ),
596 args=[] )
597 threads.append( t )
598 t.start()
599
600 for t in threads:
601 t.join()
602 appCheck = appCheck and t.result
603 if appCheck != main.TRUE:
604 main.log.warn( onosCli.apps() )
605 main.log.warn( onosCli.appIDs() )
606 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
607 onpass="App Ids seem to be correct",
608 onfail="Something is wrong with app Ids" )
609
610 main.step( "Discovering Hosts( Via pingall for now )" )
611 # FIXME: Once we have a host discovery mechanism, use that instead
612 # REACTIVE FWD test
613 pingResult = main.FALSE
614 passMsg = "Reactive Pingall test passed"
615 time1 = time.time()
616 pingResult = main.Mininet1.pingall()
617 time2 = time.time()
618 if not pingResult:
619 main.log.warn("First pingall failed. Trying again...")
620 pingResult = main.Mininet1.pingall()
621 passMsg += " on the second try"
622 utilities.assert_equals(
623 expect=main.TRUE,
624 actual=pingResult,
625 onpass= passMsg,
626 onfail="Reactive Pingall failed, " +
627 "one or more ping pairs failed" )
628 main.log.info( "Time for pingall: %2f seconds" %
629 ( time2 - time1 ) )
630 # timeout for fwd flows
631 time.sleep( 11 )
632 # uninstall onos-app-fwd
633 main.step( "Uninstall reactive forwarding app" )
634 node = main.activeNodes[0]
635 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
636 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
637 onpass="Uninstall fwd successful",
638 onfail="Uninstall fwd failed" )
639
640 main.step( "Check app ids" )
641 threads = []
642 appCheck2 = main.TRUE
643 for i in main.activeNodes:
644 t = main.Thread( target=main.CLIs[i].appToIDCheck,
645 name="appToIDCheck-" + str( i ),
646 args=[] )
647 threads.append( t )
648 t.start()
649
650 for t in threads:
651 t.join()
652 appCheck2 = appCheck2 and t.result
653 if appCheck2 != main.TRUE:
654 node = main.activeNodes[0]
655 main.log.warn( main.CLIs[node].apps() )
656 main.log.warn( main.CLIs[node].appIDs() )
657 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
658 onpass="App Ids seem to be correct",
659 onfail="Something is wrong with app Ids" )
660
661 main.step( "Add host intents via cli" )
662 intentIds = []
663 # TODO: move the host numbers to params
664 # Maybe look at all the paths we ping?
665 intentAddResult = True
666 hostResult = main.TRUE
667 for i in range( 8, 18 ):
668 main.log.info( "Adding host intent between h" + str( i ) +
669 " and h" + str( i + 10 ) )
670 host1 = "00:00:00:00:00:" + \
671 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
672 host2 = "00:00:00:00:00:" + \
673 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
674 # NOTE: getHost can return None
675 host1Dict = onosCli.getHost( host1 )
676 host2Dict = onosCli.getHost( host2 )
677 host1Id = None
678 host2Id = None
679 if host1Dict and host2Dict:
680 host1Id = host1Dict.get( 'id', None )
681 host2Id = host2Dict.get( 'id', None )
682 if host1Id and host2Id:
683 nodeNum = ( i % len( main.activeNodes ) )
684 node = main.activeNodes[nodeNum]
685 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
686 if tmpId:
687 main.log.info( "Added intent with id: " + tmpId )
688 intentIds.append( tmpId )
689 else:
690 main.log.error( "addHostIntent returned: " +
691 repr( tmpId ) )
692 else:
693 main.log.error( "Error, getHost() failed for h" + str( i ) +
694 " and/or h" + str( i + 10 ) )
695 node = main.activeNodes[0]
696 hosts = main.CLIs[node].hosts()
697 main.log.warn( "Hosts output: " )
698 try:
699 main.log.warn( json.dumps( json.loads( hosts ),
700 sort_keys=True,
701 indent=4,
702 separators=( ',', ': ' ) ) )
703 except ( ValueError, TypeError ):
704 main.log.warn( repr( hosts ) )
705 hostResult = main.FALSE
706 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
707 onpass="Found a host id for each host",
708 onfail="Error looking up host ids" )
709
710 intentStart = time.time()
711 onosIds = onosCli.getAllIntentsId()
712 main.log.info( "Submitted intents: " + str( intentIds ) )
713 main.log.info( "Intents in ONOS: " + str( onosIds ) )
714 for intent in intentIds:
715 if intent in onosIds:
716 pass # intent submitted is in onos
717 else:
718 intentAddResult = False
719 if intentAddResult:
720 intentStop = time.time()
721 else:
722 intentStop = None
723 # Print the intent states
724 intents = onosCli.intents()
725 intentStates = []
726 installedCheck = True
727 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
728 count = 0
729 try:
730 for intent in json.loads( intents ):
731 state = intent.get( 'state', None )
732 if "INSTALLED" not in state:
733 installedCheck = False
734 intentId = intent.get( 'id', None )
735 intentStates.append( ( intentId, state ) )
736 except ( ValueError, TypeError ):
737 main.log.exception( "Error parsing intents" )
738 # add submitted intents not in the store
739 tmplist = [ i for i, s in intentStates ]
740 missingIntents = False
741 for i in intentIds:
742 if i not in tmplist:
743 intentStates.append( ( i, " - " ) )
744 missingIntents = True
745 intentStates.sort()
746 for i, s in intentStates:
747 count += 1
748 main.log.info( "%-6s%-15s%-15s" %
749 ( str( count ), str( i ), str( s ) ) )
750 leaders = onosCli.leaders()
751 try:
752 missing = False
753 if leaders:
754 parsedLeaders = json.loads( leaders )
755 main.log.warn( json.dumps( parsedLeaders,
756 sort_keys=True,
757 indent=4,
758 separators=( ',', ': ' ) ) )
759 # check for all intent partitions
760 topics = []
761 for i in range( 14 ):
762 topics.append( "intent-partition-" + str( i ) )
763 main.log.debug( topics )
764 ONOStopics = [ j['topic'] for j in parsedLeaders ]
765 for topic in topics:
766 if topic not in ONOStopics:
767 main.log.error( "Error: " + topic +
768 " not in leaders" )
769 missing = True
770 else:
771 main.log.error( "leaders() returned None" )
772 except ( ValueError, TypeError ):
773 main.log.exception( "Error parsing leaders" )
774 main.log.error( repr( leaders ) )
775 # Check all nodes
776 if missing:
777 for i in main.activeNodes:
778 response = main.CLIs[i].leaders( jsonFormat=False)
779 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
780 str( response ) )
781
782 partitions = onosCli.partitions()
783 try:
784 if partitions :
785 parsedPartitions = json.loads( partitions )
786 main.log.warn( json.dumps( parsedPartitions,
787 sort_keys=True,
788 indent=4,
789 separators=( ',', ': ' ) ) )
790 # TODO check for a leader in all paritions
791 # TODO check for consistency among nodes
792 else:
793 main.log.error( "partitions() returned None" )
794 except ( ValueError, TypeError ):
795 main.log.exception( "Error parsing partitions" )
796 main.log.error( repr( partitions ) )
797 pendingMap = onosCli.pendingMap()
798 try:
799 if pendingMap :
800 parsedPending = json.loads( pendingMap )
801 main.log.warn( json.dumps( parsedPending,
802 sort_keys=True,
803 indent=4,
804 separators=( ',', ': ' ) ) )
805 # TODO check something here?
806 else:
807 main.log.error( "pendingMap() returned None" )
808 except ( ValueError, TypeError ):
809 main.log.exception( "Error parsing pending map" )
810 main.log.error( repr( pendingMap ) )
811
812 intentAddResult = bool( intentAddResult and not missingIntents and
813 installedCheck )
814 if not intentAddResult:
815 main.log.error( "Error in pushing host intents to ONOS" )
816
817 main.step( "Intent Anti-Entropy dispersion" )
818 for j in range(100):
819 correct = True
820 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
821 for i in main.activeNodes:
822 onosIds = []
823 ids = main.CLIs[i].getAllIntentsId()
824 onosIds.append( ids )
825 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
826 str( sorted( onosIds ) ) )
827 if sorted( ids ) != sorted( intentIds ):
828 main.log.warn( "Set of intent IDs doesn't match" )
829 correct = False
830 break
831 else:
832 intents = json.loads( main.CLIs[i].intents() )
833 for intent in intents:
834 if intent[ 'state' ] != "INSTALLED":
835 main.log.warn( "Intent " + intent[ 'id' ] +
836 " is " + intent[ 'state' ] )
837 correct = False
838 break
839 if correct:
840 break
841 else:
842 time.sleep(1)
843 if not intentStop:
844 intentStop = time.time()
845 global gossipTime
846 gossipTime = intentStop - intentStart
847 main.log.info( "It took about " + str( gossipTime ) +
848 " seconds for all intents to appear in each node" )
849 gossipPeriod = int( main.params['timers']['gossip'] )
850 maxGossipTime = gossipPeriod * len( main.activeNodes )
851 utilities.assert_greater_equals(
852 expect=maxGossipTime, actual=gossipTime,
853 onpass="ECM anti-entropy for intents worked within " +
854 "expected time",
855 onfail="Intent ECM anti-entropy took too long. " +
856 "Expected time:{}, Actual time:{}".format( maxGossipTime,
857 gossipTime ) )
858 if gossipTime <= maxGossipTime:
859 intentAddResult = True
860
861 if not intentAddResult or "key" in pendingMap:
862 import time
863 installedCheck = True
864 main.log.info( "Sleeping 60 seconds to see if intents are found" )
865 time.sleep( 60 )
866 onosIds = onosCli.getAllIntentsId()
867 main.log.info( "Submitted intents: " + str( intentIds ) )
868 main.log.info( "Intents in ONOS: " + str( onosIds ) )
869 # Print the intent states
870 intents = onosCli.intents()
871 intentStates = []
872 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
873 count = 0
874 try:
875 for intent in json.loads( intents ):
876 # Iter through intents of a node
877 state = intent.get( 'state', None )
878 if "INSTALLED" not in state:
879 installedCheck = False
880 intentId = intent.get( 'id', None )
881 intentStates.append( ( intentId, state ) )
882 except ( ValueError, TypeError ):
883 main.log.exception( "Error parsing intents" )
884 # add submitted intents not in the store
885 tmplist = [ i for i, s in intentStates ]
886 for i in intentIds:
887 if i not in tmplist:
888 intentStates.append( ( i, " - " ) )
889 intentStates.sort()
890 for i, s in intentStates:
891 count += 1
892 main.log.info( "%-6s%-15s%-15s" %
893 ( str( count ), str( i ), str( s ) ) )
894 leaders = onosCli.leaders()
895 try:
896 missing = False
897 if leaders:
898 parsedLeaders = json.loads( leaders )
899 main.log.warn( json.dumps( parsedLeaders,
900 sort_keys=True,
901 indent=4,
902 separators=( ',', ': ' ) ) )
903 # check for all intent partitions
904 # check for election
905 topics = []
906 for i in range( 14 ):
907 topics.append( "intent-partition-" + str( i ) )
908 # FIXME: this should only be after we start the app
909 topics.append( "org.onosproject.election" )
910 main.log.debug( topics )
911 ONOStopics = [ j['topic'] for j in parsedLeaders ]
912 for topic in topics:
913 if topic not in ONOStopics:
914 main.log.error( "Error: " + topic +
915 " not in leaders" )
916 missing = True
917 else:
918 main.log.error( "leaders() returned None" )
919 except ( ValueError, TypeError ):
920 main.log.exception( "Error parsing leaders" )
921 main.log.error( repr( leaders ) )
922 # Check all nodes
923 if missing:
924 for i in main.activeNodes:
925 node = main.CLIs[i]
926 response = node.leaders( jsonFormat=False)
927 main.log.warn( str( node.name ) + " leaders output: \n" +
928 str( response ) )
929
930 partitions = onosCli.partitions()
931 try:
932 if partitions :
933 parsedPartitions = json.loads( partitions )
934 main.log.warn( json.dumps( parsedPartitions,
935 sort_keys=True,
936 indent=4,
937 separators=( ',', ': ' ) ) )
938 # TODO check for a leader in all paritions
939 # TODO check for consistency among nodes
940 else:
941 main.log.error( "partitions() returned None" )
942 except ( ValueError, TypeError ):
943 main.log.exception( "Error parsing partitions" )
944 main.log.error( repr( partitions ) )
945 pendingMap = onosCli.pendingMap()
946 try:
947 if pendingMap :
948 parsedPending = json.loads( pendingMap )
949 main.log.warn( json.dumps( parsedPending,
950 sort_keys=True,
951 indent=4,
952 separators=( ',', ': ' ) ) )
953 # TODO check something here?
954 else:
955 main.log.error( "pendingMap() returned None" )
956 except ( ValueError, TypeError ):
957 main.log.exception( "Error parsing pending map" )
958 main.log.error( repr( pendingMap ) )
959
    def CASE4( self, main ):
        """
        Ping across added host intents

        Pings between the host pairs ( h8->h18 ... h17->h27 ) that the host
        intents should connect, then cross-checks intent states, topic
        leadership, store partitions and the pending map. If any intent is
        not yet INSTALLED, waits 60 seconds, dumps state again, and retries
        the pings once more.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"
        main.step( "Ping across added host intents" )
        # All single-node queries below go through the first active node's CLI
        onosCli = main.CLIs[ main.activeNodes[0] ]
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            # Each host hN should reach its intent peer h( N + 10 )
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll up to 40 times ( ~40s ) for every intent to reach INSTALLED
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If some intents never installed, wait and dump state one more time
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
        # Print flowrules
        node = main.activeNodes[0]
        main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        # NOTE(review): the 60s wait only happens when installedCheck was
        #               False; when intents were already installed this
        #               second ping runs immediately — confirm intended
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )
1240
    def CASE5( self, main ):
        """
        Reading state of ONOS

        Takes a snapshot of cluster state ( switch mastership, intents,
        flows, OF tables, topology ) from every active node, checks that the
        nodes agree with each other and with Mininet, and stores the
        snapshot in module globals ( mastershipState, intentState,
        flowState, flows ) so it can be compared against the state after
        the failure case.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents,flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        # Query every active node in parallel, one thread per node
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Sanity-check each node's raw response before comparing
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's view to help debug the disagreement
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            # Save the agreed-upon mastership for later comparison
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            # Save the agreed-upon intents for later comparison
            intentCheck = main.TRUE
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep(30)
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[i] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        # NOTE(review): if a node's flows failed to parse, its entry in
        #               ONOSFlowsJson is None and len( None ) here raises
        #               TypeError — confirm this is acceptable here
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            # Save the agreed-upon flows for later comparison
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        global flows
        flows = []
        # Dump the OpenFlow tables of all 28 switches in the topology
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        # Launch long-running background pings for each src/target pair
        # defined in the params file; checked later for packet loss
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source1' ],
            target=main.params[ 'PING' ][ 'target1' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source2' ],
            target=main.params[ 'PING' ][ 'target2' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source3' ],
            target=main.params[ 'PING' ][ 'target3' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source4' ],
            target=main.params[ 'PING' ][ 'target4' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source5' ],
            target=main.params[ 'PING' ][ 'target5' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source6' ],
            target=main.params[ 'PING' ][ 'target6' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source7' ],
            target=main.params[ 'PING' ][ 'target7' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source8' ],
            target=main.params[ 'PING' ][ 'target8' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source9' ],
            target=main.params[ 'PING' ][ 'target9' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source10' ],
            target=main.params[ 'PING' ][ 'target10' ],
            pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
        # Gather devices, hosts, ports, links and clusters from every
        # active node, each in its own batch of threads
        devices = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].devices,
                             name="devices-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            devices.append( t.result )
        hosts = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].hosts,
                             name="hosts-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            try:
                hosts.append( json.loads( t.result ) )
            except ( ValueError, TypeError ):
                # FIXME: better handling of this, print which node
                #        Maybe use thread name?
                main.log.exception( "Error parsing json output of hosts" )
                main.log.warn( repr( t.result ) )
                hosts.append( None )

        ports = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].ports,
                             name="ports-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ports.append( t.result )
        links = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].links,
                             name="links-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            links.append( t.result )
        clusters = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].clusters,
                             name="clusters-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            clusters.append( t.result )
        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Host view is consistent across ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Each host has an IP address" )
        ipResult = main.TRUE
        for controller in range( 0, len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ]:
                for host in hosts[ controller ]:
                    if not host.get( 'ipAddresses', [ ] ):
                        main.log.error( "Error with host ips on controller" +
                                        controllerStr + ": " + str( host ) )
                        ipResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=ipResult,
            onpass="The ips of the hosts aren't empty",
            onfail="The ip of at least one host is missing" )

        # Strongly connected clusters of devices
        main.step( "Cluster view is consistent across ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE

            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if consistentClustersResult != main.TRUE:
            main.log.debug( clusters )
        # there should always only be one cluster
        main.step( "Cluster view correct across ONOS nodes" )
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
        clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        main.step( "Comparing ONOS topology to MN" )
        devicesResults = main.TRUE
        linksResults = main.TRUE
        hostsResults = main.TRUE
        mnSwitches = main.Mininet1.getSwitches()
        mnLinks = main.Mininet1.getLinks()
        mnHosts = main.Mininet1.getHosts()
        # NOTE(review): 'controller' here is a node index taken from
        #               main.activeNodes, but it is then used both to index
        #               main.activeNodes and to index the per-active-node
        #               result lists positionally; this only lines up while
        #               main.activeNodes == [ 0, 1, ..., n-1 ] — verify
        #               behavior when nodes have been removed by a partition
        for controller in main.activeNodes:
            controllerStr = str( main.activeNodes[controller] + 1 )
            if devices[ controller ] and ports[ controller ] and\
                    "Error" not in devices[ controller ] and\
                    "Error" not in ports[ controller ]:
                currentDevicesResult = main.Mininet1.compareSwitches(
                    mnSwitches,
                    json.loads( devices[ controller ] ),
                    json.loads( ports[ controller ] ) )
            else:
                currentDevicesResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentDevicesResult,
                                     onpass="ONOS" + controllerStr +
                                     " Switches view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " Switches view is incorrect" )
            if links[ controller ] and "Error" not in links[ controller ]:
                currentLinksResult = main.Mininet1.compareLinks(
                    mnSwitches, mnLinks,
                    json.loads( links[ controller ] ) )
            else:
                currentLinksResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentLinksResult,
                                     onpass="ONOS" + controllerStr +
                                     " links view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " links view is incorrect" )

            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                currentHostsResult = main.Mininet1.compareHosts(
                    mnHosts,
                    hosts[ controller ] )
            else:
                currentHostsResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentHostsResult,
                                     onpass="ONOS" + controllerStr +
                                     " hosts exist in Mininet",
                                     onfail="ONOS" + controllerStr +
                                     " hosts don't match Mininet" )

            devicesResults = devicesResults and currentDevicesResult
            linksResults = linksResults and currentLinksResult
            hostsResults = hostsResults and currentHostsResult

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )
1803
1804 def CASE61( self, main ):
1805 """
1806 The Failure case.
1807 """
1808 import math
1809 assert main.numCtrls, "main.numCtrls not defined"
1810 assert main, "main not defined"
1811 assert utilities.assert_equals, "utilities.assert_equals not defined"
1812 assert main.CLIs, "main.CLIs not defined"
1813 assert main.nodes, "main.nodes not defined"
1814 main.case( "Partition ONOS nodes into two distinct partitions" )
1815
1816 main.step( "Checking ONOS Logs for errors" )
1817 for node in main.nodes:
1818 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1819 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1820
1821 n = len( main.nodes ) # Number of nodes
1822 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1823 main.partition = [ 0 ] # ONOS node to partition, listed by index in main.nodes
1824 if n > 3:
1825 main.partition.append( p - 1 )
1826 # NOTE: This only works for cluster sizes of 3,5, or 7.
1827
1828 main.step( "Partitioning ONOS nodes" )
1829 nodeList = [ str( i + 1 ) for i in main.partition ]
1830 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1831 partitionResults = main.TRUE
1832 for i in range( 0, n ):
1833 this = main.nodes[i]
1834 if i not in main.partition:
1835 for j in main.partition:
1836 foe = main.nodes[j]
1837 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1838 #CMD HERE
1839 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1840 this.handle.sendline( cmdStr )
1841 this.handle.expect( "\$" )
1842 main.log.debug( this.handle.before )
1843 else:
1844 for j in range( 0, n ):
1845 if j not in main.partition:
1846 foe = main.nodes[j]
1847 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1848 #CMD HERE
1849 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1850 this.handle.sendline( cmdStr )
1851 this.handle.expect( "\$" )
1852 main.log.debug( this.handle.before )
1853 main.activeNodes.remove( i )
1854 # NOTE: When dynamic clustering is finished, we need to start checking
1855 # main.partion nodes still work when partitioned
1856 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1857 onpass="Firewall rules set successfully",
1858 onfail="Error setting firewall rules" )
1859
1860 main.log.step( "Sleeping 60 seconds" )
1861 time.sleep( 60 )
1862
1863 def CASE62( self, main ):
1864 """
1865 Healing Partition
1866 """
1867 import time
1868 assert main.numCtrls, "main.numCtrls not defined"
1869 assert main, "main not defined"
1870 assert utilities.assert_equals, "utilities.assert_equals not defined"
1871 assert main.CLIs, "main.CLIs not defined"
1872 assert main.nodes, "main.nodes not defined"
1873 assert main.partition, "main.partition not defined"
1874 main.case( "Healing Partition" )
1875
1876 main.step( "Deleteing firewall rules" )
1877 healResults = main.TRUE
1878 for node in main.nodes:
1879 cmdStr = "sudo iptables -F"
1880 node.handle.sendline( cmdStr )
1881 node.handle.expect( "\$" )
1882 main.log.debug( node.handle.before )
1883 utilities.assert_equals( expect=main.TRUE, actual=healResults,
1884 onpass="Firewall rules removed",
1885 onfail="Error removing firewall rules" )
1886
1887 for node in main.partition:
1888 main.activeNodes.append( node )
1889 main.activeNodes.sort()
1890 try:
1891 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1892 "List of active nodes has duplicates, this likely indicates something was run out of order"
1893 except AssertionError:
1894 main.log.exception( "" )
1895 main.cleanup()
1896 main.exit()
1897
    def CASE7( self, main ):
        """
        Check state after ONOS failure.

        Verifies, using only the still-active nodes ( main.activeNodes ):
          - every device has a master and the mastership view is consistent
          - the intents are readable and consistent across nodes, and match
            the intents recorded before the failure
          - switch flow tables match the pre-failure snapshot
          - the leadership election app still has a single, non-partitioned
            leader
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.partition is set by CASE61; default to empty if this case is
        # run without a partition having been induced
        try:
            main.partition
        except AttributeError:
            main.partition = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        # NOTE(review): mastershipCheck is assigned but never used in this case
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output is missing or contains "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes must return the identical roles string
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's pretty-printed view for debugging
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE(review): sorted() on a JSON *string* compares its sorted
        # characters, so this is an order-insensitive approximation of
        # equality, not a structural comparison
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                             "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One table row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a histogram of intent states per node
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # NOTE(review): intentState is presumably a module-level snapshot
        # saved by CASE5 — not visible in this method; verify before editing
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before failure" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same count: fall back to a structural membership check
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        # NOTE(review): 'flows' is assumed to be the per-switch snapshot
        # saved in an earlier case (CASE5) — TODO confirm; NameError here
        # would mean the cases were run out of order
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
            if FlowTables == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        # NOTE: the following block is intentionally disabled (kept as a
        # triple-quoted string, not executed)
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes that were partitioned off; the leader must not
        # be one of them
        partitioned = []
        for i in main.partition:
            partitioned.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in partitioned:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was partitioned" )
                leaderResult = main.FALSE
        # All active nodes must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2219
    def CASE8( self, main ):
        """
        Compare topo.

        Polls devices/hosts/ports/links/clusters from every active ONOS node
        (retrying for up to ~60 s or 3 rounds) and compares them against the
        Mininet topology, host attachment points, cluster (SCC) count, and
        the ONOS nodes' own view of cluster membership.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Fetch devices from every active node in parallel, with retries
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            # Fetch hosts (parsed to objects; None on parse failure)
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host should have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            # Fetch ports
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            # Fetch links
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            # Fetch dataplane clusters (strongly connected components)
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query failed on every node there is nothing to compare
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            # Compare each node's view against Mininet
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                        "Error" not in devices[ controller ] and\
                        "Error" not in ports[ controller ]:

                    # NOTE(review): if this except branch is taken,
                    # currentDevicesResult keeps its value from the previous
                    # controller iteration (or is unbound on the first one)
                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ) as e:
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    # No hosts discovered yet is treated as acceptable here
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                # NOTE(review): this overrides any per-host failures whenever
                # at least one host was discovered — looks intentional (only
                # the zero-host case fails attachment); confirm
                if zeroHosts is False:
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
            topoResult = ( devicesResults and linksResults
                           and hostsResults and ipResult and
                           hostAttachmentResults )
            utilities.assert_equals( expect=True,
                                     actual=topoResult,
                                     onpass="ONOS topology matches Mininet",
                                     onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        # NOTE(review): clusterResults is only assigned in the except branch
        # or when numClusters == 1; on a clean parse with != 1 clusters it is
        # unbound and the topoResult expression below would raise NameError
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        # Roll all of the individual checks into a single overall result
        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
            " seconds for the test to make all the cli calls to fetch " +\
            "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodesOutput = []
        nodeResults = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].nodes,
                             name="nodes-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            nodesOutput.append( t.result )
        ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
        ips.sort()
        # Each node's `nodes` output must list exactly the active IPs as READY
        for i in nodesOutput:
            try:
                current = json.loads( i )
                activeIps = []
                currentResult = main.FALSE
                for node in current:
                    if node['state'] == 'READY':
                        activeIps.append( node['ip'] )
                activeIps.sort()
                if ips == activeIps:
                    currentResult = main.TRUE
            except ( ValueError, TypeError ):
                main.log.error( "Error parsing nodes output" )
                main.log.warn( repr( i ) )
                currentResult = main.FALSE
            nodeResults = nodeResults and currentResult
        utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            for cli in main.CLIs:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
2660 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -08002661
2662 def CASE9( self, main ):
2663 """
2664 Link s3-s28 down
2665 """
2666 import time
2667 assert main.numCtrls, "main.numCtrls not defined"
2668 assert main, "main not defined"
2669 assert utilities.assert_equals, "utilities.assert_equals not defined"
2670 assert main.CLIs, "main.CLIs not defined"
2671 assert main.nodes, "main.nodes not defined"
2672 # NOTE: You should probably run a topology check after this
2673
2674 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2675
2676 description = "Turn off a link to ensure that Link Discovery " +\
2677 "is working properly"
2678 main.case( description )
2679
2680 main.step( "Kill Link between s3 and s28" )
2681 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2682 main.log.info( "Waiting " + str( linkSleep ) +
2683 " seconds for link down to be discovered" )
2684 time.sleep( linkSleep )
2685 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2686 onpass="Link down successful",
2687 onfail="Failed to bring link down" )
2688 # TODO do some sort of check here
2689
2690 def CASE10( self, main ):
2691 """
2692 Link s3-s28 up
2693 """
2694 import time
2695 assert main.numCtrls, "main.numCtrls not defined"
2696 assert main, "main not defined"
2697 assert utilities.assert_equals, "utilities.assert_equals not defined"
2698 assert main.CLIs, "main.CLIs not defined"
2699 assert main.nodes, "main.nodes not defined"
2700 # NOTE: You should probably run a topology check after this
2701
2702 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2703
2704 description = "Restore a link to ensure that Link Discovery is " + \
2705 "working properly"
2706 main.case( description )
2707
2708 main.step( "Bring link between s3 and s28 back up" )
2709 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2710 main.log.info( "Waiting " + str( linkSleep ) +
2711 " seconds for link up to be discovered" )
2712 time.sleep( linkSleep )
2713 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2714 onpass="Link up successful",
2715 onfail="Failed to bring link up" )
2716 # TODO do some sort of check here
2717
2718 def CASE11( self, main ):
2719 """
2720 Switch Down
2721 """
2722 # NOTE: You should probably run a topology check after this
2723 import time
2724 assert main.numCtrls, "main.numCtrls not defined"
2725 assert main, "main not defined"
2726 assert utilities.assert_equals, "utilities.assert_equals not defined"
2727 assert main.CLIs, "main.CLIs not defined"
2728 assert main.nodes, "main.nodes not defined"
2729
2730 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2731
2732 description = "Killing a switch to ensure it is discovered correctly"
2733 onosCli = main.CLIs[ main.activeNodes[0] ]
2734 main.case( description )
2735 switch = main.params[ 'kill' ][ 'switch' ]
2736 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2737
2738 # TODO: Make this switch parameterizable
2739 main.step( "Kill " + switch )
2740 main.log.info( "Deleting " + switch )
2741 main.Mininet1.delSwitch( switch )
2742 main.log.info( "Waiting " + str( switchSleep ) +
2743 " seconds for switch down to be discovered" )
2744 time.sleep( switchSleep )
2745 device = onosCli.getDevice( dpid=switchDPID )
2746 # Peek at the deleted switch
2747 main.log.warn( str( device ) )
2748 result = main.FALSE
2749 if device and device[ 'available' ] is False:
2750 result = main.TRUE
2751 utilities.assert_equals( expect=main.TRUE, actual=result,
2752 onpass="Kill switch successful",
2753 onfail="Failed to kill switch?" )
2754
2755 def CASE12( self, main ):
2756 """
2757 Switch Up
2758 """
2759 # NOTE: You should probably run a topology check after this
2760 import time
2761 assert main.numCtrls, "main.numCtrls not defined"
2762 assert main, "main not defined"
2763 assert utilities.assert_equals, "utilities.assert_equals not defined"
2764 assert main.CLIs, "main.CLIs not defined"
2765 assert main.nodes, "main.nodes not defined"
2766 assert ONOS1Port, "ONOS1Port not defined"
2767 assert ONOS2Port, "ONOS2Port not defined"
2768 assert ONOS3Port, "ONOS3Port not defined"
2769 assert ONOS4Port, "ONOS4Port not defined"
2770 assert ONOS5Port, "ONOS5Port not defined"
2771 assert ONOS6Port, "ONOS6Port not defined"
2772 assert ONOS7Port, "ONOS7Port not defined"
2773
2774 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2775 switch = main.params[ 'kill' ][ 'switch' ]
2776 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2777 links = main.params[ 'kill' ][ 'links' ].split()
2778 onosCli = main.CLIs[ main.activeNodes[0] ]
2779 description = "Adding a switch to ensure it is discovered correctly"
2780 main.case( description )
2781
2782 main.step( "Add back " + switch )
2783 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2784 for peer in links:
2785 main.Mininet1.addLink( switch, peer )
2786 ipList = [ node.ip_address for node in main.nodes ]
2787 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2788 main.log.info( "Waiting " + str( switchSleep ) +
2789 " seconds for switch up to be discovered" )
2790 time.sleep( switchSleep )
2791 device = onosCli.getDevice( dpid=switchDPID )
2792 # Peek at the deleted switch
2793 main.log.warn( str( device ) )
2794 result = main.FALSE
2795 if device and device[ 'available' ]:
2796 result = main.TRUE
2797 utilities.assert_equals( expect=main.TRUE, actual=result,
2798 onpass="add switch successful",
2799 onfail="Failed to add switch?" )
2800
2801 def CASE13( self, main ):
2802 """
2803 Clean up
2804 """
2805 import os
2806 import time
2807 assert main.numCtrls, "main.numCtrls not defined"
2808 assert main, "main not defined"
2809 assert utilities.assert_equals, "utilities.assert_equals not defined"
2810 assert main.CLIs, "main.CLIs not defined"
2811 assert main.nodes, "main.nodes not defined"
2812
2813 # printing colors to terminal
2814 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2815 'blue': '\033[94m', 'green': '\033[92m',
2816 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2817 main.case( "Test Cleanup" )
2818 main.step( "Killing tcpdumps" )
2819 main.Mininet2.stopTcpdump()
2820
2821 testname = main.TEST
2822 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2823 main.step( "Copying MN pcap and ONOS log files to test station" )
2824 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2825 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2826 # NOTE: MN Pcap file is being saved to logdir.
2827 # We scp this file as MN and TestON aren't necessarily the same vm
2828
2829 # FIXME: To be replaced with a Jenkin's post script
2830 # TODO: Load these from params
2831 # NOTE: must end in /
2832 logFolder = "/opt/onos/log/"
2833 logFiles = [ "karaf.log", "karaf.log.1" ]
2834 # NOTE: must end in /
2835 for f in logFiles:
2836 for node in main.nodes:
2837 dstName = main.logdir + "/" + node.name + "-" + f
2838 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2839 logFolder + f, dstName )
2840 # std*.log's
2841 # NOTE: must end in /
2842 logFolder = "/opt/onos/var/"
2843 logFiles = [ "stderr.log", "stdout.log" ]
2844 # NOTE: must end in /
2845 for f in logFiles:
2846 for node in main.nodes:
2847 dstName = main.logdir + "/" + node.name + "-" + f
2848 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2849 logFolder + f, dstName )
2850 else:
2851 main.log.debug( "skipping saving log files" )
2852
2853 main.step( "Stopping Mininet" )
2854 mnResult = main.Mininet1.stopNet()
2855 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2856 onpass="Mininet stopped",
2857 onfail="MN cleanup NOT successful" )
2858
2859 main.step( "Checking ONOS Logs for errors" )
2860 for node in main.nodes:
2861 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2862 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2863
2864 try:
2865 timerLog = open( main.logdir + "/Timers.csv", 'w')
2866 # Overwrite with empty line and close
2867 labels = "Gossip Intents"
2868 data = str( gossipTime )
2869 timerLog.write( labels + "\n" + data )
2870 timerLog.close()
2871 except NameError, e:
2872 main.log.exception(e)
2873
2874 def CASE14( self, main ):
2875 """
2876 start election app on all onos nodes
2877 """
2878 assert main.numCtrls, "main.numCtrls not defined"
2879 assert main, "main not defined"
2880 assert utilities.assert_equals, "utilities.assert_equals not defined"
2881 assert main.CLIs, "main.CLIs not defined"
2882 assert main.nodes, "main.nodes not defined"
2883
2884 main.case("Start Leadership Election app")
2885 main.step( "Install leadership election app" )
2886 onosCli = main.CLIs[ main.activeNodes[0] ]
2887 appResult = onosCli.activateApp( "org.onosproject.election" )
2888 utilities.assert_equals(
2889 expect=main.TRUE,
2890 actual=appResult,
2891 onpass="Election app installed",
2892 onfail="Something went wrong with installing Leadership election" )
2893
2894 main.step( "Run for election on each node" )
2895 leaderResult = main.TRUE
2896 leaders = []
2897 for i in main.activeNodes:
2898 main.CLIs[i].electionTestRun()
2899 for i in main.activeNodes:
2900 cli = main.CLIs[i]
2901 leader = cli.electionTestLeader()
2902 if leader is None or leader == main.FALSE:
2903 main.log.error( cli.name + ": Leader for the election app " +
2904 "should be an ONOS node, instead got '" +
2905 str( leader ) + "'" )
2906 leaderResult = main.FALSE
2907 leaders.append( leader )
2908 utilities.assert_equals(
2909 expect=main.TRUE,
2910 actual=leaderResult,
2911 onpass="Successfully ran for leadership",
2912 onfail="Failed to run for leadership" )
2913
2914 main.step( "Check that each node shows the same leader" )
2915 sameLeader = main.TRUE
2916 if len( set( leaders ) ) != 1:
2917 sameLeader = main.FALSE
2918 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
2919 str( leaders ) )
2920 utilities.assert_equals(
2921 expect=main.TRUE,
2922 actual=sameLeader,
2923 onpass="Leadership is consistent for the election topic",
2924 onfail="Nodes have different leaders" )
2925
2926 def CASE15( self, main ):
2927 """
2928 Check that Leadership Election is still functional
2929 15.1 Run election on each node
2930 15.2 Check that each node has the same leaders and candidates
2931 15.3 Find current leader and withdraw
2932 15.4 Check that a new node was elected leader
2933 15.5 Check that that new leader was the candidate of old leader
2934 15.6 Run for election on old leader
2935 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2936 15.8 Make sure that the old leader was added to the candidate list
2937
2938 old and new variable prefixes refer to data from before vs after
2939 withdrawl and later before withdrawl vs after re-election
2940 """
2941 import time
2942 assert main.numCtrls, "main.numCtrls not defined"
2943 assert main, "main not defined"
2944 assert utilities.assert_equals, "utilities.assert_equals not defined"
2945 assert main.CLIs, "main.CLIs not defined"
2946 assert main.nodes, "main.nodes not defined"
2947
2948 description = "Check that Leadership Election is still functional"
2949 main.case( description )
Jon Halla440e872016-03-31 15:15:50 -07002950 # NOTE: Need to re-run after restarts since being a canidate is not persistant
Jon Hall6e709752016-02-01 13:38:46 -08002951
Jon Halla440e872016-03-31 15:15:50 -07002952 oldLeaders = [] # list of lists of each nodes' candidates before
2953 newLeaders = [] # list of lists of each nodes' candidates after
Jon Hall6e709752016-02-01 13:38:46 -08002954 oldLeader = '' # the old leader from oldLeaders, None if not same
2955 newLeader = '' # the new leaders fron newLoeaders, None if not same
2956 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2957 expectNoLeader = False # True when there is only one leader
2958 if main.numCtrls == 1:
2959 expectNoLeader = True
2960
2961 main.step( "Run for election on each node" )
2962 electionResult = main.TRUE
2963
2964 for i in main.activeNodes: # run test election on each node
2965 if main.CLIs[i].electionTestRun() == main.FALSE:
2966 electionResult = main.FALSE
2967 utilities.assert_equals(
2968 expect=main.TRUE,
2969 actual=electionResult,
2970 onpass="All nodes successfully ran for leadership",
2971 onfail="At least one node failed to run for leadership" )
2972
2973 if electionResult == main.FALSE:
2974 main.log.error(
2975 "Skipping Test Case because Election Test App isn't loaded" )
2976 main.skipCase()
2977
2978 main.step( "Check that each node shows the same leader and candidates" )
Jon Halla440e872016-03-31 15:15:50 -07002979 failMessage = "Nodes have different leaderboards"
2980 def consistentLeaderboards( nodes ):
2981 TOPIC = 'org.onosproject.election'
2982 # FIXME: use threads
2983 #FIXME: should we retry outside the function?
2984 for n in range( 5 ): # Retry in case election is still happening
2985 leaderList = []
2986 # Get all leaderboards
2987 for cli in nodes:
2988 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
2989 # Compare leaderboards
2990 result = all( i == leaderList[0] for i in leaderList ) and\
2991 leaderList is not None
2992 main.log.debug( leaderList )
2993 main.log.warn( result )
2994 if result:
2995 return ( result, leaderList )
2996 time.sleep(5) #TODO: paramerterize
2997 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
2998 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2999 sameResult, oldLeaders = consistentLeaderboards( activeCLIs )
3000 if sameResult:
3001 oldLeader = oldLeaders[ 0 ][ 0 ]
3002 main.log.warn( oldLeader )
Jon Hall6e709752016-02-01 13:38:46 -08003003 else:
Jon Halla440e872016-03-31 15:15:50 -07003004 oldLeader = None
Jon Hall6e709752016-02-01 13:38:46 -08003005 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003006 expect=True,
Jon Hall6e709752016-02-01 13:38:46 -08003007 actual=sameResult,
Jon Halla440e872016-03-31 15:15:50 -07003008 onpass="Leaderboards are consistent for the election topic",
Jon Hall6e709752016-02-01 13:38:46 -08003009 onfail=failMessage )
3010
3011 main.step( "Find current leader and withdraw" )
3012 withdrawResult = main.TRUE
3013 # do some sanity checking on leader before using it
3014 if oldLeader is None:
3015 main.log.error( "Leadership isn't consistent." )
3016 withdrawResult = main.FALSE
3017 # Get the CLI of the oldLeader
3018 for i in main.activeNodes:
3019 if oldLeader == main.nodes[ i ].ip_address:
3020 oldLeaderCLI = main.CLIs[ i ]
3021 break
3022 else: # FOR/ELSE statement
3023 main.log.error( "Leader election, could not find current leader" )
3024 if oldLeader:
3025 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3026 utilities.assert_equals(
3027 expect=main.TRUE,
3028 actual=withdrawResult,
3029 onpass="Node was withdrawn from election",
3030 onfail="Node was not withdrawn from election" )
3031
3032 main.step( "Check that a new node was elected leader" )
Jon Hall6e709752016-02-01 13:38:46 -08003033 failMessage = "Nodes have different leaders"
Jon Hall6e709752016-02-01 13:38:46 -08003034 # Get new leaders and candidates
Jon Halla440e872016-03-31 15:15:50 -07003035 newLeaderResult, newLeaders = consistentLeaderboards( activeCLIs )
3036 if newLeaders[ 0 ][ 0 ] == 'none':
3037 main.log.error( "No leader was elected on at least 1 node" )
3038 if not expectNoLeader:
3039 newLeaderResult = False
3040 if newLeaderResult:
3041 newLeader = newLeaders[ 0 ][ 0 ]
Jon Hall6e709752016-02-01 13:38:46 -08003042 else:
Jon Halla440e872016-03-31 15:15:50 -07003043 newLeader = None
Jon Hall6e709752016-02-01 13:38:46 -08003044
3045 # Check that the new leader is not the older leader, which was withdrawn
3046 if newLeader == oldLeader:
Jon Halla440e872016-03-31 15:15:50 -07003047 newLeaderResult = False
Jon Hall6e709752016-02-01 13:38:46 -08003048 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3049 " as the current leader" )
Jon Hall6e709752016-02-01 13:38:46 -08003050 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003051 expect=True,
Jon Hall6e709752016-02-01 13:38:46 -08003052 actual=newLeaderResult,
3053 onpass="Leadership election passed",
3054 onfail="Something went wrong with Leadership election" )
3055
Jon Halla440e872016-03-31 15:15:50 -07003056 main.step( "Check that that new leader was the candidate of old leader" )
Jon Hall6e709752016-02-01 13:38:46 -08003057 # candidates[ 2 ] should become the top candidate after withdrawl
3058 correctCandidateResult = main.TRUE
3059 if expectNoLeader:
3060 if newLeader == 'none':
3061 main.log.info( "No leader expected. None found. Pass" )
3062 correctCandidateResult = main.TRUE
3063 else:
3064 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3065 correctCandidateResult = main.FALSE
Jon Halla440e872016-03-31 15:15:50 -07003066 elif len( oldLeaders[0] ) >= 3:
3067 if newLeader == oldLeaders[ 0 ][ 2 ]:
3068 # correct leader was elected
3069 correctCandidateResult = main.TRUE
3070 else:
3071 correctCandidateResult = main.FALSE
3072 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3073 newLeader, oldLeaders[ 0 ][ 2 ] ) )
Jon Hall6e709752016-02-01 13:38:46 -08003074 else:
3075 main.log.warn( "Could not determine who should be the correct leader" )
Jon Halla440e872016-03-31 15:15:50 -07003076 main.log.debug( oldLeaders[ 0 ] )
Jon Hall6e709752016-02-01 13:38:46 -08003077 correctCandidateResult = main.FALSE
3078 utilities.assert_equals(
3079 expect=main.TRUE,
3080 actual=correctCandidateResult,
3081 onpass="Correct Candidate Elected",
3082 onfail="Incorrect Candidate Elected" )
3083
3084 main.step( "Run for election on old leader( just so everyone " +
3085 "is in the hat )" )
3086 if oldLeaderCLI is not None:
3087 runResult = oldLeaderCLI.electionTestRun()
3088 else:
3089 main.log.error( "No old leader to re-elect" )
3090 runResult = main.FALSE
3091 utilities.assert_equals(
3092 expect=main.TRUE,
3093 actual=runResult,
3094 onpass="App re-ran for election",
3095 onfail="App failed to run for election" )
Jon Halla440e872016-03-31 15:15:50 -07003096
Jon Hall6e709752016-02-01 13:38:46 -08003097 main.step(
3098 "Check that oldLeader is a candidate, and leader if only 1 node" )
3099 # verify leader didn't just change
Jon Halla440e872016-03-31 15:15:50 -07003100 # Get new leaders and candidates
3101 reRunLeaders = []
3102 time.sleep( 5 ) # Paremterize
3103 positionResult, reRunLeaders = consistentLeaderboards( activeCLIs )
Jon Hall6e709752016-02-01 13:38:46 -08003104
3105 # Check that the re-elected node is last on the candidate List
Jon Halla440e872016-03-31 15:15:50 -07003106 if oldLeader != reRunLeaders[ 0 ][ -1 ]:
3107 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
3108 str( reRunLeaders[ 0 ] ) ) )
Jon Hall6e709752016-02-01 13:38:46 -08003109 positionResult = main.FALSE
3110
3111 utilities.assert_equals(
Jon Halla440e872016-03-31 15:15:50 -07003112 expect=True,
Jon Hall6e709752016-02-01 13:38:46 -08003113 actual=positionResult,
3114 onpass="Old leader successfully re-ran for election",
3115 onfail="Something went wrong with Leadership election after " +
3116 "the old leader re-ran for election" )
3117
3118 def CASE16( self, main ):
3119 """
3120 Install Distributed Primitives app
3121 """
3122 import time
3123 assert main.numCtrls, "main.numCtrls not defined"
3124 assert main, "main not defined"
3125 assert utilities.assert_equals, "utilities.assert_equals not defined"
3126 assert main.CLIs, "main.CLIs not defined"
3127 assert main.nodes, "main.nodes not defined"
3128
3129 # Variables for the distributed primitives tests
3130 global pCounterName
Jon Hall6e709752016-02-01 13:38:46 -08003131 global pCounterValue
Jon Hall6e709752016-02-01 13:38:46 -08003132 global onosSet
3133 global onosSetName
3134 pCounterName = "TestON-Partitions"
Jon Hall6e709752016-02-01 13:38:46 -08003135 pCounterValue = 0
Jon Hall6e709752016-02-01 13:38:46 -08003136 onosSet = set([])
3137 onosSetName = "TestON-set"
3138
3139 description = "Install Primitives app"
3140 main.case( description )
3141 main.step( "Install Primitives app" )
3142 appName = "org.onosproject.distributedprimitives"
3143 node = main.activeNodes[0]
3144 appResults = main.CLIs[node].activateApp( appName )
3145 utilities.assert_equals( expect=main.TRUE,
3146 actual=appResults,
3147 onpass="Primitives app activated",
3148 onfail="Primitives app not activated" )
3149 time.sleep( 5 ) # To allow all nodes to activate
3150
3151 def CASE17( self, main ):
3152 """
3153 Check for basic functionality with distributed primitives
3154 """
3155 # Make sure variables are defined/set
3156 assert main.numCtrls, "main.numCtrls not defined"
3157 assert main, "main not defined"
3158 assert utilities.assert_equals, "utilities.assert_equals not defined"
3159 assert main.CLIs, "main.CLIs not defined"
3160 assert main.nodes, "main.nodes not defined"
3161 assert pCounterName, "pCounterName not defined"
Jon Hall6e709752016-02-01 13:38:46 -08003162 assert onosSetName, "onosSetName not defined"
3163 # NOTE: assert fails if value is 0/None/Empty/False
3164 try:
3165 pCounterValue
3166 except NameError:
3167 main.log.error( "pCounterValue not defined, setting to 0" )
3168 pCounterValue = 0
3169 try:
Jon Hall6e709752016-02-01 13:38:46 -08003170 onosSet
3171 except NameError:
3172 main.log.error( "onosSet not defined, setting to empty Set" )
3173 onosSet = set([])
3174 # Variables for the distributed primitives tests. These are local only
3175 addValue = "a"
3176 addAllValue = "a b c d e f"
3177 retainValue = "c d e f"
3178
3179 description = "Check for basic functionality with distributed " +\
3180 "primitives"
3181 main.case( description )
3182 main.caseExplanation = "Test the methods of the distributed " +\
3183 "primitives (counters and sets) throught the cli"
3184 # DISTRIBUTED ATOMIC COUNTERS
3185 # Partitioned counters
3186 main.step( "Increment then get a default counter on each node" )
3187 pCounters = []
3188 threads = []
3189 addedPValues = []
3190 for i in main.activeNodes:
3191 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3192 name="counterAddAndGet-" + str( i ),
3193 args=[ pCounterName ] )
3194 pCounterValue += 1
3195 addedPValues.append( pCounterValue )
3196 threads.append( t )
3197 t.start()
3198
3199 for t in threads:
3200 t.join()
3201 pCounters.append( t.result )
3202 # Check that counter incremented numController times
3203 pCounterResults = True
3204 for i in addedPValues:
3205 tmpResult = i in pCounters
3206 pCounterResults = pCounterResults and tmpResult
3207 if not tmpResult:
3208 main.log.error( str( i ) + " is not in partitioned "
3209 "counter incremented results" )
3210 utilities.assert_equals( expect=True,
3211 actual=pCounterResults,
3212 onpass="Default counter incremented",
3213 onfail="Error incrementing default" +
3214 " counter" )
3215
3216 main.step( "Get then Increment a default counter on each node" )
3217 pCounters = []
3218 threads = []
3219 addedPValues = []
3220 for i in main.activeNodes:
3221 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3222 name="counterGetAndAdd-" + str( i ),
3223 args=[ pCounterName ] )
3224 addedPValues.append( pCounterValue )
3225 pCounterValue += 1
3226 threads.append( t )
3227 t.start()
3228
3229 for t in threads:
3230 t.join()
3231 pCounters.append( t.result )
3232 # Check that counter incremented numController times
3233 pCounterResults = True
3234 for i in addedPValues:
3235 tmpResult = i in pCounters
3236 pCounterResults = pCounterResults and tmpResult
3237 if not tmpResult:
3238 main.log.error( str( i ) + " is not in partitioned "
3239 "counter incremented results" )
3240 utilities.assert_equals( expect=True,
3241 actual=pCounterResults,
3242 onpass="Default counter incremented",
3243 onfail="Error incrementing default" +
3244 " counter" )
3245
3246 main.step( "Counters we added have the correct values" )
3247 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3248 utilities.assert_equals( expect=main.TRUE,
3249 actual=incrementCheck,
3250 onpass="Added counters are correct",
3251 onfail="Added counters are incorrect" )
3252
3253 main.step( "Add -8 to then get a default counter on each node" )
3254 pCounters = []
3255 threads = []
3256 addedPValues = []
3257 for i in main.activeNodes:
3258 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3259 name="counterIncrement-" + str( i ),
3260 args=[ pCounterName ],
3261 kwargs={ "delta": -8 } )
3262 pCounterValue += -8
3263 addedPValues.append( pCounterValue )
3264 threads.append( t )
3265 t.start()
3266
3267 for t in threads:
3268 t.join()
3269 pCounters.append( t.result )
3270 # Check that counter incremented numController times
3271 pCounterResults = True
3272 for i in addedPValues:
3273 tmpResult = i in pCounters
3274 pCounterResults = pCounterResults and tmpResult
3275 if not tmpResult:
3276 main.log.error( str( i ) + " is not in partitioned "
3277 "counter incremented results" )
3278 utilities.assert_equals( expect=True,
3279 actual=pCounterResults,
3280 onpass="Default counter incremented",
3281 onfail="Error incrementing default" +
3282 " counter" )
3283
3284 main.step( "Add 5 to then get a default counter on each node" )
3285 pCounters = []
3286 threads = []
3287 addedPValues = []
3288 for i in main.activeNodes:
3289 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3290 name="counterIncrement-" + str( i ),
3291 args=[ pCounterName ],
3292 kwargs={ "delta": 5 } )
3293 pCounterValue += 5
3294 addedPValues.append( pCounterValue )
3295 threads.append( t )
3296 t.start()
3297
3298 for t in threads:
3299 t.join()
3300 pCounters.append( t.result )
3301 # Check that counter incremented numController times
3302 pCounterResults = True
3303 for i in addedPValues:
3304 tmpResult = i in pCounters
3305 pCounterResults = pCounterResults and tmpResult
3306 if not tmpResult:
3307 main.log.error( str( i ) + " is not in partitioned "
3308 "counter incremented results" )
3309 utilities.assert_equals( expect=True,
3310 actual=pCounterResults,
3311 onpass="Default counter incremented",
3312 onfail="Error incrementing default" +
3313 " counter" )
3314
3315 main.step( "Get then add 5 to a default counter on each node" )
3316 pCounters = []
3317 threads = []
3318 addedPValues = []
3319 for i in main.activeNodes:
3320 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3321 name="counterIncrement-" + str( i ),
3322 args=[ pCounterName ],
3323 kwargs={ "delta": 5 } )
3324 addedPValues.append( pCounterValue )
3325 pCounterValue += 5
3326 threads.append( t )
3327 t.start()
3328
3329 for t in threads:
3330 t.join()
3331 pCounters.append( t.result )
3332 # Check that counter incremented numController times
3333 pCounterResults = True
3334 for i in addedPValues:
3335 tmpResult = i in pCounters
3336 pCounterResults = pCounterResults and tmpResult
3337 if not tmpResult:
3338 main.log.error( str( i ) + " is not in partitioned "
3339 "counter incremented results" )
3340 utilities.assert_equals( expect=True,
3341 actual=pCounterResults,
3342 onpass="Default counter incremented",
3343 onfail="Error incrementing default" +
3344 " counter" )
3345
3346 main.step( "Counters we added have the correct values" )
3347 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3348 utilities.assert_equals( expect=main.TRUE,
3349 actual=incrementCheck,
3350 onpass="Added counters are correct",
3351 onfail="Added counters are incorrect" )
3352
Jon Hall6e709752016-02-01 13:38:46 -08003353 # DISTRIBUTED SETS
3354 main.step( "Distributed Set get" )
3355 size = len( onosSet )
3356 getResponses = []
3357 threads = []
3358 for i in main.activeNodes:
3359 t = main.Thread( target=main.CLIs[i].setTestGet,
3360 name="setTestGet-" + str( i ),
3361 args=[ onosSetName ] )
3362 threads.append( t )
3363 t.start()
3364 for t in threads:
3365 t.join()
3366 getResponses.append( t.result )
3367
3368 getResults = main.TRUE
3369 for i in range( len( main.activeNodes ) ):
3370 node = str( main.activeNodes[i] + 1 )
3371 if isinstance( getResponses[ i ], list):
3372 current = set( getResponses[ i ] )
3373 if len( current ) == len( getResponses[ i ] ):
3374 # no repeats
3375 if onosSet != current:
3376 main.log.error( "ONOS" + node +
3377 " has incorrect view" +
3378 " of set " + onosSetName + ":\n" +
3379 str( getResponses[ i ] ) )
3380 main.log.debug( "Expected: " + str( onosSet ) )
3381 main.log.debug( "Actual: " + str( current ) )
3382 getResults = main.FALSE
3383 else:
3384 # error, set is not a set
3385 main.log.error( "ONOS" + node +
3386 " has repeat elements in" +
3387 " set " + onosSetName + ":\n" +
3388 str( getResponses[ i ] ) )
3389 getResults = main.FALSE
3390 elif getResponses[ i ] == main.ERROR:
3391 getResults = main.FALSE
3392 utilities.assert_equals( expect=main.TRUE,
3393 actual=getResults,
3394 onpass="Set elements are correct",
3395 onfail="Set elements are incorrect" )
3396
3397 main.step( "Distributed Set size" )
3398 sizeResponses = []
3399 threads = []
3400 for i in main.activeNodes:
3401 t = main.Thread( target=main.CLIs[i].setTestSize,
3402 name="setTestSize-" + str( i ),
3403 args=[ onosSetName ] )
3404 threads.append( t )
3405 t.start()
3406 for t in threads:
3407 t.join()
3408 sizeResponses.append( t.result )
3409
3410 sizeResults = main.TRUE
3411 for i in range( len( main.activeNodes ) ):
3412 node = str( main.activeNodes[i] + 1 )
3413 if size != sizeResponses[ i ]:
3414 sizeResults = main.FALSE
3415 main.log.error( "ONOS" + node +
3416 " expected a size of " + str( size ) +
3417 " for set " + onosSetName +
3418 " but got " + str( sizeResponses[ i ] ) )
3419 utilities.assert_equals( expect=main.TRUE,
3420 actual=sizeResults,
3421 onpass="Set sizes are correct",
3422 onfail="Set sizes are incorrect" )
3423
3424 main.step( "Distributed Set add()" )
3425 onosSet.add( addValue )
3426 addResponses = []
3427 threads = []
3428 for i in main.activeNodes:
3429 t = main.Thread( target=main.CLIs[i].setTestAdd,
3430 name="setTestAdd-" + str( i ),
3431 args=[ onosSetName, addValue ] )
3432 threads.append( t )
3433 t.start()
3434 for t in threads:
3435 t.join()
3436 addResponses.append( t.result )
3437
3438 # main.TRUE = successfully changed the set
3439 # main.FALSE = action resulted in no change in set
3440 # main.ERROR - Some error in executing the function
3441 addResults = main.TRUE
3442 for i in range( len( main.activeNodes ) ):
3443 if addResponses[ i ] == main.TRUE:
3444 # All is well
3445 pass
3446 elif addResponses[ i ] == main.FALSE:
3447 # Already in set, probably fine
3448 pass
3449 elif addResponses[ i ] == main.ERROR:
3450 # Error in execution
3451 addResults = main.FALSE
3452 else:
3453 # unexpected result
3454 addResults = main.FALSE
3455 if addResults != main.TRUE:
3456 main.log.error( "Error executing set add" )
3457
3458 # Check if set is still correct
3459 size = len( onosSet )
3460 getResponses = []
3461 threads = []
3462 for i in main.activeNodes:
3463 t = main.Thread( target=main.CLIs[i].setTestGet,
3464 name="setTestGet-" + str( i ),
3465 args=[ onosSetName ] )
3466 threads.append( t )
3467 t.start()
3468 for t in threads:
3469 t.join()
3470 getResponses.append( t.result )
3471 getResults = main.TRUE
3472 for i in range( len( main.activeNodes ) ):
3473 node = str( main.activeNodes[i] + 1 )
3474 if isinstance( getResponses[ i ], list):
3475 current = set( getResponses[ i ] )
3476 if len( current ) == len( getResponses[ i ] ):
3477 # no repeats
3478 if onosSet != current:
3479 main.log.error( "ONOS" + node + " has incorrect view" +
3480 " of set " + onosSetName + ":\n" +
3481 str( getResponses[ i ] ) )
3482 main.log.debug( "Expected: " + str( onosSet ) )
3483 main.log.debug( "Actual: " + str( current ) )
3484 getResults = main.FALSE
3485 else:
3486 # error, set is not a set
3487 main.log.error( "ONOS" + node + " has repeat elements in" +
3488 " set " + onosSetName + ":\n" +
3489 str( getResponses[ i ] ) )
3490 getResults = main.FALSE
3491 elif getResponses[ i ] == main.ERROR:
3492 getResults = main.FALSE
3493 sizeResponses = []
3494 threads = []
3495 for i in main.activeNodes:
3496 t = main.Thread( target=main.CLIs[i].setTestSize,
3497 name="setTestSize-" + str( i ),
3498 args=[ onosSetName ] )
3499 threads.append( t )
3500 t.start()
3501 for t in threads:
3502 t.join()
3503 sizeResponses.append( t.result )
3504 sizeResults = main.TRUE
3505 for i in range( len( main.activeNodes ) ):
3506 node = str( main.activeNodes[i] + 1 )
3507 if size != sizeResponses[ i ]:
3508 sizeResults = main.FALSE
3509 main.log.error( "ONOS" + node +
3510 " expected a size of " + str( size ) +
3511 " for set " + onosSetName +
3512 " but got " + str( sizeResponses[ i ] ) )
3513 addResults = addResults and getResults and sizeResults
3514 utilities.assert_equals( expect=main.TRUE,
3515 actual=addResults,
3516 onpass="Set add correct",
3517 onfail="Set add was incorrect" )
3518
3519 main.step( "Distributed Set addAll()" )
3520 onosSet.update( addAllValue.split() )
3521 addResponses = []
3522 threads = []
3523 for i in main.activeNodes:
3524 t = main.Thread( target=main.CLIs[i].setTestAdd,
3525 name="setTestAddAll-" + str( i ),
3526 args=[ onosSetName, addAllValue ] )
3527 threads.append( t )
3528 t.start()
3529 for t in threads:
3530 t.join()
3531 addResponses.append( t.result )
3532
3533 # main.TRUE = successfully changed the set
3534 # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        # Classify each node's addAll response; only an execution error or an
        # unrecognized return value is a failure (main.FALSE just means the
        # elements were already present in the set).
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        # Query every active node's view of the set in parallel and compare
        # each response to the locally tracked reference set (onosSet)
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check each node's advertised size against the expected size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3615
        main.step( "Distributed Set contains()" )
        # Ask each active node whether the set contains the single value that
        # was added earlier (addValue)
        containsResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setContains-" + str( i ),
                             args=[ onosSetName ],
                             kwargs={ "values": addValue } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            # NOTE: This is the tuple
            containsResponses.append( t.result )

        containsResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if containsResponses[ i ] == main.ERROR:
                containsResults = main.FALSE
            else:
                # second element of the response tuple is the contains() answer
                containsResults = containsResults and\
                                  containsResponses[ i ][ 1 ]
        utilities.assert_equals( expect=main.TRUE,
                                 actual=containsResults,
                                 onpass="Set contains is functional",
                                 onfail="Set contains failed" )
3642
3643 main.step( "Distributed Set containsAll()" )
3644 containsAllResponses = []
3645 threads = []
3646 for i in main.activeNodes:
3647 t = main.Thread( target=main.CLIs[i].setTestGet,
3648 name="setContainsAll-" + str( i ),
3649 args=[ onosSetName ],
3650 kwargs={ "values": addAllValue } )
3651 threads.append( t )
3652 t.start()
3653 for t in threads:
3654 t.join()
3655 # NOTE: This is the tuple
3656 containsAllResponses.append( t.result )
3657
3658 containsAllResults = main.TRUE
3659 for i in range( len( main.activeNodes ) ):
3660 if containsResponses[ i ] == main.ERROR:
3661 containsResults = main.FALSE
3662 else:
3663 containsResults = containsResults and\
3664 containsResponses[ i ][ 1 ]
3665 utilities.assert_equals( expect=main.TRUE,
3666 actual=containsAllResults,
3667 onpass="Set containsAll is functional",
3668 onfail="Set containsAll failed" )
3669
        main.step( "Distributed Set remove()" )
        # Mirror the removal on the local reference set first, then remove the
        # same value from every node concurrently
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        # Compare each node's view and size against the local reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3766
        main.step( "Distributed Set removeAll()" )
        # Mirror the bulk removal on the local reference set first
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        # NOTE: thread fan-out wrapped in try/except so an unexpected error is
        #       logged rather than aborting the whole case
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        # Compare each node's view and size against the local reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3866
        main.step( "Distributed Set addAll()" )
        # Re-populate the set after removeAll; mirror on the local reference
        # set first, then add on every node concurrently
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        # Compare each node's view and size against the local reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3963
        main.step( "Distributed Set clear()" )
        # Mirror the clear on the local reference set first
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "],  # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        # After clear() the local reference set is empty, so size is 0 and the
        # returned views must be empty as well
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4061
        main.step( "Distributed Set addAll()" )
        # Re-populate the set after clear() so the retain() step below has
        # elements to work with; mirror on the local reference set first
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        # Compare each node's view and size against the local reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4158
        main.step( "Distributed Set retain()" )
        # retain() keeps only the given values; mirror with a local
        # intersection on the reference set first
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        # Compare each node's view and size against the local reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4255
4256 # Transactional maps
4257 main.step( "Partitioned Transactional maps put" )
4258 tMapValue = "Testing"
4259 numKeys = 100
4260 putResult = True
4261 node = main.activeNodes[0]
4262 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4263 if putResponses and len( putResponses ) == 100:
4264 for i in putResponses:
4265 if putResponses[ i ][ 'value' ] != tMapValue:
4266 putResult = False
4267 else:
4268 putResult = False
4269 if not putResult:
4270 main.log.debug( "Put response values: " + str( putResponses ) )
4271 utilities.assert_equals( expect=True,
4272 actual=putResult,
4273 onpass="Partitioned Transactional Map put successful",
4274 onfail="Partitioned Transactional Map put values are incorrect" )
4275
        main.step( "Partitioned Transactional maps get" )
        # For every key written in the put step, read it back from all active
        # nodes in parallel and require each node to return the same value
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )