blob: 6eb81d95888c85e0d729255ee0cc50bc3a1560b7 [file] [log] [blame]
Jon Hall6e709752016-02-01 13:38:46 -08001"""
2Description: This test is to determine if ONOS can handle
3 a full network partition
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
14CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
28class HAfullNetPartition:
29
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
51 import imp
52 import pexpect
53 import time
Jon Halla440e872016-03-31 15:15:50 -070054 import json
Jon Hall6e709752016-02-01 13:38:46 -080055 main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
56 "initialization" )
57 main.case( "Setting up test environment" )
58 main.caseExplanation = "Setup the test environment including " +\
59 "installing ONOS, starting Mininet and ONOS" +\
60 "cli sessions."
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
69 main.numCtrls = int( main.params[ 'num_controllers' ] )
70 if main.ONOSbench.maxNodes:
71 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
74 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070081 # These are for csv plotting in jenkins
82 global labels
83 global data
84 labels = []
85 data = []
Jon Hall6e709752016-02-01 13:38:46 -080086
87 # FIXME: just get controller port from params?
88 # TODO: do we really need all these?
89 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
90 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
91 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
92 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
93 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
94 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
95 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
96
97 try:
Jon Hall53c5e662016-04-13 16:06:56 -070098 from tests.HA.dependencies.HA import HA
Jon Hall41d39f12016-04-11 22:54:35 -070099 main.HA = HA()
Jon Hall6e709752016-02-01 13:38:46 -0800100 except Exception as e:
101 main.log.exception( e )
102 main.cleanup()
103 main.exit()
104
105 main.CLIs = []
106 main.nodes = []
107 ipList = []
108 for i in range( 1, main.numCtrls + 1 ):
109 try:
110 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
111 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
112 ipList.append( main.nodes[ -1 ].ip_address )
113 except AttributeError:
114 break
115
116 main.step( "Create cell file" )
117 cellAppString = main.params[ 'ENV' ][ 'appString' ]
118 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
119 main.Mininet1.ip_address,
120 cellAppString, ipList )
121 main.step( "Applying cell variable to environment" )
122 cellResult = main.ONOSbench.setCell( cellName )
123 verifyResult = main.ONOSbench.verifyCell()
124
125 # FIXME:this is short term fix
126 main.log.info( "Removing raft logs" )
127 main.ONOSbench.onosRemoveRaftLogs()
128
129 main.log.info( "Uninstalling ONOS" )
130 for node in main.nodes:
131 main.ONOSbench.onosUninstall( node.ip_address )
132
133 # Make sure ONOS is DEAD
134 main.log.info( "Killing any ONOS processes" )
135 killResults = main.TRUE
136 for node in main.nodes:
137 killed = main.ONOSbench.onosKill( node.ip_address )
138 killResults = killResults and killed
139
140 cleanInstallResult = main.TRUE
141 gitPullResult = main.TRUE
142
143 main.step( "Starting Mininet" )
144 # scp topo file to mininet
145 # TODO: move to params?
146 topoName = "obelisk.py"
147 filePath = main.ONOSbench.home + "/tools/test/topos/"
148 main.ONOSbench.scp( main.Mininet1,
149 filePath + topoName,
150 main.Mininet1.home,
151 direction="to" )
152 mnResult = main.Mininet1.startNet( )
153 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
154 onpass="Mininet Started",
155 onfail="Error starting Mininet" )
156
157 main.step( "Git checkout and pull " + gitBranch )
158 if PULLCODE:
159 main.ONOSbench.gitCheckout( gitBranch )
160 gitPullResult = main.ONOSbench.gitPull()
161 # values of 1 or 3 are good
162 utilities.assert_lesser( expect=0, actual=gitPullResult,
163 onpass="Git pull successful",
164 onfail="Git pull failed" )
165 main.ONOSbench.getVersion( report=True )
166
167 main.step( "Using mvn clean install" )
168 cleanInstallResult = main.TRUE
169 if PULLCODE and gitPullResult == main.TRUE:
170 cleanInstallResult = main.ONOSbench.cleanInstall()
171 else:
172 main.log.warn( "Did not pull new code so skipping mvn " +
173 "clean install" )
174 utilities.assert_equals( expect=main.TRUE,
175 actual=cleanInstallResult,
176 onpass="MCI successful",
177 onfail="MCI failed" )
178 # GRAPHS
179 # NOTE: important params here:
180 # job = name of Jenkins job
181 # Plot Name = Plot-HA, only can be used if multiple plots
182 # index = The number of the graph under plot name
183 job = "HAfullNetPartition"
184 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700185 index = "1"
Jon Hall6e709752016-02-01 13:38:46 -0800186 graphs = '<ac:structured-macro ac:name="html">\n'
187 graphs += '<ac:plain-text-body><![CDATA[\n'
188 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
189 '/plot/' + plotName + '/getPlot?index=' + index +\
190 '&width=500&height=300"' +\
191 'noborder="0" width="500" height="300" scrolling="yes" ' +\
192 'seamless="seamless"></iframe>\n'
193 graphs += ']]></ac:plain-text-body>\n'
194 graphs += '</ac:structured-macro>\n'
195 main.log.wiki(graphs)
196
197 main.step( "Creating ONOS package" )
198 # copy gen-partions file to ONOS
199 # NOTE: this assumes TestON and ONOS are on the same machine
Jon Hall53c5e662016-04-13 16:06:56 -0700200 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
Jon Hall6e709752016-02-01 13:38:46 -0800201 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
202 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
203 main.ONOSbench.ip_address,
204 srcFile,
205 dstDir,
206 pwd=main.ONOSbench.pwd,
207 direction="from" )
208 packageResult = main.ONOSbench.onosPackage()
209 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
210 onpass="ONOS package successful",
211 onfail="ONOS package failed" )
212
213 main.step( "Installing ONOS package" )
214 onosInstallResult = main.TRUE
215 for node in main.nodes:
216 tmpResult = main.ONOSbench.onosInstall( options="-f",
217 node=node.ip_address )
218 onosInstallResult = onosInstallResult and tmpResult
219 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
220 onpass="ONOS install successful",
221 onfail="ONOS install failed" )
222 # clean up gen-partitions file
223 try:
224 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
225 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
226 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
227 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
228 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
229 str( main.ONOSbench.handle.before ) )
230 except ( pexpect.TIMEOUT, pexpect.EOF ):
231 main.log.exception( "ONOSbench: pexpect exception found:" +
232 main.ONOSbench.handle.before )
233 main.cleanup()
234 main.exit()
235
236 main.step( "Checking if ONOS is up yet" )
237 for i in range( 2 ):
238 onosIsupResult = main.TRUE
239 for node in main.nodes:
240 started = main.ONOSbench.isup( node.ip_address )
241 if not started:
242 main.log.error( node.name + " hasn't started" )
243 onosIsupResult = onosIsupResult and started
244 if onosIsupResult == main.TRUE:
245 break
246 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
247 onpass="ONOS startup successful",
248 onfail="ONOS startup failed" )
249
250 main.log.step( "Starting ONOS CLI sessions" )
251 cliResults = main.TRUE
252 threads = []
253 for i in range( main.numCtrls ):
254 t = main.Thread( target=main.CLIs[i].startOnosCli,
255 name="startOnosCli-" + str( i ),
256 args=[main.nodes[i].ip_address] )
257 threads.append( t )
258 t.start()
259
260 for t in threads:
261 t.join()
262 cliResults = cliResults and t.result
263 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
264 onpass="ONOS cli startup successful",
265 onfail="ONOS cli startup failed" )
266
267 # Create a list of active nodes for use when some nodes are stopped
268 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
269
270 if main.params[ 'tcpdump' ].lower() == "true":
271 main.step( "Start Packet Capture MN" )
272 main.Mininet2.startTcpdump(
273 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
274 + "-MN.pcap",
275 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
276 port=main.params[ 'MNtcpdump' ][ 'port' ] )
277
Jon Halla440e872016-03-31 15:15:50 -0700278 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -0700279 nodeResults = utilities.retry( main.HA.nodesCheck,
280 False,
281 args=[main.activeNodes],
282 attempts=5 )
Jon Halla440e872016-03-31 15:15:50 -0700283
Jon Hall41d39f12016-04-11 22:54:35 -0700284 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Halla440e872016-03-31 15:15:50 -0700285 onpass="Nodes check successful",
286 onfail="Nodes check NOT successful" )
287
288 if not nodeResults:
Jon Hall7ac7bc32016-05-05 10:57:02 -0700289 for i in main.activeNodes:
290 cli = main.CLIs[i]
Jon Halla440e872016-03-31 15:15:50 -0700291 main.log.debug( "{} components not ACTIVE: \n{}".format(
292 cli.name,
293 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
294
Jon Hall6e709752016-02-01 13:38:46 -0800295 if cliResults == main.FALSE:
296 main.log.error( "Failed to start ONOS, stopping test" )
297 main.cleanup()
298 main.exit()
299
Jon Hall172b7ba2016-04-07 18:12:20 -0700300 main.step( "Activate apps defined in the params file" )
301 # get data from the params
302 apps = main.params.get( 'apps' )
303 if apps:
304 apps = apps.split(',')
305 main.log.warn( apps )
306 activateResult = True
307 for app in apps:
308 main.CLIs[ 0 ].app( app, "Activate" )
309 # TODO: check this worked
310 time.sleep( 10 ) # wait for apps to activate
311 for app in apps:
312 state = main.CLIs[ 0 ].appStatus( app )
313 if state == "ACTIVE":
314 activateResult = activeResult and True
315 else:
316 main.log.error( "{} is in {} state".format( app, state ) )
317 activeResult = False
318 utilities.assert_equals( expect=True,
319 actual=activateResult,
320 onpass="Successfully activated apps",
321 onfail="Failed to activate apps" )
322 else:
323 main.log.warn( "No apps were specified to be loaded after startup" )
324
325 main.step( "Set ONOS configurations" )
326 config = main.params.get( 'ONOS_Configuration' )
327 if config:
328 main.log.debug( config )
329 checkResult = main.TRUE
330 for component in config:
331 for setting in config[component]:
332 value = config[component][setting]
333 check = main.CLIs[ 0 ].setCfg( component, setting, value )
334 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
335 checkResult = check and checkResult
336 utilities.assert_equals( expect=main.TRUE,
337 actual=checkResult,
338 onpass="Successfully set config",
339 onfail="Failed to set config" )
340 else:
341 main.log.warn( "No configurations were specified to be changed after startup" )
342
Jon Hall9d2dcad2016-04-08 10:15:20 -0700343 main.step( "App Ids check" )
344 appCheck = main.TRUE
345 threads = []
346 for i in main.activeNodes:
347 t = main.Thread( target=main.CLIs[i].appToIDCheck,
348 name="appToIDCheck-" + str( i ),
349 args=[] )
350 threads.append( t )
351 t.start()
352
353 for t in threads:
354 t.join()
355 appCheck = appCheck and t.result
356 if appCheck != main.TRUE:
357 node = main.activeNodes[0]
358 main.log.warn( main.CLIs[node].apps() )
359 main.log.warn( main.CLIs[node].appIDs() )
360 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
361 onpass="App Ids seem to be correct",
362 onfail="Something is wrong with app Ids" )
363
Jon Hall6e709752016-02-01 13:38:46 -0800364 def CASE2( self, main ):
365 """
366 Assign devices to controllers
367 """
368 import re
369 assert main.numCtrls, "main.numCtrls not defined"
370 assert main, "main not defined"
371 assert utilities.assert_equals, "utilities.assert_equals not defined"
372 assert main.CLIs, "main.CLIs not defined"
373 assert main.nodes, "main.nodes not defined"
374 assert ONOS1Port, "ONOS1Port not defined"
375 assert ONOS2Port, "ONOS2Port not defined"
376 assert ONOS3Port, "ONOS3Port not defined"
377 assert ONOS4Port, "ONOS4Port not defined"
378 assert ONOS5Port, "ONOS5Port not defined"
379 assert ONOS6Port, "ONOS6Port not defined"
380 assert ONOS7Port, "ONOS7Port not defined"
381
382 main.case( "Assigning devices to controllers" )
383 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
384 "and check that an ONOS node becomes the " +\
385 "master of the device."
386 main.step( "Assign switches to controllers" )
387
388 ipList = []
389 for i in range( main.numCtrls ):
390 ipList.append( main.nodes[ i ].ip_address )
391 swList = []
392 for i in range( 1, 29 ):
393 swList.append( "s" + str( i ) )
394 main.Mininet1.assignSwController( sw=swList, ip=ipList )
395
396 mastershipCheck = main.TRUE
397 for i in range( 1, 29 ):
398 response = main.Mininet1.getSwController( "s" + str( i ) )
399 try:
400 main.log.info( str( response ) )
401 except Exception:
402 main.log.info( repr( response ) )
403 for node in main.nodes:
404 if re.search( "tcp:" + node.ip_address, response ):
405 mastershipCheck = mastershipCheck and main.TRUE
406 else:
407 main.log.error( "Error, node " + node.ip_address + " is " +
408 "not in the list of controllers s" +
409 str( i ) + " is connecting to." )
410 mastershipCheck = main.FALSE
411 utilities.assert_equals(
412 expect=main.TRUE,
413 actual=mastershipCheck,
414 onpass="Switch mastership assigned correctly",
415 onfail="Switches not assigned correctly to controllers" )
416
417 def CASE21( self, main ):
418 """
419 Assign mastership to controllers
420 """
421 import time
422 assert main.numCtrls, "main.numCtrls not defined"
423 assert main, "main not defined"
424 assert utilities.assert_equals, "utilities.assert_equals not defined"
425 assert main.CLIs, "main.CLIs not defined"
426 assert main.nodes, "main.nodes not defined"
427 assert ONOS1Port, "ONOS1Port not defined"
428 assert ONOS2Port, "ONOS2Port not defined"
429 assert ONOS3Port, "ONOS3Port not defined"
430 assert ONOS4Port, "ONOS4Port not defined"
431 assert ONOS5Port, "ONOS5Port not defined"
432 assert ONOS6Port, "ONOS6Port not defined"
433 assert ONOS7Port, "ONOS7Port not defined"
434
435 main.case( "Assigning Controller roles for switches" )
436 main.caseExplanation = "Check that ONOS is connected to each " +\
437 "device. Then manually assign" +\
438 " mastership to specific ONOS nodes using" +\
439 " 'device-role'"
440 main.step( "Assign mastership of switches to specific controllers" )
441 # Manually assign mastership to the controller we want
442 roleCall = main.TRUE
443
444 ipList = [ ]
445 deviceList = []
446 onosCli = main.CLIs[ main.activeNodes[0] ]
447 try:
448 # Assign mastership to specific controllers. This assignment was
449 # determined for a 7 node cluser, but will work with any sized
450 # cluster
451 for i in range( 1, 29 ): # switches 1 through 28
452 # set up correct variables:
453 if i == 1:
454 c = 0
455 ip = main.nodes[ c ].ip_address # ONOS1
456 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
457 elif i == 2:
458 c = 1 % main.numCtrls
459 ip = main.nodes[ c ].ip_address # ONOS2
460 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
461 elif i == 3:
462 c = 1 % main.numCtrls
463 ip = main.nodes[ c ].ip_address # ONOS2
464 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
465 elif i == 4:
466 c = 3 % main.numCtrls
467 ip = main.nodes[ c ].ip_address # ONOS4
468 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
469 elif i == 5:
470 c = 2 % main.numCtrls
471 ip = main.nodes[ c ].ip_address # ONOS3
472 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
473 elif i == 6:
474 c = 2 % main.numCtrls
475 ip = main.nodes[ c ].ip_address # ONOS3
476 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
477 elif i == 7:
478 c = 5 % main.numCtrls
479 ip = main.nodes[ c ].ip_address # ONOS6
480 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
481 elif i >= 8 and i <= 17:
482 c = 4 % main.numCtrls
483 ip = main.nodes[ c ].ip_address # ONOS5
484 dpid = '3' + str( i ).zfill( 3 )
485 deviceId = onosCli.getDevice( dpid ).get( 'id' )
486 elif i >= 18 and i <= 27:
487 c = 6 % main.numCtrls
488 ip = main.nodes[ c ].ip_address # ONOS7
489 dpid = '6' + str( i ).zfill( 3 )
490 deviceId = onosCli.getDevice( dpid ).get( 'id' )
491 elif i == 28:
492 c = 0
493 ip = main.nodes[ c ].ip_address # ONOS1
494 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
495 else:
496 main.log.error( "You didn't write an else statement for " +
497 "switch s" + str( i ) )
498 roleCall = main.FALSE
499 # Assign switch
500 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
501 # TODO: make this controller dynamic
502 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
503 ipList.append( ip )
504 deviceList.append( deviceId )
505 except ( AttributeError, AssertionError ):
506 main.log.exception( "Something is wrong with ONOS device view" )
507 main.log.info( onosCli.devices() )
508 utilities.assert_equals(
509 expect=main.TRUE,
510 actual=roleCall,
511 onpass="Re-assigned switch mastership to designated controller",
512 onfail="Something wrong with deviceRole calls" )
513
514 main.step( "Check mastership was correctly assigned" )
515 roleCheck = main.TRUE
516 # NOTE: This is due to the fact that device mastership change is not
517 # atomic and is actually a multi step process
518 time.sleep( 5 )
519 for i in range( len( ipList ) ):
520 ip = ipList[i]
521 deviceId = deviceList[i]
522 # Check assignment
523 master = onosCli.getRole( deviceId ).get( 'master' )
524 if ip in master:
525 roleCheck = roleCheck and main.TRUE
526 else:
527 roleCheck = roleCheck and main.FALSE
528 main.log.error( "Error, controller " + ip + " is not" +
529 " master " + "of device " +
530 str( deviceId ) + ". Master is " +
531 repr( master ) + "." )
532 utilities.assert_equals(
533 expect=main.TRUE,
534 actual=roleCheck,
535 onpass="Switches were successfully reassigned to designated " +
536 "controller",
537 onfail="Switches were not successfully reassigned" )
538
    def CASE3( self, main ):
        """
        Assign intents

        Discovers hosts via a reactive-forwarding pingall, installs
        host-to-host intents between h8..h17 and h18..h27, then polls until
        every active node reports the same INSTALLED intent set.
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                                "assign predetermined host-to-host intents." +\
                                " After installation, check that the intent" +\
                                " is distributed to all nodes and the state" +\
                                " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        # One appToIDCheck thread per active node; all must agree
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            # One retry: first pingall can race host/ARP discovery
            main.log.warn("First pingall failed. Trying again...")
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass= passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            # Obelisk host MACs are 00:00:00:00:00:<host number in hex>
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                # Round-robin the intent submissions across active nodes
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        # Diagnostic dump: leader election topics for intent partitions
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        # Poll up to 100 times (1s apart) until every node has the full,
        # INSTALLED intent set
        for j in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        # gossipTime is exported for the jenkins csv plot
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        # NOTE(review): pendingMap is a JSON string here, so this is a
        #               substring check for the literal text "key" — confirm
        #               that is the intended signal.
        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
934
    def CASE4( self, main ):
        """
        Ping across added host intents

        Polls intent state until every intent reports INSTALLED ( or a
        40-try limit is reached ), then pings between the host pairs the
        intents connect. On any failure the cluster's leaders, partitions
        and pending map are dumped for debugging; if intents never settled,
        the test waits 60 seconds, re-dumps state, and retries the pings.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All CLI queries in this case go through the first active node
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll up to 40 times, sleeping 1s between tries, until every
        # intent's state contains "INSTALLED"
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host hN is expected to reach host h( N + 10 ) through an intent
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        # NOTE(review): this dumps per-node leaders output only when the
        #               topic check PASSED; confirm the condition is not
        #               meant to be inverted ( compare the "if missing:"
        #               dump later in this case )
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If intents never all reached INSTALLED, give the cluster another
        # minute, dump state again, and retry the pings once
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Dump raw leaders output from every node when a topic is missing
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1216
1217 def CASE5( self, main ):
1218 """
1219 Reading state of ONOS
1220 """
1221 import json
1222 import time
1223 assert main.numCtrls, "main.numCtrls not defined"
1224 assert main, "main not defined"
1225 assert utilities.assert_equals, "utilities.assert_equals not defined"
1226 assert main.CLIs, "main.CLIs not defined"
1227 assert main.nodes, "main.nodes not defined"
1228
1229 main.case( "Setting up and gathering data for current state" )
1230 # The general idea for this test case is to pull the state of
1231 # ( intents,flows, topology,... ) from each ONOS node
1232 # We can then compare them with each other and also with past states
1233
1234 main.step( "Check that each switch has a master" )
1235 global mastershipState
1236 mastershipState = '[]'
1237
1238 # Assert that each device has a master
1239 rolesNotNull = main.TRUE
1240 threads = []
1241 for i in main.activeNodes:
1242 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1243 name="rolesNotNull-" + str( i ),
1244 args=[] )
1245 threads.append( t )
1246 t.start()
1247
1248 for t in threads:
1249 t.join()
1250 rolesNotNull = rolesNotNull and t.result
1251 utilities.assert_equals(
1252 expect=main.TRUE,
1253 actual=rolesNotNull,
1254 onpass="Each device has a master",
1255 onfail="Some devices don't have a master assigned" )
1256
1257 main.step( "Get the Mastership of each switch from each controller" )
1258 ONOSMastership = []
1259 mastershipCheck = main.FALSE
1260 consistentMastership = True
1261 rolesResults = True
1262 threads = []
1263 for i in main.activeNodes:
1264 t = main.Thread( target=main.CLIs[i].roles,
1265 name="roles-" + str( i ),
1266 args=[] )
1267 threads.append( t )
1268 t.start()
1269
1270 for t in threads:
1271 t.join()
1272 ONOSMastership.append( t.result )
1273
1274 for i in range( len( ONOSMastership ) ):
1275 node = str( main.activeNodes[i] + 1 )
1276 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1277 main.log.error( "Error in getting ONOS" + node + " roles" )
1278 main.log.warn( "ONOS" + node + " mastership response: " +
1279 repr( ONOSMastership[i] ) )
1280 rolesResults = False
1281 utilities.assert_equals(
1282 expect=True,
1283 actual=rolesResults,
1284 onpass="No error in reading roles output",
1285 onfail="Error in reading roles from ONOS" )
1286
1287 main.step( "Check for consistency in roles from each controller" )
1288 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1289 main.log.info(
1290 "Switch roles are consistent across all ONOS nodes" )
1291 else:
1292 consistentMastership = False
1293 utilities.assert_equals(
1294 expect=True,
1295 actual=consistentMastership,
1296 onpass="Switch roles are consistent across all ONOS nodes",
1297 onfail="ONOS nodes have different views of switch roles" )
1298
1299 if rolesResults and not consistentMastership:
1300 for i in range( len( main.activeNodes ) ):
1301 node = str( main.activeNodes[i] + 1 )
1302 try:
1303 main.log.warn(
1304 "ONOS" + node + " roles: ",
1305 json.dumps(
1306 json.loads( ONOSMastership[ i ] ),
1307 sort_keys=True,
1308 indent=4,
1309 separators=( ',', ': ' ) ) )
1310 except ( ValueError, TypeError ):
1311 main.log.warn( repr( ONOSMastership[ i ] ) )
1312 elif rolesResults and consistentMastership:
1313 mastershipCheck = main.TRUE
1314 mastershipState = ONOSMastership[ 0 ]
1315
1316 main.step( "Get the intents from each controller" )
1317 global intentState
1318 intentState = []
1319 ONOSIntents = []
1320 intentCheck = main.FALSE
1321 consistentIntents = True
1322 intentsResults = True
1323 threads = []
1324 for i in main.activeNodes:
1325 t = main.Thread( target=main.CLIs[i].intents,
1326 name="intents-" + str( i ),
1327 args=[],
1328 kwargs={ 'jsonFormat': True } )
1329 threads.append( t )
1330 t.start()
1331
1332 for t in threads:
1333 t.join()
1334 ONOSIntents.append( t.result )
1335
1336 for i in range( len( ONOSIntents ) ):
1337 node = str( main.activeNodes[i] + 1 )
1338 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1339 main.log.error( "Error in getting ONOS" + node + " intents" )
1340 main.log.warn( "ONOS" + node + " intents response: " +
1341 repr( ONOSIntents[ i ] ) )
1342 intentsResults = False
1343 utilities.assert_equals(
1344 expect=True,
1345 actual=intentsResults,
1346 onpass="No error in reading intents output",
1347 onfail="Error in reading intents from ONOS" )
1348
1349 main.step( "Check for consistency in Intents from each controller" )
1350 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1351 main.log.info( "Intents are consistent across all ONOS " +
1352 "nodes" )
1353 else:
1354 consistentIntents = False
1355 main.log.error( "Intents not consistent" )
1356 utilities.assert_equals(
1357 expect=True,
1358 actual=consistentIntents,
1359 onpass="Intents are consistent across all ONOS nodes",
1360 onfail="ONOS nodes have different views of intents" )
1361
1362 if intentsResults:
1363 # Try to make it easy to figure out what is happening
1364 #
1365 # Intent ONOS1 ONOS2 ...
1366 # 0x01 INSTALLED INSTALLING
1367 # ... ... ...
1368 # ... ... ...
1369 title = " Id"
1370 for n in main.activeNodes:
1371 title += " " * 10 + "ONOS" + str( n + 1 )
1372 main.log.warn( title )
1373 # get all intent keys in the cluster
1374 keys = []
Jon Halla440e872016-03-31 15:15:50 -07001375 try:
1376 # Get the set of all intent keys
Jon Hall6e709752016-02-01 13:38:46 -08001377 for nodeStr in ONOSIntents:
1378 node = json.loads( nodeStr )
1379 for intent in node:
Jon Halla440e872016-03-31 15:15:50 -07001380 keys.append( intent.get( 'id' ) )
1381 keys = set( keys )
1382 # For each intent key, print the state on each node
1383 for key in keys:
1384 row = "%-13s" % key
1385 for nodeStr in ONOSIntents:
1386 node = json.loads( nodeStr )
1387 for intent in node:
1388 if intent.get( 'id', "Error" ) == key:
1389 row += "%-15s" % intent.get( 'state' )
1390 main.log.warn( row )
1391 # End of intent state table
1392 except ValueError as e:
1393 main.log.exception( e )
1394 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
Jon Hall6e709752016-02-01 13:38:46 -08001395
1396 if intentsResults and not consistentIntents:
1397 # print the json objects
1398 n = str( main.activeNodes[-1] + 1 )
1399 main.log.debug( "ONOS" + n + " intents: " )
1400 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1401 sort_keys=True,
1402 indent=4,
1403 separators=( ',', ': ' ) ) )
1404 for i in range( len( ONOSIntents ) ):
1405 node = str( main.activeNodes[i] + 1 )
1406 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1407 main.log.debug( "ONOS" + node + " intents: " )
1408 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1409 sort_keys=True,
1410 indent=4,
1411 separators=( ',', ': ' ) ) )
1412 else:
1413 main.log.debug( "ONOS" + node + " intents match ONOS" +
1414 n + " intents" )
1415 elif intentsResults and consistentIntents:
1416 intentCheck = main.TRUE
1417 intentState = ONOSIntents[ 0 ]
1418
1419 main.step( "Get the flows from each controller" )
1420 global flowState
1421 flowState = []
1422 ONOSFlows = []
1423 ONOSFlowsJson = []
1424 flowCheck = main.FALSE
1425 consistentFlows = True
1426 flowsResults = True
1427 threads = []
1428 for i in main.activeNodes:
1429 t = main.Thread( target=main.CLIs[i].flows,
1430 name="flows-" + str( i ),
1431 args=[],
1432 kwargs={ 'jsonFormat': True } )
1433 threads.append( t )
1434 t.start()
1435
1436 # NOTE: Flows command can take some time to run
1437 time.sleep(30)
1438 for t in threads:
1439 t.join()
1440 result = t.result
1441 ONOSFlows.append( result )
1442
1443 for i in range( len( ONOSFlows ) ):
1444 num = str( main.activeNodes[i] + 1 )
1445 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1446 main.log.error( "Error in getting ONOS" + num + " flows" )
1447 main.log.warn( "ONOS" + num + " flows response: " +
1448 repr( ONOSFlows[ i ] ) )
1449 flowsResults = False
1450 ONOSFlowsJson.append( None )
1451 else:
1452 try:
1453 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1454 except ( ValueError, TypeError ):
1455 # FIXME: change this to log.error?
1456 main.log.exception( "Error in parsing ONOS" + num +
1457 " response as json." )
1458 main.log.error( repr( ONOSFlows[ i ] ) )
1459 ONOSFlowsJson.append( None )
1460 flowsResults = False
1461 utilities.assert_equals(
1462 expect=True,
1463 actual=flowsResults,
1464 onpass="No error in reading flows output",
1465 onfail="Error in reading flows from ONOS" )
1466
1467 main.step( "Check for consistency in Flows from each controller" )
1468 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1469 if all( tmp ):
1470 main.log.info( "Flow count is consistent across all ONOS nodes" )
1471 else:
1472 consistentFlows = False
1473 utilities.assert_equals(
1474 expect=True,
1475 actual=consistentFlows,
1476 onpass="The flow count is consistent across all ONOS nodes",
1477 onfail="ONOS nodes have different flow counts" )
1478
1479 if flowsResults and not consistentFlows:
1480 for i in range( len( ONOSFlows ) ):
1481 node = str( main.activeNodes[i] + 1 )
1482 try:
1483 main.log.warn(
1484 "ONOS" + node + " flows: " +
1485 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1486 indent=4, separators=( ',', ': ' ) ) )
1487 except ( ValueError, TypeError ):
1488 main.log.warn( "ONOS" + node + " flows: " +
1489 repr( ONOSFlows[ i ] ) )
1490 elif flowsResults and consistentFlows:
1491 flowCheck = main.TRUE
1492 flowState = ONOSFlows[ 0 ]
1493
1494 main.step( "Get the OF Table entries" )
1495 global flows
1496 flows = []
1497 for i in range( 1, 29 ):
1498 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1499 if flowCheck == main.FALSE:
1500 for table in flows:
1501 main.log.warn( table )
1502 # TODO: Compare switch flow tables with ONOS flow tables
1503
1504 main.step( "Start continuous pings" )
1505 main.Mininet2.pingLong(
1506 src=main.params[ 'PING' ][ 'source1' ],
1507 target=main.params[ 'PING' ][ 'target1' ],
1508 pingTime=500 )
1509 main.Mininet2.pingLong(
1510 src=main.params[ 'PING' ][ 'source2' ],
1511 target=main.params[ 'PING' ][ 'target2' ],
1512 pingTime=500 )
1513 main.Mininet2.pingLong(
1514 src=main.params[ 'PING' ][ 'source3' ],
1515 target=main.params[ 'PING' ][ 'target3' ],
1516 pingTime=500 )
1517 main.Mininet2.pingLong(
1518 src=main.params[ 'PING' ][ 'source4' ],
1519 target=main.params[ 'PING' ][ 'target4' ],
1520 pingTime=500 )
1521 main.Mininet2.pingLong(
1522 src=main.params[ 'PING' ][ 'source5' ],
1523 target=main.params[ 'PING' ][ 'target5' ],
1524 pingTime=500 )
1525 main.Mininet2.pingLong(
1526 src=main.params[ 'PING' ][ 'source6' ],
1527 target=main.params[ 'PING' ][ 'target6' ],
1528 pingTime=500 )
1529 main.Mininet2.pingLong(
1530 src=main.params[ 'PING' ][ 'source7' ],
1531 target=main.params[ 'PING' ][ 'target7' ],
1532 pingTime=500 )
1533 main.Mininet2.pingLong(
1534 src=main.params[ 'PING' ][ 'source8' ],
1535 target=main.params[ 'PING' ][ 'target8' ],
1536 pingTime=500 )
1537 main.Mininet2.pingLong(
1538 src=main.params[ 'PING' ][ 'source9' ],
1539 target=main.params[ 'PING' ][ 'target9' ],
1540 pingTime=500 )
1541 main.Mininet2.pingLong(
1542 src=main.params[ 'PING' ][ 'source10' ],
1543 target=main.params[ 'PING' ][ 'target10' ],
1544 pingTime=500 )
1545
1546 main.step( "Collecting topology information from ONOS" )
1547 devices = []
1548 threads = []
1549 for i in main.activeNodes:
1550 t = main.Thread( target=main.CLIs[i].devices,
1551 name="devices-" + str( i ),
1552 args=[ ] )
1553 threads.append( t )
1554 t.start()
1555
1556 for t in threads:
1557 t.join()
1558 devices.append( t.result )
1559 hosts = []
1560 threads = []
1561 for i in main.activeNodes:
1562 t = main.Thread( target=main.CLIs[i].hosts,
1563 name="hosts-" + str( i ),
1564 args=[ ] )
1565 threads.append( t )
1566 t.start()
1567
1568 for t in threads:
1569 t.join()
1570 try:
1571 hosts.append( json.loads( t.result ) )
1572 except ( ValueError, TypeError ):
1573 # FIXME: better handling of this, print which node
1574 # Maybe use thread name?
1575 main.log.exception( "Error parsing json output of hosts" )
1576 main.log.warn( repr( t.result ) )
1577 hosts.append( None )
1578
1579 ports = []
1580 threads = []
1581 for i in main.activeNodes:
1582 t = main.Thread( target=main.CLIs[i].ports,
1583 name="ports-" + str( i ),
1584 args=[ ] )
1585 threads.append( t )
1586 t.start()
1587
1588 for t in threads:
1589 t.join()
1590 ports.append( t.result )
1591 links = []
1592 threads = []
1593 for i in main.activeNodes:
1594 t = main.Thread( target=main.CLIs[i].links,
1595 name="links-" + str( i ),
1596 args=[ ] )
1597 threads.append( t )
1598 t.start()
1599
1600 for t in threads:
1601 t.join()
1602 links.append( t.result )
1603 clusters = []
1604 threads = []
1605 for i in main.activeNodes:
1606 t = main.Thread( target=main.CLIs[i].clusters,
1607 name="clusters-" + str( i ),
1608 args=[ ] )
1609 threads.append( t )
1610 t.start()
1611
1612 for t in threads:
1613 t.join()
1614 clusters.append( t.result )
1615 # Compare json objects for hosts and dataplane clusters
1616
1617 # hosts
1618 main.step( "Host view is consistent across ONOS nodes" )
1619 consistentHostsResult = main.TRUE
1620 for controller in range( len( hosts ) ):
1621 controllerStr = str( main.activeNodes[controller] + 1 )
1622 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1623 if hosts[ controller ] == hosts[ 0 ]:
1624 continue
1625 else: # hosts not consistent
1626 main.log.error( "hosts from ONOS" +
1627 controllerStr +
1628 " is inconsistent with ONOS1" )
1629 main.log.warn( repr( hosts[ controller ] ) )
1630 consistentHostsResult = main.FALSE
1631
1632 else:
1633 main.log.error( "Error in getting ONOS hosts from ONOS" +
1634 controllerStr )
1635 consistentHostsResult = main.FALSE
1636 main.log.warn( "ONOS" + controllerStr +
1637 " hosts response: " +
1638 repr( hosts[ controller ] ) )
1639 utilities.assert_equals(
1640 expect=main.TRUE,
1641 actual=consistentHostsResult,
1642 onpass="Hosts view is consistent across all ONOS nodes",
1643 onfail="ONOS nodes have different views of hosts" )
1644
1645 main.step( "Each host has an IP address" )
1646 ipResult = main.TRUE
1647 for controller in range( 0, len( hosts ) ):
1648 controllerStr = str( main.activeNodes[controller] + 1 )
1649 if hosts[ controller ]:
1650 for host in hosts[ controller ]:
1651 if not host.get( 'ipAddresses', [ ] ):
1652 main.log.error( "Error with host ips on controller" +
1653 controllerStr + ": " + str( host ) )
1654 ipResult = main.FALSE
1655 utilities.assert_equals(
1656 expect=main.TRUE,
1657 actual=ipResult,
1658 onpass="The ips of the hosts aren't empty",
1659 onfail="The ip of at least one host is missing" )
1660
1661 # Strongly connected clusters of devices
1662 main.step( "Cluster view is consistent across ONOS nodes" )
1663 consistentClustersResult = main.TRUE
1664 for controller in range( len( clusters ) ):
1665 controllerStr = str( main.activeNodes[controller] + 1 )
1666 if "Error" not in clusters[ controller ]:
1667 if clusters[ controller ] == clusters[ 0 ]:
1668 continue
1669 else: # clusters not consistent
1670 main.log.error( "clusters from ONOS" + controllerStr +
1671 " is inconsistent with ONOS1" )
1672 consistentClustersResult = main.FALSE
1673
1674 else:
1675 main.log.error( "Error in getting dataplane clusters " +
1676 "from ONOS" + controllerStr )
1677 consistentClustersResult = main.FALSE
1678 main.log.warn( "ONOS" + controllerStr +
1679 " clusters response: " +
1680 repr( clusters[ controller ] ) )
1681 utilities.assert_equals(
1682 expect=main.TRUE,
1683 actual=consistentClustersResult,
1684 onpass="Clusters view is consistent across all ONOS nodes",
1685 onfail="ONOS nodes have different views of clusters" )
Jon Hall172b7ba2016-04-07 18:12:20 -07001686 if consistentClustersResult != main.TRUE:
1687 main.log.debug( clusters )
Jon Hall6e709752016-02-01 13:38:46 -08001688 # there should always only be one cluster
1689 main.step( "Cluster view correct across ONOS nodes" )
1690 try:
1691 numClusters = len( json.loads( clusters[ 0 ] ) )
1692 except ( ValueError, TypeError ):
1693 main.log.exception( "Error parsing clusters[0]: " +
1694 repr( clusters[ 0 ] ) )
1695 numClusters = "ERROR"
1696 clusterResults = main.FALSE
1697 if numClusters == 1:
1698 clusterResults = main.TRUE
1699 utilities.assert_equals(
1700 expect=1,
1701 actual=numClusters,
1702 onpass="ONOS shows 1 SCC",
1703 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1704
1705 main.step( "Comparing ONOS topology to MN" )
1706 devicesResults = main.TRUE
1707 linksResults = main.TRUE
1708 hostsResults = main.TRUE
1709 mnSwitches = main.Mininet1.getSwitches()
1710 mnLinks = main.Mininet1.getLinks()
1711 mnHosts = main.Mininet1.getHosts()
1712 for controller in main.activeNodes:
1713 controllerStr = str( main.activeNodes[controller] + 1 )
1714 if devices[ controller ] and ports[ controller ] and\
1715 "Error" not in devices[ controller ] and\
1716 "Error" not in ports[ controller ]:
1717 currentDevicesResult = main.Mininet1.compareSwitches(
1718 mnSwitches,
1719 json.loads( devices[ controller ] ),
1720 json.loads( ports[ controller ] ) )
1721 else:
1722 currentDevicesResult = main.FALSE
1723 utilities.assert_equals( expect=main.TRUE,
1724 actual=currentDevicesResult,
1725 onpass="ONOS" + controllerStr +
1726 " Switches view is correct",
1727 onfail="ONOS" + controllerStr +
1728 " Switches view is incorrect" )
1729 if links[ controller ] and "Error" not in links[ controller ]:
1730 currentLinksResult = main.Mininet1.compareLinks(
1731 mnSwitches, mnLinks,
1732 json.loads( links[ controller ] ) )
1733 else:
1734 currentLinksResult = main.FALSE
1735 utilities.assert_equals( expect=main.TRUE,
1736 actual=currentLinksResult,
1737 onpass="ONOS" + controllerStr +
1738 " links view is correct",
1739 onfail="ONOS" + controllerStr +
1740 " links view is incorrect" )
1741
1742 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1743 currentHostsResult = main.Mininet1.compareHosts(
1744 mnHosts,
1745 hosts[ controller ] )
1746 else:
1747 currentHostsResult = main.FALSE
1748 utilities.assert_equals( expect=main.TRUE,
1749 actual=currentHostsResult,
1750 onpass="ONOS" + controllerStr +
1751 " hosts exist in Mininet",
1752 onfail="ONOS" + controllerStr +
1753 " hosts don't match Mininet" )
1754
1755 devicesResults = devicesResults and currentDevicesResult
1756 linksResults = linksResults and currentLinksResult
1757 hostsResults = hostsResults and currentHostsResult
1758
1759 main.step( "Device information is correct" )
1760 utilities.assert_equals(
1761 expect=main.TRUE,
1762 actual=devicesResults,
1763 onpass="Device information is correct",
1764 onfail="Device information is incorrect" )
1765
1766 main.step( "Links are correct" )
1767 utilities.assert_equals(
1768 expect=main.TRUE,
1769 actual=linksResults,
1770 onpass="Link are correct",
1771 onfail="Links are incorrect" )
1772
1773 main.step( "Hosts are correct" )
1774 utilities.assert_equals(
1775 expect=main.TRUE,
1776 actual=hostsResults,
1777 onpass="Hosts are correct",
1778 onfail="Hosts are incorrect" )
1779
1780 def CASE61( self, main ):
1781 """
1782 The Failure case.
1783 """
1784 import math
1785 assert main.numCtrls, "main.numCtrls not defined"
1786 assert main, "main not defined"
1787 assert utilities.assert_equals, "utilities.assert_equals not defined"
1788 assert main.CLIs, "main.CLIs not defined"
1789 assert main.nodes, "main.nodes not defined"
1790 main.case( "Partition ONOS nodes into two distinct partitions" )
1791
1792 main.step( "Checking ONOS Logs for errors" )
1793 for node in main.nodes:
1794 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1795 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1796
1797 n = len( main.nodes ) # Number of nodes
1798 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1799 main.partition = [ 0 ] # ONOS node to partition, listed by index in main.nodes
1800 if n > 3:
1801 main.partition.append( p - 1 )
1802 # NOTE: This only works for cluster sizes of 3,5, or 7.
1803
1804 main.step( "Partitioning ONOS nodes" )
1805 nodeList = [ str( i + 1 ) for i in main.partition ]
1806 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1807 partitionResults = main.TRUE
1808 for i in range( 0, n ):
1809 this = main.nodes[i]
1810 if i not in main.partition:
1811 for j in main.partition:
1812 foe = main.nodes[j]
1813 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1814 #CMD HERE
1815 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1816 this.handle.sendline( cmdStr )
1817 this.handle.expect( "\$" )
1818 main.log.debug( this.handle.before )
1819 else:
1820 for j in range( 0, n ):
1821 if j not in main.partition:
1822 foe = main.nodes[j]
1823 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1824 #CMD HERE
1825 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1826 this.handle.sendline( cmdStr )
1827 this.handle.expect( "\$" )
1828 main.log.debug( this.handle.before )
1829 main.activeNodes.remove( i )
1830 # NOTE: When dynamic clustering is finished, we need to start checking
1831 # main.partion nodes still work when partitioned
1832 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1833 onpass="Firewall rules set successfully",
1834 onfail="Error setting firewall rules" )
1835
1836 main.log.step( "Sleeping 60 seconds" )
1837 time.sleep( 60 )
1838
1839 def CASE62( self, main ):
1840 """
1841 Healing Partition
1842 """
1843 import time
1844 assert main.numCtrls, "main.numCtrls not defined"
1845 assert main, "main not defined"
1846 assert utilities.assert_equals, "utilities.assert_equals not defined"
1847 assert main.CLIs, "main.CLIs not defined"
1848 assert main.nodes, "main.nodes not defined"
1849 assert main.partition, "main.partition not defined"
1850 main.case( "Healing Partition" )
1851
1852 main.step( "Deleteing firewall rules" )
1853 healResults = main.TRUE
1854 for node in main.nodes:
1855 cmdStr = "sudo iptables -F"
1856 node.handle.sendline( cmdStr )
1857 node.handle.expect( "\$" )
1858 main.log.debug( node.handle.before )
1859 utilities.assert_equals( expect=main.TRUE, actual=healResults,
1860 onpass="Firewall rules removed",
1861 onfail="Error removing firewall rules" )
1862
1863 for node in main.partition:
1864 main.activeNodes.append( node )
1865 main.activeNodes.sort()
1866 try:
1867 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1868 "List of active nodes has duplicates, this likely indicates something was run out of order"
1869 except AssertionError:
1870 main.log.exception( "" )
1871 main.cleanup()
1872 main.exit()
1873
    def CASE7( self, main ):
        """
        Check state after ONOS failure

        Verifies, across all active nodes: every device has a master and the
        mastership view is consistent; intents are readable, consistent across
        nodes, and unchanged from before the failure; switch flow tables match
        the pre-failure snapshot; and leadership election still reports a
        single leader that is not one of the partitioned nodes.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.partition is set by the partitioning case; default to empty
        # if this case runs stand-alone
        try:
            main.partition
        except AttributeError:
            main.partition = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states for debugging
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        # NOTE(review): intentState is expected to be a global saved by an
        # earlier case; the NameError guard below handles its absence.
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before failure" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=sameIntents,
                onpass="Intents are consistent with before failure",
                onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            # NOTE(review): 'flows' is not defined in this method; presumably
            # a pre-failure snapshot saved by an earlier case — verify.
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the partitioned nodes; a leader on one of these is an error
        partitioned = []
        for i in main.partition:
            partitioned.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in partitioned:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was partitioned" )
                leaderResult = main.FALSE
        # All active nodes must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2196
    def CASE8( self, main ):
        """
        Compare topo

        Repeatedly pulls devices, hosts, ports, links and clusters from every
        active ONOS node (retrying until the views match Mininet, up to the
        time/count budget), then compares each node's view against the Mininet
        topology, checks host attachment points against the expected obelisk
        mapping, and verifies cross-node consistency of hosts and clusters.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Fetch each topology object from every active node in parallel,
            # retrying each CLI call a few times with randomized sleeps
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host should have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every fetch failed on every node there is nothing to compare
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                   "Error" not in devices[ controller ] and\
                   "Error" not in ports[ controller ]:

                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ) as e:
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
            topoResult = ( devicesResults and linksResults
                           and hostsResults and ipResult and
                           hostAttachmentResults )
        utilities.assert_equals( expect=True,
                                 actual=topoResult,
                                 onpass="ONOS topology matches Mininet",
                                 onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # Also require convergence within two polling rounds
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
            " seconds for the test to make all the cli calls to fetch " +\
            "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -08002612
2613 def CASE9( self, main ):
2614 """
2615 Link s3-s28 down
2616 """
2617 import time
2618 assert main.numCtrls, "main.numCtrls not defined"
2619 assert main, "main not defined"
2620 assert utilities.assert_equals, "utilities.assert_equals not defined"
2621 assert main.CLIs, "main.CLIs not defined"
2622 assert main.nodes, "main.nodes not defined"
2623 # NOTE: You should probably run a topology check after this
2624
2625 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2626
2627 description = "Turn off a link to ensure that Link Discovery " +\
2628 "is working properly"
2629 main.case( description )
2630
2631 main.step( "Kill Link between s3 and s28" )
2632 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2633 main.log.info( "Waiting " + str( linkSleep ) +
2634 " seconds for link down to be discovered" )
2635 time.sleep( linkSleep )
2636 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2637 onpass="Link down successful",
2638 onfail="Failed to bring link down" )
2639 # TODO do some sort of check here
2640
2641 def CASE10( self, main ):
2642 """
2643 Link s3-s28 up
2644 """
2645 import time
2646 assert main.numCtrls, "main.numCtrls not defined"
2647 assert main, "main not defined"
2648 assert utilities.assert_equals, "utilities.assert_equals not defined"
2649 assert main.CLIs, "main.CLIs not defined"
2650 assert main.nodes, "main.nodes not defined"
2651 # NOTE: You should probably run a topology check after this
2652
2653 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2654
2655 description = "Restore a link to ensure that Link Discovery is " + \
2656 "working properly"
2657 main.case( description )
2658
2659 main.step( "Bring link between s3 and s28 back up" )
2660 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2661 main.log.info( "Waiting " + str( linkSleep ) +
2662 " seconds for link up to be discovered" )
2663 time.sleep( linkSleep )
2664 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2665 onpass="Link up successful",
2666 onfail="Failed to bring link up" )
2667 # TODO do some sort of check here
2668
2669 def CASE11( self, main ):
2670 """
2671 Switch Down
2672 """
2673 # NOTE: You should probably run a topology check after this
2674 import time
2675 assert main.numCtrls, "main.numCtrls not defined"
2676 assert main, "main not defined"
2677 assert utilities.assert_equals, "utilities.assert_equals not defined"
2678 assert main.CLIs, "main.CLIs not defined"
2679 assert main.nodes, "main.nodes not defined"
2680
2681 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2682
2683 description = "Killing a switch to ensure it is discovered correctly"
2684 onosCli = main.CLIs[ main.activeNodes[0] ]
2685 main.case( description )
2686 switch = main.params[ 'kill' ][ 'switch' ]
2687 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2688
2689 # TODO: Make this switch parameterizable
2690 main.step( "Kill " + switch )
2691 main.log.info( "Deleting " + switch )
2692 main.Mininet1.delSwitch( switch )
2693 main.log.info( "Waiting " + str( switchSleep ) +
2694 " seconds for switch down to be discovered" )
2695 time.sleep( switchSleep )
2696 device = onosCli.getDevice( dpid=switchDPID )
2697 # Peek at the deleted switch
2698 main.log.warn( str( device ) )
2699 result = main.FALSE
2700 if device and device[ 'available' ] is False:
2701 result = main.TRUE
2702 utilities.assert_equals( expect=main.TRUE, actual=result,
2703 onpass="Kill switch successful",
2704 onfail="Failed to kill switch?" )
2705
2706 def CASE12( self, main ):
2707 """
2708 Switch Up
2709 """
2710 # NOTE: You should probably run a topology check after this
2711 import time
2712 assert main.numCtrls, "main.numCtrls not defined"
2713 assert main, "main not defined"
2714 assert utilities.assert_equals, "utilities.assert_equals not defined"
2715 assert main.CLIs, "main.CLIs not defined"
2716 assert main.nodes, "main.nodes not defined"
2717 assert ONOS1Port, "ONOS1Port not defined"
2718 assert ONOS2Port, "ONOS2Port not defined"
2719 assert ONOS3Port, "ONOS3Port not defined"
2720 assert ONOS4Port, "ONOS4Port not defined"
2721 assert ONOS5Port, "ONOS5Port not defined"
2722 assert ONOS6Port, "ONOS6Port not defined"
2723 assert ONOS7Port, "ONOS7Port not defined"
2724
2725 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2726 switch = main.params[ 'kill' ][ 'switch' ]
2727 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2728 links = main.params[ 'kill' ][ 'links' ].split()
2729 onosCli = main.CLIs[ main.activeNodes[0] ]
2730 description = "Adding a switch to ensure it is discovered correctly"
2731 main.case( description )
2732
2733 main.step( "Add back " + switch )
2734 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2735 for peer in links:
2736 main.Mininet1.addLink( switch, peer )
2737 ipList = [ node.ip_address for node in main.nodes ]
2738 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2739 main.log.info( "Waiting " + str( switchSleep ) +
2740 " seconds for switch up to be discovered" )
2741 time.sleep( switchSleep )
2742 device = onosCli.getDevice( dpid=switchDPID )
2743 # Peek at the deleted switch
2744 main.log.warn( str( device ) )
2745 result = main.FALSE
2746 if device and device[ 'available' ]:
2747 result = main.TRUE
2748 utilities.assert_equals( expect=main.TRUE, actual=result,
2749 onpass="add switch successful",
2750 onfail="Failed to add switch?" )
2751
2752 def CASE13( self, main ):
2753 """
2754 Clean up
2755 """
2756 import os
2757 import time
2758 assert main.numCtrls, "main.numCtrls not defined"
2759 assert main, "main not defined"
2760 assert utilities.assert_equals, "utilities.assert_equals not defined"
2761 assert main.CLIs, "main.CLIs not defined"
2762 assert main.nodes, "main.nodes not defined"
2763
2764 # printing colors to terminal
2765 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2766 'blue': '\033[94m', 'green': '\033[92m',
2767 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2768 main.case( "Test Cleanup" )
2769 main.step( "Killing tcpdumps" )
2770 main.Mininet2.stopTcpdump()
2771
2772 testname = main.TEST
2773 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2774 main.step( "Copying MN pcap and ONOS log files to test station" )
2775 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2776 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2777 # NOTE: MN Pcap file is being saved to logdir.
2778 # We scp this file as MN and TestON aren't necessarily the same vm
2779
2780 # FIXME: To be replaced with a Jenkin's post script
2781 # TODO: Load these from params
2782 # NOTE: must end in /
2783 logFolder = "/opt/onos/log/"
2784 logFiles = [ "karaf.log", "karaf.log.1" ]
2785 # NOTE: must end in /
2786 for f in logFiles:
2787 for node in main.nodes:
2788 dstName = main.logdir + "/" + node.name + "-" + f
2789 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2790 logFolder + f, dstName )
2791 # std*.log's
2792 # NOTE: must end in /
2793 logFolder = "/opt/onos/var/"
2794 logFiles = [ "stderr.log", "stdout.log" ]
2795 # NOTE: must end in /
2796 for f in logFiles:
2797 for node in main.nodes:
2798 dstName = main.logdir + "/" + node.name + "-" + f
2799 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2800 logFolder + f, dstName )
2801 else:
2802 main.log.debug( "skipping saving log files" )
2803
2804 main.step( "Stopping Mininet" )
2805 mnResult = main.Mininet1.stopNet()
2806 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2807 onpass="Mininet stopped",
2808 onfail="MN cleanup NOT successful" )
2809
2810 main.step( "Checking ONOS Logs for errors" )
2811 for node in main.nodes:
2812 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2813 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2814
2815 try:
2816 timerLog = open( main.logdir + "/Timers.csv", 'w')
2817 # Overwrite with empty line and close
2818 labels = "Gossip Intents"
2819 data = str( gossipTime )
2820 timerLog.write( labels + "\n" + data )
2821 timerLog.close()
2822 except NameError, e:
2823 main.log.exception(e)
2824
2825 def CASE14( self, main ):
2826 """
2827 start election app on all onos nodes
2828 """
2829 assert main.numCtrls, "main.numCtrls not defined"
2830 assert main, "main not defined"
2831 assert utilities.assert_equals, "utilities.assert_equals not defined"
2832 assert main.CLIs, "main.CLIs not defined"
2833 assert main.nodes, "main.nodes not defined"
2834
2835 main.case("Start Leadership Election app")
2836 main.step( "Install leadership election app" )
2837 onosCli = main.CLIs[ main.activeNodes[0] ]
2838 appResult = onosCli.activateApp( "org.onosproject.election" )
2839 utilities.assert_equals(
2840 expect=main.TRUE,
2841 actual=appResult,
2842 onpass="Election app installed",
2843 onfail="Something went wrong with installing Leadership election" )
2844
2845 main.step( "Run for election on each node" )
Jon Hall6e709752016-02-01 13:38:46 -08002846 for i in main.activeNodes:
2847 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002848 time.sleep(5)
2849 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2850 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall6e709752016-02-01 13:38:46 -08002851 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002852 expect=True,
2853 actual=sameResult,
2854 onpass="All nodes see the same leaderboards",
2855 onfail="Inconsistent leaderboards" )
Jon Hall6e709752016-02-01 13:38:46 -08002856
Jon Hall25463a82016-04-13 14:03:52 -07002857 if sameResult:
2858 leader = leaders[ 0 ][ 0 ]
2859 if main.nodes[main.activeNodes[0]].ip_address in leader:
2860 correctLeader = True
2861 else:
2862 correctLeader = False
2863 main.step( "First node was elected leader" )
2864 utilities.assert_equals(
2865 expect=True,
2866 actual=correctLeader,
2867 onpass="Correct leader was elected",
2868 onfail="Incorrect leader" )
Jon Hall6e709752016-02-01 13:38:46 -08002869
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal, and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app there is nothing left to test here
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        # consistentLeaderboards returns ( allSame, leaderboards )
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # First entry of the agreed-upon leaderboard is the current leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE: no break means the leader's IP matched no node
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means a node reported no leader at all
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 leaderboard entries: can't tell who should win
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this sleep
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                     str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3043
3044 def CASE16( self, main ):
3045 """
3046 Install Distributed Primitives app
3047 """
3048 import time
3049 assert main.numCtrls, "main.numCtrls not defined"
3050 assert main, "main not defined"
3051 assert utilities.assert_equals, "utilities.assert_equals not defined"
3052 assert main.CLIs, "main.CLIs not defined"
3053 assert main.nodes, "main.nodes not defined"
3054
3055 # Variables for the distributed primitives tests
3056 global pCounterName
Jon Hall6e709752016-02-01 13:38:46 -08003057 global pCounterValue
Jon Hall6e709752016-02-01 13:38:46 -08003058 global onosSet
3059 global onosSetName
3060 pCounterName = "TestON-Partitions"
Jon Hall6e709752016-02-01 13:38:46 -08003061 pCounterValue = 0
Jon Hall6e709752016-02-01 13:38:46 -08003062 onosSet = set([])
3063 onosSetName = "TestON-set"
3064
3065 description = "Install Primitives app"
3066 main.case( description )
3067 main.step( "Install Primitives app" )
3068 appName = "org.onosproject.distributedprimitives"
3069 node = main.activeNodes[0]
3070 appResults = main.CLIs[node].activateApp( appName )
3071 utilities.assert_equals( expect=main.TRUE,
3072 actual=appResults,
3073 onpass="Primitives app activated",
3074 onfail="Primitives app not activated" )
3075 time.sleep( 5 ) # To allow all nodes to activate
3076
3077 def CASE17( self, main ):
3078 """
3079 Check for basic functionality with distributed primitives
3080 """
3081 # Make sure variables are defined/set
3082 assert main.numCtrls, "main.numCtrls not defined"
3083 assert main, "main not defined"
3084 assert utilities.assert_equals, "utilities.assert_equals not defined"
3085 assert main.CLIs, "main.CLIs not defined"
3086 assert main.nodes, "main.nodes not defined"
3087 assert pCounterName, "pCounterName not defined"
Jon Hall6e709752016-02-01 13:38:46 -08003088 assert onosSetName, "onosSetName not defined"
3089 # NOTE: assert fails if value is 0/None/Empty/False
3090 try:
3091 pCounterValue
3092 except NameError:
3093 main.log.error( "pCounterValue not defined, setting to 0" )
3094 pCounterValue = 0
3095 try:
Jon Hall6e709752016-02-01 13:38:46 -08003096 onosSet
3097 except NameError:
3098 main.log.error( "onosSet not defined, setting to empty Set" )
3099 onosSet = set([])
3100 # Variables for the distributed primitives tests. These are local only
3101 addValue = "a"
3102 addAllValue = "a b c d e f"
3103 retainValue = "c d e f"
3104
3105 description = "Check for basic functionality with distributed " +\
3106 "primitives"
3107 main.case( description )
3108 main.caseExplanation = "Test the methods of the distributed " +\
3109 "primitives (counters and sets) throught the cli"
3110 # DISTRIBUTED ATOMIC COUNTERS
3111 # Partitioned counters
3112 main.step( "Increment then get a default counter on each node" )
3113 pCounters = []
3114 threads = []
3115 addedPValues = []
3116 for i in main.activeNodes:
3117 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3118 name="counterAddAndGet-" + str( i ),
3119 args=[ pCounterName ] )
3120 pCounterValue += 1
3121 addedPValues.append( pCounterValue )
3122 threads.append( t )
3123 t.start()
3124
3125 for t in threads:
3126 t.join()
3127 pCounters.append( t.result )
3128 # Check that counter incremented numController times
3129 pCounterResults = True
3130 for i in addedPValues:
3131 tmpResult = i in pCounters
3132 pCounterResults = pCounterResults and tmpResult
3133 if not tmpResult:
3134 main.log.error( str( i ) + " is not in partitioned "
3135 "counter incremented results" )
3136 utilities.assert_equals( expect=True,
3137 actual=pCounterResults,
3138 onpass="Default counter incremented",
3139 onfail="Error incrementing default" +
3140 " counter" )
3141
3142 main.step( "Get then Increment a default counter on each node" )
3143 pCounters = []
3144 threads = []
3145 addedPValues = []
3146 for i in main.activeNodes:
3147 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3148 name="counterGetAndAdd-" + str( i ),
3149 args=[ pCounterName ] )
3150 addedPValues.append( pCounterValue )
3151 pCounterValue += 1
3152 threads.append( t )
3153 t.start()
3154
3155 for t in threads:
3156 t.join()
3157 pCounters.append( t.result )
3158 # Check that counter incremented numController times
3159 pCounterResults = True
3160 for i in addedPValues:
3161 tmpResult = i in pCounters
3162 pCounterResults = pCounterResults and tmpResult
3163 if not tmpResult:
3164 main.log.error( str( i ) + " is not in partitioned "
3165 "counter incremented results" )
3166 utilities.assert_equals( expect=True,
3167 actual=pCounterResults,
3168 onpass="Default counter incremented",
3169 onfail="Error incrementing default" +
3170 " counter" )
3171
3172 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003173 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003174 utilities.assert_equals( expect=main.TRUE,
3175 actual=incrementCheck,
3176 onpass="Added counters are correct",
3177 onfail="Added counters are incorrect" )
3178
3179 main.step( "Add -8 to then get a default counter on each node" )
3180 pCounters = []
3181 threads = []
3182 addedPValues = []
3183 for i in main.activeNodes:
3184 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3185 name="counterIncrement-" + str( i ),
3186 args=[ pCounterName ],
3187 kwargs={ "delta": -8 } )
3188 pCounterValue += -8
3189 addedPValues.append( pCounterValue )
3190 threads.append( t )
3191 t.start()
3192
3193 for t in threads:
3194 t.join()
3195 pCounters.append( t.result )
3196 # Check that counter incremented numController times
3197 pCounterResults = True
3198 for i in addedPValues:
3199 tmpResult = i in pCounters
3200 pCounterResults = pCounterResults and tmpResult
3201 if not tmpResult:
3202 main.log.error( str( i ) + " is not in partitioned "
3203 "counter incremented results" )
3204 utilities.assert_equals( expect=True,
3205 actual=pCounterResults,
3206 onpass="Default counter incremented",
3207 onfail="Error incrementing default" +
3208 " counter" )
3209
3210 main.step( "Add 5 to then get a default counter on each node" )
3211 pCounters = []
3212 threads = []
3213 addedPValues = []
3214 for i in main.activeNodes:
3215 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3216 name="counterIncrement-" + str( i ),
3217 args=[ pCounterName ],
3218 kwargs={ "delta": 5 } )
3219 pCounterValue += 5
3220 addedPValues.append( pCounterValue )
3221 threads.append( t )
3222 t.start()
3223
3224 for t in threads:
3225 t.join()
3226 pCounters.append( t.result )
3227 # Check that counter incremented numController times
3228 pCounterResults = True
3229 for i in addedPValues:
3230 tmpResult = i in pCounters
3231 pCounterResults = pCounterResults and tmpResult
3232 if not tmpResult:
3233 main.log.error( str( i ) + " is not in partitioned "
3234 "counter incremented results" )
3235 utilities.assert_equals( expect=True,
3236 actual=pCounterResults,
3237 onpass="Default counter incremented",
3238 onfail="Error incrementing default" +
3239 " counter" )
3240
3241 main.step( "Get then add 5 to a default counter on each node" )
3242 pCounters = []
3243 threads = []
3244 addedPValues = []
3245 for i in main.activeNodes:
3246 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3247 name="counterIncrement-" + str( i ),
3248 args=[ pCounterName ],
3249 kwargs={ "delta": 5 } )
3250 addedPValues.append( pCounterValue )
3251 pCounterValue += 5
3252 threads.append( t )
3253 t.start()
3254
3255 for t in threads:
3256 t.join()
3257 pCounters.append( t.result )
3258 # Check that counter incremented numController times
3259 pCounterResults = True
3260 for i in addedPValues:
3261 tmpResult = i in pCounters
3262 pCounterResults = pCounterResults and tmpResult
3263 if not tmpResult:
3264 main.log.error( str( i ) + " is not in partitioned "
3265 "counter incremented results" )
3266 utilities.assert_equals( expect=True,
3267 actual=pCounterResults,
3268 onpass="Default counter incremented",
3269 onfail="Error incrementing default" +
3270 " counter" )
3271
3272 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003273 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003274 utilities.assert_equals( expect=main.TRUE,
3275 actual=incrementCheck,
3276 onpass="Added counters are correct",
3277 onfail="Added counters are incorrect" )
3278
Jon Hall6e709752016-02-01 13:38:46 -08003279 # DISTRIBUTED SETS
3280 main.step( "Distributed Set get" )
3281 size = len( onosSet )
3282 getResponses = []
3283 threads = []
3284 for i in main.activeNodes:
3285 t = main.Thread( target=main.CLIs[i].setTestGet,
3286 name="setTestGet-" + str( i ),
3287 args=[ onosSetName ] )
3288 threads.append( t )
3289 t.start()
3290 for t in threads:
3291 t.join()
3292 getResponses.append( t.result )
3293
3294 getResults = main.TRUE
3295 for i in range( len( main.activeNodes ) ):
3296 node = str( main.activeNodes[i] + 1 )
3297 if isinstance( getResponses[ i ], list):
3298 current = set( getResponses[ i ] )
3299 if len( current ) == len( getResponses[ i ] ):
3300 # no repeats
3301 if onosSet != current:
3302 main.log.error( "ONOS" + node +
3303 " has incorrect view" +
3304 " of set " + onosSetName + ":\n" +
3305 str( getResponses[ i ] ) )
3306 main.log.debug( "Expected: " + str( onosSet ) )
3307 main.log.debug( "Actual: " + str( current ) )
3308 getResults = main.FALSE
3309 else:
3310 # error, set is not a set
3311 main.log.error( "ONOS" + node +
3312 " has repeat elements in" +
3313 " set " + onosSetName + ":\n" +
3314 str( getResponses[ i ] ) )
3315 getResults = main.FALSE
3316 elif getResponses[ i ] == main.ERROR:
3317 getResults = main.FALSE
3318 utilities.assert_equals( expect=main.TRUE,
3319 actual=getResults,
3320 onpass="Set elements are correct",
3321 onfail="Set elements are incorrect" )
3322
3323 main.step( "Distributed Set size" )
3324 sizeResponses = []
3325 threads = []
3326 for i in main.activeNodes:
3327 t = main.Thread( target=main.CLIs[i].setTestSize,
3328 name="setTestSize-" + str( i ),
3329 args=[ onosSetName ] )
3330 threads.append( t )
3331 t.start()
3332 for t in threads:
3333 t.join()
3334 sizeResponses.append( t.result )
3335
3336 sizeResults = main.TRUE
3337 for i in range( len( main.activeNodes ) ):
3338 node = str( main.activeNodes[i] + 1 )
3339 if size != sizeResponses[ i ]:
3340 sizeResults = main.FALSE
3341 main.log.error( "ONOS" + node +
3342 " expected a size of " + str( size ) +
3343 " for set " + onosSetName +
3344 " but got " + str( sizeResponses[ i ] ) )
3345 utilities.assert_equals( expect=main.TRUE,
3346 actual=sizeResults,
3347 onpass="Set sizes are correct",
3348 onfail="Set sizes are incorrect" )
3349
3350 main.step( "Distributed Set add()" )
3351 onosSet.add( addValue )
3352 addResponses = []
3353 threads = []
3354 for i in main.activeNodes:
3355 t = main.Thread( target=main.CLIs[i].setTestAdd,
3356 name="setTestAdd-" + str( i ),
3357 args=[ onosSetName, addValue ] )
3358 threads.append( t )
3359 t.start()
3360 for t in threads:
3361 t.join()
3362 addResponses.append( t.result )
3363
3364 # main.TRUE = successfully changed the set
3365 # main.FALSE = action resulted in no change in set
3366 # main.ERROR - Some error in executing the function
3367 addResults = main.TRUE
3368 for i in range( len( main.activeNodes ) ):
3369 if addResponses[ i ] == main.TRUE:
3370 # All is well
3371 pass
3372 elif addResponses[ i ] == main.FALSE:
3373 # Already in set, probably fine
3374 pass
3375 elif addResponses[ i ] == main.ERROR:
3376 # Error in execution
3377 addResults = main.FALSE
3378 else:
3379 # unexpected result
3380 addResults = main.FALSE
3381 if addResults != main.TRUE:
3382 main.log.error( "Error executing set add" )
3383
3384 # Check if set is still correct
3385 size = len( onosSet )
3386 getResponses = []
3387 threads = []
3388 for i in main.activeNodes:
3389 t = main.Thread( target=main.CLIs[i].setTestGet,
3390 name="setTestGet-" + str( i ),
3391 args=[ onosSetName ] )
3392 threads.append( t )
3393 t.start()
3394 for t in threads:
3395 t.join()
3396 getResponses.append( t.result )
3397 getResults = main.TRUE
3398 for i in range( len( main.activeNodes ) ):
3399 node = str( main.activeNodes[i] + 1 )
3400 if isinstance( getResponses[ i ], list):
3401 current = set( getResponses[ i ] )
3402 if len( current ) == len( getResponses[ i ] ):
3403 # no repeats
3404 if onosSet != current:
3405 main.log.error( "ONOS" + node + " has incorrect view" +
3406 " of set " + onosSetName + ":\n" +
3407 str( getResponses[ i ] ) )
3408 main.log.debug( "Expected: " + str( onosSet ) )
3409 main.log.debug( "Actual: " + str( current ) )
3410 getResults = main.FALSE
3411 else:
3412 # error, set is not a set
3413 main.log.error( "ONOS" + node + " has repeat elements in" +
3414 " set " + onosSetName + ":\n" +
3415 str( getResponses[ i ] ) )
3416 getResults = main.FALSE
3417 elif getResponses[ i ] == main.ERROR:
3418 getResults = main.FALSE
3419 sizeResponses = []
3420 threads = []
3421 for i in main.activeNodes:
3422 t = main.Thread( target=main.CLIs[i].setTestSize,
3423 name="setTestSize-" + str( i ),
3424 args=[ onosSetName ] )
3425 threads.append( t )
3426 t.start()
3427 for t in threads:
3428 t.join()
3429 sizeResponses.append( t.result )
3430 sizeResults = main.TRUE
3431 for i in range( len( main.activeNodes ) ):
3432 node = str( main.activeNodes[i] + 1 )
3433 if size != sizeResponses[ i ]:
3434 sizeResults = main.FALSE
3435 main.log.error( "ONOS" + node +
3436 " expected a size of " + str( size ) +
3437 " for set " + onosSetName +
3438 " but got " + str( sizeResponses[ i ] ) )
3439 addResults = addResults and getResults and sizeResults
3440 utilities.assert_equals( expect=main.TRUE,
3441 actual=addResults,
3442 onpass="Set add correct",
3443 onfail="Set add was incorrect" )
3444
3445 main.step( "Distributed Set addAll()" )
3446 onosSet.update( addAllValue.split() )
3447 addResponses = []
3448 threads = []
3449 for i in main.activeNodes:
3450 t = main.Thread( target=main.CLIs[i].setTestAdd,
3451 name="setTestAddAll-" + str( i ),
3452 args=[ onosSetName, addAllValue ] )
3453 threads.append( t )
3454 t.start()
3455 for t in threads:
3456 t.join()
3457 addResponses.append( t.result )
3458
3459 # main.TRUE = successfully changed the set
3460 # main.FALSE = action resulted in no change in set
3461 # main.ERROR - Some error in executing the function
3462 addAllResults = main.TRUE
3463 for i in range( len( main.activeNodes ) ):
3464 if addResponses[ i ] == main.TRUE:
3465 # All is well
3466 pass
3467 elif addResponses[ i ] == main.FALSE:
3468 # Already in set, probably fine
3469 pass
3470 elif addResponses[ i ] == main.ERROR:
3471 # Error in execution
3472 addAllResults = main.FALSE
3473 else:
3474 # unexpected result
3475 addAllResults = main.FALSE
3476 if addAllResults != main.TRUE:
3477 main.log.error( "Error executing set addAll" )
3478
3479 # Check if set is still correct
3480 size = len( onosSet )
3481 getResponses = []
3482 threads = []
3483 for i in main.activeNodes:
3484 t = main.Thread( target=main.CLIs[i].setTestGet,
3485 name="setTestGet-" + str( i ),
3486 args=[ onosSetName ] )
3487 threads.append( t )
3488 t.start()
3489 for t in threads:
3490 t.join()
3491 getResponses.append( t.result )
3492 getResults = main.TRUE
3493 for i in range( len( main.activeNodes ) ):
3494 node = str( main.activeNodes[i] + 1 )
3495 if isinstance( getResponses[ i ], list):
3496 current = set( getResponses[ i ] )
3497 if len( current ) == len( getResponses[ i ] ):
3498 # no repeats
3499 if onosSet != current:
3500 main.log.error( "ONOS" + node +
3501 " has incorrect view" +
3502 " of set " + onosSetName + ":\n" +
3503 str( getResponses[ i ] ) )
3504 main.log.debug( "Expected: " + str( onosSet ) )
3505 main.log.debug( "Actual: " + str( current ) )
3506 getResults = main.FALSE
3507 else:
3508 # error, set is not a set
3509 main.log.error( "ONOS" + node +
3510 " has repeat elements in" +
3511 " set " + onosSetName + ":\n" +
3512 str( getResponses[ i ] ) )
3513 getResults = main.FALSE
3514 elif getResponses[ i ] == main.ERROR:
3515 getResults = main.FALSE
3516 sizeResponses = []
3517 threads = []
3518 for i in main.activeNodes:
3519 t = main.Thread( target=main.CLIs[i].setTestSize,
3520 name="setTestSize-" + str( i ),
3521 args=[ onosSetName ] )
3522 threads.append( t )
3523 t.start()
3524 for t in threads:
3525 t.join()
3526 sizeResponses.append( t.result )
3527 sizeResults = main.TRUE
3528 for i in range( len( main.activeNodes ) ):
3529 node = str( main.activeNodes[i] + 1 )
3530 if size != sizeResponses[ i ]:
3531 sizeResults = main.FALSE
3532 main.log.error( "ONOS" + node +
3533 " expected a size of " + str( size ) +
3534 " for set " + onosSetName +
3535 " but got " + str( sizeResponses[ i ] ) )
3536 addAllResults = addAllResults and getResults and sizeResults
3537 utilities.assert_equals( expect=main.TRUE,
3538 actual=addAllResults,
3539 onpass="Set addAll correct",
3540 onfail="Set addAll was incorrect" )
3541
        main.step( "Distributed Set contains()" )
        # Ask each active node whether the set contains addValue; setTestGet
        # with the "values" kwarg returns a tuple whose second element is the
        # boolean contains result.
        containsResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setContains-" + str( i ),
                             args=[ onosSetName ],
                             kwargs={ "values": addValue } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            # NOTE: This is the tuple
            containsResponses.append( t.result )

        containsResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if containsResponses[ i ] == main.ERROR:
                containsResults = main.FALSE
            else:
                # AND in the contains boolean (index 1 of the tuple).
                containsResults = containsResults and\
                                  containsResponses[ i ][ 1 ]
        utilities.assert_equals( expect=main.TRUE,
                                 actual=containsResults,
                                 onpass="Set contains is functional",
                                 onfail="Set contains failed" )
3568
3569 main.step( "Distributed Set containsAll()" )
3570 containsAllResponses = []
3571 threads = []
3572 for i in main.activeNodes:
3573 t = main.Thread( target=main.CLIs[i].setTestGet,
3574 name="setContainsAll-" + str( i ),
3575 args=[ onosSetName ],
3576 kwargs={ "values": addAllValue } )
3577 threads.append( t )
3578 t.start()
3579 for t in threads:
3580 t.join()
3581 # NOTE: This is the tuple
3582 containsAllResponses.append( t.result )
3583
3584 containsAllResults = main.TRUE
3585 for i in range( len( main.activeNodes ) ):
3586 if containsResponses[ i ] == main.ERROR:
3587 containsResults = main.FALSE
3588 else:
3589 containsResults = containsResults and\
3590 containsResponses[ i ][ 1 ]
3591 utilities.assert_equals( expect=main.TRUE,
3592 actual=containsAllResults,
3593 onpass="Set containsAll is functional",
3594 onfail="Set containsAll failed" )
3595
        main.step( "Distributed Set remove()" )
        # Remove addValue from the local reference set, then issue the same
        # remove on every active node and verify the cluster state converges.
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported size on every node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3692
        main.step( "Distributed Set removeAll()" )
        # Remove every element of addAllValue (whitespace-separated) from the
        # local reference set, then issue the same removeAll on every node.
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported size on every node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3792
        main.step( "Distributed Set addAll()" )
        # Re-add every element of addAllValue on every node and verify the
        # cluster's view matches the local reference set afterwards.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported size on every node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3889
        main.step( "Distributed Set clear()" )
        # Empty the local reference set, then clear the distributed set on
        # every node via setTestRemove with the "clear" kwarg.
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "],  # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported size on every node (should be 0 here).
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
3987
        main.step( "Distributed Set addAll()" )
        # Repopulate the set after clear() so the retain() step below has
        # elements to operate on; verify the cluster view afterwards.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported size on every node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4084
        main.step( "Distributed Set retain()" )
        # Keep only the elements of retainValue (set intersection) locally,
        # then issue retain on every node via setTestRemove's "retain" kwarg.
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported size on every node.
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4181
4182 # Transactional maps
4183 main.step( "Partitioned Transactional maps put" )
4184 tMapValue = "Testing"
4185 numKeys = 100
4186 putResult = True
4187 node = main.activeNodes[0]
4188 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4189 if putResponses and len( putResponses ) == 100:
4190 for i in putResponses:
4191 if putResponses[ i ][ 'value' ] != tMapValue:
4192 putResult = False
4193 else:
4194 putResult = False
4195 if not putResult:
4196 main.log.debug( "Put response values: " + str( putResponses ) )
4197 utilities.assert_equals( expect=True,
4198 actual=putResult,
4199 onpass="Partitioned Transactional Map put successful",
4200 onfail="Partitioned Transactional Map put values are incorrect" )
4201
        main.step( "Partitioned Transactional maps get" )
        # For each key written by the put step, read it back from every active
        # node in parallel and require all nodes to return the written value.
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )