blob: 25b977228658fd9fea0d1c77c4d00ce7766cf674 [file] [log] [blame]
Jon Hall6e709752016-02-01 13:38:46 -08001"""
2Description: This test is to determine if ONOS can handle
3             a full network partition
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
14CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
28class HAfullNetPartition:
29
    def __init__( self ):
        """Initialize the test class; TestON requires a default attribute."""
        self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
51 import imp
52 import pexpect
53 import time
Jon Halla440e872016-03-31 15:15:50 -070054 import json
Jon Hall6e709752016-02-01 13:38:46 -080055 main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
56 "initialization" )
57 main.case( "Setting up test environment" )
58 main.caseExplanation = "Setup the test environment including " +\
59 "installing ONOS, starting Mininet and ONOS" +\
60 "cli sessions."
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
69 main.numCtrls = int( main.params[ 'num_controllers' ] )
70 if main.ONOSbench.maxNodes:
71 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
74 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070081 # These are for csv plotting in jenkins
82 global labels
83 global data
84 labels = []
85 data = []
Jon Hall6e709752016-02-01 13:38:46 -080086
87 # FIXME: just get controller port from params?
88 # TODO: do we really need all these?
89 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
90 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
91 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
92 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
93 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
94 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
95 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
96
97 try:
Jon Hall53c5e662016-04-13 16:06:56 -070098 from tests.HA.dependencies.HA import HA
Jon Hall41d39f12016-04-11 22:54:35 -070099 main.HA = HA()
Jon Hall6e709752016-02-01 13:38:46 -0800100 except Exception as e:
101 main.log.exception( e )
102 main.cleanup()
103 main.exit()
104
105 main.CLIs = []
106 main.nodes = []
107 ipList = []
108 for i in range( 1, main.numCtrls + 1 ):
109 try:
110 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
111 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
112 ipList.append( main.nodes[ -1 ].ip_address )
113 except AttributeError:
114 break
115
116 main.step( "Create cell file" )
117 cellAppString = main.params[ 'ENV' ][ 'appString' ]
118 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
119 main.Mininet1.ip_address,
120 cellAppString, ipList )
121 main.step( "Applying cell variable to environment" )
122 cellResult = main.ONOSbench.setCell( cellName )
123 verifyResult = main.ONOSbench.verifyCell()
124
125 # FIXME:this is short term fix
126 main.log.info( "Removing raft logs" )
127 main.ONOSbench.onosRemoveRaftLogs()
128
129 main.log.info( "Uninstalling ONOS" )
130 for node in main.nodes:
131 main.ONOSbench.onosUninstall( node.ip_address )
132
133 # Make sure ONOS is DEAD
134 main.log.info( "Killing any ONOS processes" )
135 killResults = main.TRUE
136 for node in main.nodes:
137 killed = main.ONOSbench.onosKill( node.ip_address )
138 killResults = killResults and killed
139
140 cleanInstallResult = main.TRUE
141 gitPullResult = main.TRUE
142
143 main.step( "Starting Mininet" )
144 # scp topo file to mininet
145 # TODO: move to params?
146 topoName = "obelisk.py"
147 filePath = main.ONOSbench.home + "/tools/test/topos/"
148 main.ONOSbench.scp( main.Mininet1,
149 filePath + topoName,
150 main.Mininet1.home,
151 direction="to" )
152 mnResult = main.Mininet1.startNet( )
153 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
154 onpass="Mininet Started",
155 onfail="Error starting Mininet" )
156
157 main.step( "Git checkout and pull " + gitBranch )
158 if PULLCODE:
159 main.ONOSbench.gitCheckout( gitBranch )
160 gitPullResult = main.ONOSbench.gitPull()
161 # values of 1 or 3 are good
162 utilities.assert_lesser( expect=0, actual=gitPullResult,
163 onpass="Git pull successful",
164 onfail="Git pull failed" )
165 main.ONOSbench.getVersion( report=True )
166
167 main.step( "Using mvn clean install" )
168 cleanInstallResult = main.TRUE
169 if PULLCODE and gitPullResult == main.TRUE:
170 cleanInstallResult = main.ONOSbench.cleanInstall()
171 else:
172 main.log.warn( "Did not pull new code so skipping mvn " +
173 "clean install" )
174 utilities.assert_equals( expect=main.TRUE,
175 actual=cleanInstallResult,
176 onpass="MCI successful",
177 onfail="MCI failed" )
178 # GRAPHS
179 # NOTE: important params here:
180 # job = name of Jenkins job
181 # Plot Name = Plot-HA, only can be used if multiple plots
182 # index = The number of the graph under plot name
183 job = "HAfullNetPartition"
184 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700185 index = "1"
Jon Hall6e709752016-02-01 13:38:46 -0800186 graphs = '<ac:structured-macro ac:name="html">\n'
187 graphs += '<ac:plain-text-body><![CDATA[\n'
188 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
189 '/plot/' + plotName + '/getPlot?index=' + index +\
190 '&width=500&height=300"' +\
191 'noborder="0" width="500" height="300" scrolling="yes" ' +\
192 'seamless="seamless"></iframe>\n'
193 graphs += ']]></ac:plain-text-body>\n'
194 graphs += '</ac:structured-macro>\n'
195 main.log.wiki(graphs)
196
197 main.step( "Creating ONOS package" )
198 # copy gen-partions file to ONOS
199 # NOTE: this assumes TestON and ONOS are on the same machine
Jon Hall53c5e662016-04-13 16:06:56 -0700200 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
Jon Hall6e709752016-02-01 13:38:46 -0800201 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
202 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
203 main.ONOSbench.ip_address,
204 srcFile,
205 dstDir,
206 pwd=main.ONOSbench.pwd,
207 direction="from" )
208 packageResult = main.ONOSbench.onosPackage()
209 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
210 onpass="ONOS package successful",
211 onfail="ONOS package failed" )
212
213 main.step( "Installing ONOS package" )
214 onosInstallResult = main.TRUE
215 for node in main.nodes:
216 tmpResult = main.ONOSbench.onosInstall( options="-f",
217 node=node.ip_address )
218 onosInstallResult = onosInstallResult and tmpResult
219 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
220 onpass="ONOS install successful",
221 onfail="ONOS install failed" )
222 # clean up gen-partitions file
223 try:
224 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
225 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
226 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
227 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
228 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
229 str( main.ONOSbench.handle.before ) )
230 except ( pexpect.TIMEOUT, pexpect.EOF ):
231 main.log.exception( "ONOSbench: pexpect exception found:" +
232 main.ONOSbench.handle.before )
233 main.cleanup()
234 main.exit()
235
236 main.step( "Checking if ONOS is up yet" )
237 for i in range( 2 ):
238 onosIsupResult = main.TRUE
239 for node in main.nodes:
240 started = main.ONOSbench.isup( node.ip_address )
241 if not started:
242 main.log.error( node.name + " hasn't started" )
243 onosIsupResult = onosIsupResult and started
244 if onosIsupResult == main.TRUE:
245 break
246 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
247 onpass="ONOS startup successful",
248 onfail="ONOS startup failed" )
249
250 main.log.step( "Starting ONOS CLI sessions" )
251 cliResults = main.TRUE
252 threads = []
253 for i in range( main.numCtrls ):
254 t = main.Thread( target=main.CLIs[i].startOnosCli,
255 name="startOnosCli-" + str( i ),
256 args=[main.nodes[i].ip_address] )
257 threads.append( t )
258 t.start()
259
260 for t in threads:
261 t.join()
262 cliResults = cliResults and t.result
263 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
264 onpass="ONOS cli startup successful",
265 onfail="ONOS cli startup failed" )
266
267 # Create a list of active nodes for use when some nodes are stopped
268 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
269
270 if main.params[ 'tcpdump' ].lower() == "true":
271 main.step( "Start Packet Capture MN" )
272 main.Mininet2.startTcpdump(
273 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
274 + "-MN.pcap",
275 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
276 port=main.params[ 'MNtcpdump' ][ 'port' ] )
277
Jon Halla440e872016-03-31 15:15:50 -0700278 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -0700279 nodeResults = utilities.retry( main.HA.nodesCheck,
280 False,
281 args=[main.activeNodes],
282 attempts=5 )
Jon Halla440e872016-03-31 15:15:50 -0700283
Jon Hall41d39f12016-04-11 22:54:35 -0700284 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Halla440e872016-03-31 15:15:50 -0700285 onpass="Nodes check successful",
286 onfail="Nodes check NOT successful" )
287
288 if not nodeResults:
Jon Hall7ac7bc32016-05-05 10:57:02 -0700289 for i in main.activeNodes:
290 cli = main.CLIs[i]
Jon Halla440e872016-03-31 15:15:50 -0700291 main.log.debug( "{} components not ACTIVE: \n{}".format(
292 cli.name,
293 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -0800294 main.log.error( "Failed to start ONOS, stopping test" )
295 main.cleanup()
296 main.exit()
297
Jon Hall172b7ba2016-04-07 18:12:20 -0700298 main.step( "Activate apps defined in the params file" )
299 # get data from the params
300 apps = main.params.get( 'apps' )
301 if apps:
302 apps = apps.split(',')
303 main.log.warn( apps )
304 activateResult = True
305 for app in apps:
306 main.CLIs[ 0 ].app( app, "Activate" )
307 # TODO: check this worked
308 time.sleep( 10 ) # wait for apps to activate
309 for app in apps:
310 state = main.CLIs[ 0 ].appStatus( app )
311 if state == "ACTIVE":
312 activateResult = activeResult and True
313 else:
314 main.log.error( "{} is in {} state".format( app, state ) )
315 activeResult = False
316 utilities.assert_equals( expect=True,
317 actual=activateResult,
318 onpass="Successfully activated apps",
319 onfail="Failed to activate apps" )
320 else:
321 main.log.warn( "No apps were specified to be loaded after startup" )
322
323 main.step( "Set ONOS configurations" )
324 config = main.params.get( 'ONOS_Configuration' )
325 if config:
326 main.log.debug( config )
327 checkResult = main.TRUE
328 for component in config:
329 for setting in config[component]:
330 value = config[component][setting]
331 check = main.CLIs[ 0 ].setCfg( component, setting, value )
332 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
333 checkResult = check and checkResult
334 utilities.assert_equals( expect=main.TRUE,
335 actual=checkResult,
336 onpass="Successfully set config",
337 onfail="Failed to set config" )
338 else:
339 main.log.warn( "No configurations were specified to be changed after startup" )
340
Jon Hall9d2dcad2016-04-08 10:15:20 -0700341 main.step( "App Ids check" )
342 appCheck = main.TRUE
343 threads = []
344 for i in main.activeNodes:
345 t = main.Thread( target=main.CLIs[i].appToIDCheck,
346 name="appToIDCheck-" + str( i ),
347 args=[] )
348 threads.append( t )
349 t.start()
350
351 for t in threads:
352 t.join()
353 appCheck = appCheck and t.result
354 if appCheck != main.TRUE:
355 node = main.activeNodes[0]
356 main.log.warn( main.CLIs[node].apps() )
357 main.log.warn( main.CLIs[node].appIDs() )
358 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
359 onpass="App Ids seem to be correct",
360 onfail="Something is wrong with app Ids" )
361
Jon Hall6e709752016-02-01 13:38:46 -0800362 def CASE2( self, main ):
363 """
364 Assign devices to controllers
365 """
366 import re
367 assert main.numCtrls, "main.numCtrls not defined"
368 assert main, "main not defined"
369 assert utilities.assert_equals, "utilities.assert_equals not defined"
370 assert main.CLIs, "main.CLIs not defined"
371 assert main.nodes, "main.nodes not defined"
372 assert ONOS1Port, "ONOS1Port not defined"
373 assert ONOS2Port, "ONOS2Port not defined"
374 assert ONOS3Port, "ONOS3Port not defined"
375 assert ONOS4Port, "ONOS4Port not defined"
376 assert ONOS5Port, "ONOS5Port not defined"
377 assert ONOS6Port, "ONOS6Port not defined"
378 assert ONOS7Port, "ONOS7Port not defined"
379
380 main.case( "Assigning devices to controllers" )
381 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
382 "and check that an ONOS node becomes the " +\
383 "master of the device."
384 main.step( "Assign switches to controllers" )
385
386 ipList = []
387 for i in range( main.numCtrls ):
388 ipList.append( main.nodes[ i ].ip_address )
389 swList = []
390 for i in range( 1, 29 ):
391 swList.append( "s" + str( i ) )
392 main.Mininet1.assignSwController( sw=swList, ip=ipList )
393
394 mastershipCheck = main.TRUE
395 for i in range( 1, 29 ):
396 response = main.Mininet1.getSwController( "s" + str( i ) )
397 try:
398 main.log.info( str( response ) )
399 except Exception:
400 main.log.info( repr( response ) )
401 for node in main.nodes:
402 if re.search( "tcp:" + node.ip_address, response ):
403 mastershipCheck = mastershipCheck and main.TRUE
404 else:
405 main.log.error( "Error, node " + node.ip_address + " is " +
406 "not in the list of controllers s" +
407 str( i ) + " is connecting to." )
408 mastershipCheck = main.FALSE
409 utilities.assert_equals(
410 expect=main.TRUE,
411 actual=mastershipCheck,
412 onpass="Switch mastership assigned correctly",
413 onfail="Switches not assigned correctly to controllers" )
414
415 def CASE21( self, main ):
416 """
417 Assign mastership to controllers
418 """
419 import time
420 assert main.numCtrls, "main.numCtrls not defined"
421 assert main, "main not defined"
422 assert utilities.assert_equals, "utilities.assert_equals not defined"
423 assert main.CLIs, "main.CLIs not defined"
424 assert main.nodes, "main.nodes not defined"
425 assert ONOS1Port, "ONOS1Port not defined"
426 assert ONOS2Port, "ONOS2Port not defined"
427 assert ONOS3Port, "ONOS3Port not defined"
428 assert ONOS4Port, "ONOS4Port not defined"
429 assert ONOS5Port, "ONOS5Port not defined"
430 assert ONOS6Port, "ONOS6Port not defined"
431 assert ONOS7Port, "ONOS7Port not defined"
432
433 main.case( "Assigning Controller roles for switches" )
434 main.caseExplanation = "Check that ONOS is connected to each " +\
435 "device. Then manually assign" +\
436 " mastership to specific ONOS nodes using" +\
437 " 'device-role'"
438 main.step( "Assign mastership of switches to specific controllers" )
439 # Manually assign mastership to the controller we want
440 roleCall = main.TRUE
441
442 ipList = [ ]
443 deviceList = []
444 onosCli = main.CLIs[ main.activeNodes[0] ]
445 try:
446 # Assign mastership to specific controllers. This assignment was
447 # determined for a 7 node cluser, but will work with any sized
448 # cluster
449 for i in range( 1, 29 ): # switches 1 through 28
450 # set up correct variables:
451 if i == 1:
452 c = 0
453 ip = main.nodes[ c ].ip_address # ONOS1
454 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
455 elif i == 2:
456 c = 1 % main.numCtrls
457 ip = main.nodes[ c ].ip_address # ONOS2
458 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
459 elif i == 3:
460 c = 1 % main.numCtrls
461 ip = main.nodes[ c ].ip_address # ONOS2
462 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
463 elif i == 4:
464 c = 3 % main.numCtrls
465 ip = main.nodes[ c ].ip_address # ONOS4
466 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
467 elif i == 5:
468 c = 2 % main.numCtrls
469 ip = main.nodes[ c ].ip_address # ONOS3
470 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
471 elif i == 6:
472 c = 2 % main.numCtrls
473 ip = main.nodes[ c ].ip_address # ONOS3
474 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
475 elif i == 7:
476 c = 5 % main.numCtrls
477 ip = main.nodes[ c ].ip_address # ONOS6
478 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
479 elif i >= 8 and i <= 17:
480 c = 4 % main.numCtrls
481 ip = main.nodes[ c ].ip_address # ONOS5
482 dpid = '3' + str( i ).zfill( 3 )
483 deviceId = onosCli.getDevice( dpid ).get( 'id' )
484 elif i >= 18 and i <= 27:
485 c = 6 % main.numCtrls
486 ip = main.nodes[ c ].ip_address # ONOS7
487 dpid = '6' + str( i ).zfill( 3 )
488 deviceId = onosCli.getDevice( dpid ).get( 'id' )
489 elif i == 28:
490 c = 0
491 ip = main.nodes[ c ].ip_address # ONOS1
492 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
493 else:
494 main.log.error( "You didn't write an else statement for " +
495 "switch s" + str( i ) )
496 roleCall = main.FALSE
497 # Assign switch
498 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
499 # TODO: make this controller dynamic
500 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
501 ipList.append( ip )
502 deviceList.append( deviceId )
503 except ( AttributeError, AssertionError ):
504 main.log.exception( "Something is wrong with ONOS device view" )
505 main.log.info( onosCli.devices() )
506 utilities.assert_equals(
507 expect=main.TRUE,
508 actual=roleCall,
509 onpass="Re-assigned switch mastership to designated controller",
510 onfail="Something wrong with deviceRole calls" )
511
512 main.step( "Check mastership was correctly assigned" )
513 roleCheck = main.TRUE
514 # NOTE: This is due to the fact that device mastership change is not
515 # atomic and is actually a multi step process
516 time.sleep( 5 )
517 for i in range( len( ipList ) ):
518 ip = ipList[i]
519 deviceId = deviceList[i]
520 # Check assignment
521 master = onosCli.getRole( deviceId ).get( 'master' )
522 if ip in master:
523 roleCheck = roleCheck and main.TRUE
524 else:
525 roleCheck = roleCheck and main.FALSE
526 main.log.error( "Error, controller " + ip + " is not" +
527 " master " + "of device " +
528 str( deviceId ) + ". Master is " +
529 repr( master ) + "." )
530 utilities.assert_equals(
531 expect=main.TRUE,
532 actual=roleCheck,
533 onpass="Switches were successfully reassigned to designated " +
534 "controller",
535 onfail="Switches were not successfully reassigned" )
536
537 def CASE3( self, main ):
538 """
539 Assign intents
540 """
541 import time
542 import json
543 assert main.numCtrls, "main.numCtrls not defined"
544 assert main, "main not defined"
545 assert utilities.assert_equals, "utilities.assert_equals not defined"
546 assert main.CLIs, "main.CLIs not defined"
547 assert main.nodes, "main.nodes not defined"
548 main.case( "Adding host Intents" )
549 main.caseExplanation = "Discover hosts by using pingall then " +\
550 "assign predetermined host-to-host intents." +\
551 " After installation, check that the intent" +\
552 " is distributed to all nodes and the state" +\
553 " is INSTALLED"
554
555 # install onos-app-fwd
556 main.step( "Install reactive forwarding app" )
557 onosCli = main.CLIs[ main.activeNodes[0] ]
558 installResults = onosCli.activateApp( "org.onosproject.fwd" )
559 utilities.assert_equals( expect=main.TRUE, actual=installResults,
560 onpass="Install fwd successful",
561 onfail="Install fwd failed" )
562
563 main.step( "Check app ids" )
564 appCheck = main.TRUE
565 threads = []
566 for i in main.activeNodes:
567 t = main.Thread( target=main.CLIs[i].appToIDCheck,
568 name="appToIDCheck-" + str( i ),
569 args=[] )
570 threads.append( t )
571 t.start()
572
573 for t in threads:
574 t.join()
575 appCheck = appCheck and t.result
576 if appCheck != main.TRUE:
577 main.log.warn( onosCli.apps() )
578 main.log.warn( onosCli.appIDs() )
579 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
580 onpass="App Ids seem to be correct",
581 onfail="Something is wrong with app Ids" )
582
583 main.step( "Discovering Hosts( Via pingall for now )" )
584 # FIXME: Once we have a host discovery mechanism, use that instead
585 # REACTIVE FWD test
586 pingResult = main.FALSE
587 passMsg = "Reactive Pingall test passed"
588 time1 = time.time()
589 pingResult = main.Mininet1.pingall()
590 time2 = time.time()
591 if not pingResult:
592 main.log.warn("First pingall failed. Trying again...")
593 pingResult = main.Mininet1.pingall()
594 passMsg += " on the second try"
595 utilities.assert_equals(
596 expect=main.TRUE,
597 actual=pingResult,
598 onpass= passMsg,
599 onfail="Reactive Pingall failed, " +
600 "one or more ping pairs failed" )
601 main.log.info( "Time for pingall: %2f seconds" %
602 ( time2 - time1 ) )
603 # timeout for fwd flows
604 time.sleep( 11 )
605 # uninstall onos-app-fwd
606 main.step( "Uninstall reactive forwarding app" )
607 node = main.activeNodes[0]
608 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
609 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
610 onpass="Uninstall fwd successful",
611 onfail="Uninstall fwd failed" )
612
613 main.step( "Check app ids" )
614 threads = []
615 appCheck2 = main.TRUE
616 for i in main.activeNodes:
617 t = main.Thread( target=main.CLIs[i].appToIDCheck,
618 name="appToIDCheck-" + str( i ),
619 args=[] )
620 threads.append( t )
621 t.start()
622
623 for t in threads:
624 t.join()
625 appCheck2 = appCheck2 and t.result
626 if appCheck2 != main.TRUE:
627 node = main.activeNodes[0]
628 main.log.warn( main.CLIs[node].apps() )
629 main.log.warn( main.CLIs[node].appIDs() )
630 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
631 onpass="App Ids seem to be correct",
632 onfail="Something is wrong with app Ids" )
633
634 main.step( "Add host intents via cli" )
635 intentIds = []
636 # TODO: move the host numbers to params
637 # Maybe look at all the paths we ping?
638 intentAddResult = True
639 hostResult = main.TRUE
640 for i in range( 8, 18 ):
641 main.log.info( "Adding host intent between h" + str( i ) +
642 " and h" + str( i + 10 ) )
643 host1 = "00:00:00:00:00:" + \
644 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
645 host2 = "00:00:00:00:00:" + \
646 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
647 # NOTE: getHost can return None
648 host1Dict = onosCli.getHost( host1 )
649 host2Dict = onosCli.getHost( host2 )
650 host1Id = None
651 host2Id = None
652 if host1Dict and host2Dict:
653 host1Id = host1Dict.get( 'id', None )
654 host2Id = host2Dict.get( 'id', None )
655 if host1Id and host2Id:
656 nodeNum = ( i % len( main.activeNodes ) )
657 node = main.activeNodes[nodeNum]
658 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
659 if tmpId:
660 main.log.info( "Added intent with id: " + tmpId )
661 intentIds.append( tmpId )
662 else:
663 main.log.error( "addHostIntent returned: " +
664 repr( tmpId ) )
665 else:
666 main.log.error( "Error, getHost() failed for h" + str( i ) +
667 " and/or h" + str( i + 10 ) )
668 node = main.activeNodes[0]
669 hosts = main.CLIs[node].hosts()
670 main.log.warn( "Hosts output: " )
671 try:
672 main.log.warn( json.dumps( json.loads( hosts ),
673 sort_keys=True,
674 indent=4,
675 separators=( ',', ': ' ) ) )
676 except ( ValueError, TypeError ):
677 main.log.warn( repr( hosts ) )
678 hostResult = main.FALSE
679 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
680 onpass="Found a host id for each host",
681 onfail="Error looking up host ids" )
682
683 intentStart = time.time()
684 onosIds = onosCli.getAllIntentsId()
685 main.log.info( "Submitted intents: " + str( intentIds ) )
686 main.log.info( "Intents in ONOS: " + str( onosIds ) )
687 for intent in intentIds:
688 if intent in onosIds:
689 pass # intent submitted is in onos
690 else:
691 intentAddResult = False
692 if intentAddResult:
693 intentStop = time.time()
694 else:
695 intentStop = None
696 # Print the intent states
697 intents = onosCli.intents()
698 intentStates = []
699 installedCheck = True
700 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
701 count = 0
702 try:
703 for intent in json.loads( intents ):
704 state = intent.get( 'state', None )
705 if "INSTALLED" not in state:
706 installedCheck = False
707 intentId = intent.get( 'id', None )
708 intentStates.append( ( intentId, state ) )
709 except ( ValueError, TypeError ):
710 main.log.exception( "Error parsing intents" )
711 # add submitted intents not in the store
712 tmplist = [ i for i, s in intentStates ]
713 missingIntents = False
714 for i in intentIds:
715 if i not in tmplist:
716 intentStates.append( ( i, " - " ) )
717 missingIntents = True
718 intentStates.sort()
719 for i, s in intentStates:
720 count += 1
721 main.log.info( "%-6s%-15s%-15s" %
722 ( str( count ), str( i ), str( s ) ) )
723 leaders = onosCli.leaders()
724 try:
725 missing = False
726 if leaders:
727 parsedLeaders = json.loads( leaders )
728 main.log.warn( json.dumps( parsedLeaders,
729 sort_keys=True,
730 indent=4,
731 separators=( ',', ': ' ) ) )
732 # check for all intent partitions
733 topics = []
734 for i in range( 14 ):
735 topics.append( "intent-partition-" + str( i ) )
736 main.log.debug( topics )
737 ONOStopics = [ j['topic'] for j in parsedLeaders ]
738 for topic in topics:
739 if topic not in ONOStopics:
740 main.log.error( "Error: " + topic +
741 " not in leaders" )
742 missing = True
743 else:
744 main.log.error( "leaders() returned None" )
745 except ( ValueError, TypeError ):
746 main.log.exception( "Error parsing leaders" )
747 main.log.error( repr( leaders ) )
748 # Check all nodes
749 if missing:
750 for i in main.activeNodes:
751 response = main.CLIs[i].leaders( jsonFormat=False)
752 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
753 str( response ) )
754
755 partitions = onosCli.partitions()
756 try:
757 if partitions :
758 parsedPartitions = json.loads( partitions )
759 main.log.warn( json.dumps( parsedPartitions,
760 sort_keys=True,
761 indent=4,
762 separators=( ',', ': ' ) ) )
763 # TODO check for a leader in all paritions
764 # TODO check for consistency among nodes
765 else:
766 main.log.error( "partitions() returned None" )
767 except ( ValueError, TypeError ):
768 main.log.exception( "Error parsing partitions" )
769 main.log.error( repr( partitions ) )
770 pendingMap = onosCli.pendingMap()
771 try:
772 if pendingMap :
773 parsedPending = json.loads( pendingMap )
774 main.log.warn( json.dumps( parsedPending,
775 sort_keys=True,
776 indent=4,
777 separators=( ',', ': ' ) ) )
778 # TODO check something here?
779 else:
780 main.log.error( "pendingMap() returned None" )
781 except ( ValueError, TypeError ):
782 main.log.exception( "Error parsing pending map" )
783 main.log.error( repr( pendingMap ) )
784
785 intentAddResult = bool( intentAddResult and not missingIntents and
786 installedCheck )
787 if not intentAddResult:
788 main.log.error( "Error in pushing host intents to ONOS" )
789
790 main.step( "Intent Anti-Entropy dispersion" )
791 for j in range(100):
792 correct = True
793 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
794 for i in main.activeNodes:
795 onosIds = []
796 ids = main.CLIs[i].getAllIntentsId()
797 onosIds.append( ids )
798 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
799 str( sorted( onosIds ) ) )
800 if sorted( ids ) != sorted( intentIds ):
801 main.log.warn( "Set of intent IDs doesn't match" )
802 correct = False
803 break
804 else:
805 intents = json.loads( main.CLIs[i].intents() )
806 for intent in intents:
807 if intent[ 'state' ] != "INSTALLED":
808 main.log.warn( "Intent " + intent[ 'id' ] +
809 " is " + intent[ 'state' ] )
810 correct = False
811 break
812 if correct:
813 break
814 else:
815 time.sleep(1)
816 if not intentStop:
817 intentStop = time.time()
818 global gossipTime
819 gossipTime = intentStop - intentStart
820 main.log.info( "It took about " + str( gossipTime ) +
821 " seconds for all intents to appear in each node" )
822 gossipPeriod = int( main.params['timers']['gossip'] )
823 maxGossipTime = gossipPeriod * len( main.activeNodes )
824 utilities.assert_greater_equals(
825 expect=maxGossipTime, actual=gossipTime,
826 onpass="ECM anti-entropy for intents worked within " +
827 "expected time",
828 onfail="Intent ECM anti-entropy took too long. " +
829 "Expected time:{}, Actual time:{}".format( maxGossipTime,
830 gossipTime ) )
831 if gossipTime <= maxGossipTime:
832 intentAddResult = True
833
834 if not intentAddResult or "key" in pendingMap:
835 import time
836 installedCheck = True
837 main.log.info( "Sleeping 60 seconds to see if intents are found" )
838 time.sleep( 60 )
839 onosIds = onosCli.getAllIntentsId()
840 main.log.info( "Submitted intents: " + str( intentIds ) )
841 main.log.info( "Intents in ONOS: " + str( onosIds ) )
842 # Print the intent states
843 intents = onosCli.intents()
844 intentStates = []
845 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
846 count = 0
847 try:
848 for intent in json.loads( intents ):
849 # Iter through intents of a node
850 state = intent.get( 'state', None )
851 if "INSTALLED" not in state:
852 installedCheck = False
853 intentId = intent.get( 'id', None )
854 intentStates.append( ( intentId, state ) )
855 except ( ValueError, TypeError ):
856 main.log.exception( "Error parsing intents" )
857 # add submitted intents not in the store
858 tmplist = [ i for i, s in intentStates ]
859 for i in intentIds:
860 if i not in tmplist:
861 intentStates.append( ( i, " - " ) )
862 intentStates.sort()
863 for i, s in intentStates:
864 count += 1
865 main.log.info( "%-6s%-15s%-15s" %
866 ( str( count ), str( i ), str( s ) ) )
867 leaders = onosCli.leaders()
868 try:
869 missing = False
870 if leaders:
871 parsedLeaders = json.loads( leaders )
872 main.log.warn( json.dumps( parsedLeaders,
873 sort_keys=True,
874 indent=4,
875 separators=( ',', ': ' ) ) )
876 # check for all intent partitions
877 # check for election
878 topics = []
879 for i in range( 14 ):
880 topics.append( "intent-partition-" + str( i ) )
881 # FIXME: this should only be after we start the app
882 topics.append( "org.onosproject.election" )
883 main.log.debug( topics )
884 ONOStopics = [ j['topic'] for j in parsedLeaders ]
885 for topic in topics:
886 if topic not in ONOStopics:
887 main.log.error( "Error: " + topic +
888 " not in leaders" )
889 missing = True
890 else:
891 main.log.error( "leaders() returned None" )
892 except ( ValueError, TypeError ):
893 main.log.exception( "Error parsing leaders" )
894 main.log.error( repr( leaders ) )
895 # Check all nodes
896 if missing:
897 for i in main.activeNodes:
898 node = main.CLIs[i]
899 response = node.leaders( jsonFormat=False)
900 main.log.warn( str( node.name ) + " leaders output: \n" +
901 str( response ) )
902
903 partitions = onosCli.partitions()
904 try:
905 if partitions :
906 parsedPartitions = json.loads( partitions )
907 main.log.warn( json.dumps( parsedPartitions,
908 sort_keys=True,
909 indent=4,
910 separators=( ',', ': ' ) ) )
911 # TODO check for a leader in all paritions
912 # TODO check for consistency among nodes
913 else:
914 main.log.error( "partitions() returned None" )
915 except ( ValueError, TypeError ):
916 main.log.exception( "Error parsing partitions" )
917 main.log.error( repr( partitions ) )
918 pendingMap = onosCli.pendingMap()
919 try:
920 if pendingMap :
921 parsedPending = json.loads( pendingMap )
922 main.log.warn( json.dumps( parsedPending,
923 sort_keys=True,
924 indent=4,
925 separators=( ',', ': ' ) ) )
926 # TODO check something here?
927 else:
928 main.log.error( "pendingMap() returned None" )
929 except ( ValueError, TypeError ):
930 main.log.exception( "Error parsing pending map" )
931 main.log.error( repr( pendingMap ) )
932
    def CASE4( self, main ):
        """
        Ping across added host intents

        Polls ONOS ( up to ~40s ) until every intent reports an INSTALLED
        state, then pings between the host pairs the intents connect
        ( h8..h17 <-> h18..h27 ).  Leaders, partitions and the pending map
        are dumped from the first active node for debugging.  If the
        intents never settled, waits another 60 seconds, re-dumps state,
        and pings once more.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                                "functionality and check the state of " +\
                                "the intent"

        # All single-node queries below go through the first active node's CLI
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until every intent is INSTALLED, at most 40 one-second retries
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Each host hN is expected to reach h(N+10) through its host intent
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            # Dump raw leaders output from every active node for comparison
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            # Intents never settled during the poll above: wait, re-dump
            # cluster state, then retry the pings one more time.
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1214
1215 def CASE5( self, main ):
1216 """
1217 Reading state of ONOS
1218 """
1219 import json
1220 import time
1221 assert main.numCtrls, "main.numCtrls not defined"
1222 assert main, "main not defined"
1223 assert utilities.assert_equals, "utilities.assert_equals not defined"
1224 assert main.CLIs, "main.CLIs not defined"
1225 assert main.nodes, "main.nodes not defined"
1226
1227 main.case( "Setting up and gathering data for current state" )
1228 # The general idea for this test case is to pull the state of
1229 # ( intents,flows, topology,... ) from each ONOS node
1230 # We can then compare them with each other and also with past states
1231
1232 main.step( "Check that each switch has a master" )
1233 global mastershipState
1234 mastershipState = '[]'
1235
1236 # Assert that each device has a master
1237 rolesNotNull = main.TRUE
1238 threads = []
1239 for i in main.activeNodes:
1240 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1241 name="rolesNotNull-" + str( i ),
1242 args=[] )
1243 threads.append( t )
1244 t.start()
1245
1246 for t in threads:
1247 t.join()
1248 rolesNotNull = rolesNotNull and t.result
1249 utilities.assert_equals(
1250 expect=main.TRUE,
1251 actual=rolesNotNull,
1252 onpass="Each device has a master",
1253 onfail="Some devices don't have a master assigned" )
1254
1255 main.step( "Get the Mastership of each switch from each controller" )
1256 ONOSMastership = []
1257 mastershipCheck = main.FALSE
1258 consistentMastership = True
1259 rolesResults = True
1260 threads = []
1261 for i in main.activeNodes:
1262 t = main.Thread( target=main.CLIs[i].roles,
1263 name="roles-" + str( i ),
1264 args=[] )
1265 threads.append( t )
1266 t.start()
1267
1268 for t in threads:
1269 t.join()
1270 ONOSMastership.append( t.result )
1271
1272 for i in range( len( ONOSMastership ) ):
1273 node = str( main.activeNodes[i] + 1 )
1274 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1275 main.log.error( "Error in getting ONOS" + node + " roles" )
1276 main.log.warn( "ONOS" + node + " mastership response: " +
1277 repr( ONOSMastership[i] ) )
1278 rolesResults = False
1279 utilities.assert_equals(
1280 expect=True,
1281 actual=rolesResults,
1282 onpass="No error in reading roles output",
1283 onfail="Error in reading roles from ONOS" )
1284
1285 main.step( "Check for consistency in roles from each controller" )
1286 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1287 main.log.info(
1288 "Switch roles are consistent across all ONOS nodes" )
1289 else:
1290 consistentMastership = False
1291 utilities.assert_equals(
1292 expect=True,
1293 actual=consistentMastership,
1294 onpass="Switch roles are consistent across all ONOS nodes",
1295 onfail="ONOS nodes have different views of switch roles" )
1296
1297 if rolesResults and not consistentMastership:
1298 for i in range( len( main.activeNodes ) ):
1299 node = str( main.activeNodes[i] + 1 )
1300 try:
1301 main.log.warn(
1302 "ONOS" + node + " roles: ",
1303 json.dumps(
1304 json.loads( ONOSMastership[ i ] ),
1305 sort_keys=True,
1306 indent=4,
1307 separators=( ',', ': ' ) ) )
1308 except ( ValueError, TypeError ):
1309 main.log.warn( repr( ONOSMastership[ i ] ) )
1310 elif rolesResults and consistentMastership:
1311 mastershipCheck = main.TRUE
1312 mastershipState = ONOSMastership[ 0 ]
1313
1314 main.step( "Get the intents from each controller" )
1315 global intentState
1316 intentState = []
1317 ONOSIntents = []
1318 intentCheck = main.FALSE
1319 consistentIntents = True
1320 intentsResults = True
1321 threads = []
1322 for i in main.activeNodes:
1323 t = main.Thread( target=main.CLIs[i].intents,
1324 name="intents-" + str( i ),
1325 args=[],
1326 kwargs={ 'jsonFormat': True } )
1327 threads.append( t )
1328 t.start()
1329
1330 for t in threads:
1331 t.join()
1332 ONOSIntents.append( t.result )
1333
1334 for i in range( len( ONOSIntents ) ):
1335 node = str( main.activeNodes[i] + 1 )
1336 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1337 main.log.error( "Error in getting ONOS" + node + " intents" )
1338 main.log.warn( "ONOS" + node + " intents response: " +
1339 repr( ONOSIntents[ i ] ) )
1340 intentsResults = False
1341 utilities.assert_equals(
1342 expect=True,
1343 actual=intentsResults,
1344 onpass="No error in reading intents output",
1345 onfail="Error in reading intents from ONOS" )
1346
1347 main.step( "Check for consistency in Intents from each controller" )
1348 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1349 main.log.info( "Intents are consistent across all ONOS " +
1350 "nodes" )
1351 else:
1352 consistentIntents = False
1353 main.log.error( "Intents not consistent" )
1354 utilities.assert_equals(
1355 expect=True,
1356 actual=consistentIntents,
1357 onpass="Intents are consistent across all ONOS nodes",
1358 onfail="ONOS nodes have different views of intents" )
1359
1360 if intentsResults:
1361 # Try to make it easy to figure out what is happening
1362 #
1363 # Intent ONOS1 ONOS2 ...
1364 # 0x01 INSTALLED INSTALLING
1365 # ... ... ...
1366 # ... ... ...
1367 title = " Id"
1368 for n in main.activeNodes:
1369 title += " " * 10 + "ONOS" + str( n + 1 )
1370 main.log.warn( title )
1371 # get all intent keys in the cluster
1372 keys = []
Jon Halla440e872016-03-31 15:15:50 -07001373 try:
1374 # Get the set of all intent keys
Jon Hall6e709752016-02-01 13:38:46 -08001375 for nodeStr in ONOSIntents:
1376 node = json.loads( nodeStr )
1377 for intent in node:
Jon Halla440e872016-03-31 15:15:50 -07001378 keys.append( intent.get( 'id' ) )
1379 keys = set( keys )
1380 # For each intent key, print the state on each node
1381 for key in keys:
1382 row = "%-13s" % key
1383 for nodeStr in ONOSIntents:
1384 node = json.loads( nodeStr )
1385 for intent in node:
1386 if intent.get( 'id', "Error" ) == key:
1387 row += "%-15s" % intent.get( 'state' )
1388 main.log.warn( row )
1389 # End of intent state table
1390 except ValueError as e:
1391 main.log.exception( e )
1392 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
Jon Hall6e709752016-02-01 13:38:46 -08001393
1394 if intentsResults and not consistentIntents:
1395 # print the json objects
1396 n = str( main.activeNodes[-1] + 1 )
1397 main.log.debug( "ONOS" + n + " intents: " )
1398 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1399 sort_keys=True,
1400 indent=4,
1401 separators=( ',', ': ' ) ) )
1402 for i in range( len( ONOSIntents ) ):
1403 node = str( main.activeNodes[i] + 1 )
1404 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1405 main.log.debug( "ONOS" + node + " intents: " )
1406 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1407 sort_keys=True,
1408 indent=4,
1409 separators=( ',', ': ' ) ) )
1410 else:
1411 main.log.debug( "ONOS" + node + " intents match ONOS" +
1412 n + " intents" )
1413 elif intentsResults and consistentIntents:
1414 intentCheck = main.TRUE
1415 intentState = ONOSIntents[ 0 ]
1416
1417 main.step( "Get the flows from each controller" )
1418 global flowState
1419 flowState = []
1420 ONOSFlows = []
1421 ONOSFlowsJson = []
1422 flowCheck = main.FALSE
1423 consistentFlows = True
1424 flowsResults = True
1425 threads = []
1426 for i in main.activeNodes:
1427 t = main.Thread( target=main.CLIs[i].flows,
1428 name="flows-" + str( i ),
1429 args=[],
1430 kwargs={ 'jsonFormat': True } )
1431 threads.append( t )
1432 t.start()
1433
1434 # NOTE: Flows command can take some time to run
1435 time.sleep(30)
1436 for t in threads:
1437 t.join()
1438 result = t.result
1439 ONOSFlows.append( result )
1440
1441 for i in range( len( ONOSFlows ) ):
1442 num = str( main.activeNodes[i] + 1 )
1443 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1444 main.log.error( "Error in getting ONOS" + num + " flows" )
1445 main.log.warn( "ONOS" + num + " flows response: " +
1446 repr( ONOSFlows[ i ] ) )
1447 flowsResults = False
1448 ONOSFlowsJson.append( None )
1449 else:
1450 try:
1451 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1452 except ( ValueError, TypeError ):
1453 # FIXME: change this to log.error?
1454 main.log.exception( "Error in parsing ONOS" + num +
1455 " response as json." )
1456 main.log.error( repr( ONOSFlows[ i ] ) )
1457 ONOSFlowsJson.append( None )
1458 flowsResults = False
1459 utilities.assert_equals(
1460 expect=True,
1461 actual=flowsResults,
1462 onpass="No error in reading flows output",
1463 onfail="Error in reading flows from ONOS" )
1464
1465 main.step( "Check for consistency in Flows from each controller" )
1466 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1467 if all( tmp ):
1468 main.log.info( "Flow count is consistent across all ONOS nodes" )
1469 else:
1470 consistentFlows = False
1471 utilities.assert_equals(
1472 expect=True,
1473 actual=consistentFlows,
1474 onpass="The flow count is consistent across all ONOS nodes",
1475 onfail="ONOS nodes have different flow counts" )
1476
1477 if flowsResults and not consistentFlows:
1478 for i in range( len( ONOSFlows ) ):
1479 node = str( main.activeNodes[i] + 1 )
1480 try:
1481 main.log.warn(
1482 "ONOS" + node + " flows: " +
1483 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1484 indent=4, separators=( ',', ': ' ) ) )
1485 except ( ValueError, TypeError ):
1486 main.log.warn( "ONOS" + node + " flows: " +
1487 repr( ONOSFlows[ i ] ) )
1488 elif flowsResults and consistentFlows:
1489 flowCheck = main.TRUE
1490 flowState = ONOSFlows[ 0 ]
1491
1492 main.step( "Get the OF Table entries" )
1493 global flows
1494 flows = []
1495 for i in range( 1, 29 ):
1496 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1497 if flowCheck == main.FALSE:
1498 for table in flows:
1499 main.log.warn( table )
1500 # TODO: Compare switch flow tables with ONOS flow tables
1501
1502 main.step( "Start continuous pings" )
1503 main.Mininet2.pingLong(
1504 src=main.params[ 'PING' ][ 'source1' ],
1505 target=main.params[ 'PING' ][ 'target1' ],
1506 pingTime=500 )
1507 main.Mininet2.pingLong(
1508 src=main.params[ 'PING' ][ 'source2' ],
1509 target=main.params[ 'PING' ][ 'target2' ],
1510 pingTime=500 )
1511 main.Mininet2.pingLong(
1512 src=main.params[ 'PING' ][ 'source3' ],
1513 target=main.params[ 'PING' ][ 'target3' ],
1514 pingTime=500 )
1515 main.Mininet2.pingLong(
1516 src=main.params[ 'PING' ][ 'source4' ],
1517 target=main.params[ 'PING' ][ 'target4' ],
1518 pingTime=500 )
1519 main.Mininet2.pingLong(
1520 src=main.params[ 'PING' ][ 'source5' ],
1521 target=main.params[ 'PING' ][ 'target5' ],
1522 pingTime=500 )
1523 main.Mininet2.pingLong(
1524 src=main.params[ 'PING' ][ 'source6' ],
1525 target=main.params[ 'PING' ][ 'target6' ],
1526 pingTime=500 )
1527 main.Mininet2.pingLong(
1528 src=main.params[ 'PING' ][ 'source7' ],
1529 target=main.params[ 'PING' ][ 'target7' ],
1530 pingTime=500 )
1531 main.Mininet2.pingLong(
1532 src=main.params[ 'PING' ][ 'source8' ],
1533 target=main.params[ 'PING' ][ 'target8' ],
1534 pingTime=500 )
1535 main.Mininet2.pingLong(
1536 src=main.params[ 'PING' ][ 'source9' ],
1537 target=main.params[ 'PING' ][ 'target9' ],
1538 pingTime=500 )
1539 main.Mininet2.pingLong(
1540 src=main.params[ 'PING' ][ 'source10' ],
1541 target=main.params[ 'PING' ][ 'target10' ],
1542 pingTime=500 )
1543
1544 main.step( "Collecting topology information from ONOS" )
1545 devices = []
1546 threads = []
1547 for i in main.activeNodes:
1548 t = main.Thread( target=main.CLIs[i].devices,
1549 name="devices-" + str( i ),
1550 args=[ ] )
1551 threads.append( t )
1552 t.start()
1553
1554 for t in threads:
1555 t.join()
1556 devices.append( t.result )
1557 hosts = []
1558 threads = []
1559 for i in main.activeNodes:
1560 t = main.Thread( target=main.CLIs[i].hosts,
1561 name="hosts-" + str( i ),
1562 args=[ ] )
1563 threads.append( t )
1564 t.start()
1565
1566 for t in threads:
1567 t.join()
1568 try:
1569 hosts.append( json.loads( t.result ) )
1570 except ( ValueError, TypeError ):
1571 # FIXME: better handling of this, print which node
1572 # Maybe use thread name?
1573 main.log.exception( "Error parsing json output of hosts" )
1574 main.log.warn( repr( t.result ) )
1575 hosts.append( None )
1576
1577 ports = []
1578 threads = []
1579 for i in main.activeNodes:
1580 t = main.Thread( target=main.CLIs[i].ports,
1581 name="ports-" + str( i ),
1582 args=[ ] )
1583 threads.append( t )
1584 t.start()
1585
1586 for t in threads:
1587 t.join()
1588 ports.append( t.result )
1589 links = []
1590 threads = []
1591 for i in main.activeNodes:
1592 t = main.Thread( target=main.CLIs[i].links,
1593 name="links-" + str( i ),
1594 args=[ ] )
1595 threads.append( t )
1596 t.start()
1597
1598 for t in threads:
1599 t.join()
1600 links.append( t.result )
1601 clusters = []
1602 threads = []
1603 for i in main.activeNodes:
1604 t = main.Thread( target=main.CLIs[i].clusters,
1605 name="clusters-" + str( i ),
1606 args=[ ] )
1607 threads.append( t )
1608 t.start()
1609
1610 for t in threads:
1611 t.join()
1612 clusters.append( t.result )
1613 # Compare json objects for hosts and dataplane clusters
1614
1615 # hosts
1616 main.step( "Host view is consistent across ONOS nodes" )
1617 consistentHostsResult = main.TRUE
1618 for controller in range( len( hosts ) ):
1619 controllerStr = str( main.activeNodes[controller] + 1 )
1620 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1621 if hosts[ controller ] == hosts[ 0 ]:
1622 continue
1623 else: # hosts not consistent
1624 main.log.error( "hosts from ONOS" +
1625 controllerStr +
1626 " is inconsistent with ONOS1" )
1627 main.log.warn( repr( hosts[ controller ] ) )
1628 consistentHostsResult = main.FALSE
1629
1630 else:
1631 main.log.error( "Error in getting ONOS hosts from ONOS" +
1632 controllerStr )
1633 consistentHostsResult = main.FALSE
1634 main.log.warn( "ONOS" + controllerStr +
1635 " hosts response: " +
1636 repr( hosts[ controller ] ) )
1637 utilities.assert_equals(
1638 expect=main.TRUE,
1639 actual=consistentHostsResult,
1640 onpass="Hosts view is consistent across all ONOS nodes",
1641 onfail="ONOS nodes have different views of hosts" )
1642
1643 main.step( "Each host has an IP address" )
1644 ipResult = main.TRUE
1645 for controller in range( 0, len( hosts ) ):
1646 controllerStr = str( main.activeNodes[controller] + 1 )
1647 if hosts[ controller ]:
1648 for host in hosts[ controller ]:
1649 if not host.get( 'ipAddresses', [ ] ):
1650 main.log.error( "Error with host ips on controller" +
1651 controllerStr + ": " + str( host ) )
1652 ipResult = main.FALSE
1653 utilities.assert_equals(
1654 expect=main.TRUE,
1655 actual=ipResult,
1656 onpass="The ips of the hosts aren't empty",
1657 onfail="The ip of at least one host is missing" )
1658
1659 # Strongly connected clusters of devices
1660 main.step( "Cluster view is consistent across ONOS nodes" )
1661 consistentClustersResult = main.TRUE
1662 for controller in range( len( clusters ) ):
1663 controllerStr = str( main.activeNodes[controller] + 1 )
1664 if "Error" not in clusters[ controller ]:
1665 if clusters[ controller ] == clusters[ 0 ]:
1666 continue
1667 else: # clusters not consistent
1668 main.log.error( "clusters from ONOS" + controllerStr +
1669 " is inconsistent with ONOS1" )
1670 consistentClustersResult = main.FALSE
1671
1672 else:
1673 main.log.error( "Error in getting dataplane clusters " +
1674 "from ONOS" + controllerStr )
1675 consistentClustersResult = main.FALSE
1676 main.log.warn( "ONOS" + controllerStr +
1677 " clusters response: " +
1678 repr( clusters[ controller ] ) )
1679 utilities.assert_equals(
1680 expect=main.TRUE,
1681 actual=consistentClustersResult,
1682 onpass="Clusters view is consistent across all ONOS nodes",
1683 onfail="ONOS nodes have different views of clusters" )
Jon Hall172b7ba2016-04-07 18:12:20 -07001684 if consistentClustersResult != main.TRUE:
1685 main.log.debug( clusters )
Jon Hall6e709752016-02-01 13:38:46 -08001686 # there should always only be one cluster
1687 main.step( "Cluster view correct across ONOS nodes" )
1688 try:
1689 numClusters = len( json.loads( clusters[ 0 ] ) )
1690 except ( ValueError, TypeError ):
1691 main.log.exception( "Error parsing clusters[0]: " +
1692 repr( clusters[ 0 ] ) )
1693 numClusters = "ERROR"
1694 clusterResults = main.FALSE
1695 if numClusters == 1:
1696 clusterResults = main.TRUE
1697 utilities.assert_equals(
1698 expect=1,
1699 actual=numClusters,
1700 onpass="ONOS shows 1 SCC",
1701 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1702
1703 main.step( "Comparing ONOS topology to MN" )
1704 devicesResults = main.TRUE
1705 linksResults = main.TRUE
1706 hostsResults = main.TRUE
1707 mnSwitches = main.Mininet1.getSwitches()
1708 mnLinks = main.Mininet1.getLinks()
1709 mnHosts = main.Mininet1.getHosts()
1710 for controller in main.activeNodes:
1711 controllerStr = str( main.activeNodes[controller] + 1 )
1712 if devices[ controller ] and ports[ controller ] and\
1713 "Error" not in devices[ controller ] and\
1714 "Error" not in ports[ controller ]:
1715 currentDevicesResult = main.Mininet1.compareSwitches(
1716 mnSwitches,
1717 json.loads( devices[ controller ] ),
1718 json.loads( ports[ controller ] ) )
1719 else:
1720 currentDevicesResult = main.FALSE
1721 utilities.assert_equals( expect=main.TRUE,
1722 actual=currentDevicesResult,
1723 onpass="ONOS" + controllerStr +
1724 " Switches view is correct",
1725 onfail="ONOS" + controllerStr +
1726 " Switches view is incorrect" )
1727 if links[ controller ] and "Error" not in links[ controller ]:
1728 currentLinksResult = main.Mininet1.compareLinks(
1729 mnSwitches, mnLinks,
1730 json.loads( links[ controller ] ) )
1731 else:
1732 currentLinksResult = main.FALSE
1733 utilities.assert_equals( expect=main.TRUE,
1734 actual=currentLinksResult,
1735 onpass="ONOS" + controllerStr +
1736 " links view is correct",
1737 onfail="ONOS" + controllerStr +
1738 " links view is incorrect" )
1739
1740 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1741 currentHostsResult = main.Mininet1.compareHosts(
1742 mnHosts,
1743 hosts[ controller ] )
1744 else:
1745 currentHostsResult = main.FALSE
1746 utilities.assert_equals( expect=main.TRUE,
1747 actual=currentHostsResult,
1748 onpass="ONOS" + controllerStr +
1749 " hosts exist in Mininet",
1750 onfail="ONOS" + controllerStr +
1751 " hosts don't match Mininet" )
1752
1753 devicesResults = devicesResults and currentDevicesResult
1754 linksResults = linksResults and currentLinksResult
1755 hostsResults = hostsResults and currentHostsResult
1756
1757 main.step( "Device information is correct" )
1758 utilities.assert_equals(
1759 expect=main.TRUE,
1760 actual=devicesResults,
1761 onpass="Device information is correct",
1762 onfail="Device information is incorrect" )
1763
1764 main.step( "Links are correct" )
1765 utilities.assert_equals(
1766 expect=main.TRUE,
1767 actual=linksResults,
1768 onpass="Link are correct",
1769 onfail="Links are incorrect" )
1770
1771 main.step( "Hosts are correct" )
1772 utilities.assert_equals(
1773 expect=main.TRUE,
1774 actual=hostsResults,
1775 onpass="Hosts are correct",
1776 onfail="Hosts are incorrect" )
1777
1778 def CASE61( self, main ):
1779 """
1780 The Failure case.
1781 """
1782 import math
1783 assert main.numCtrls, "main.numCtrls not defined"
1784 assert main, "main not defined"
1785 assert utilities.assert_equals, "utilities.assert_equals not defined"
1786 assert main.CLIs, "main.CLIs not defined"
1787 assert main.nodes, "main.nodes not defined"
1788 main.case( "Partition ONOS nodes into two distinct partitions" )
1789
1790 main.step( "Checking ONOS Logs for errors" )
1791 for node in main.nodes:
1792 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1793 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1794
1795 n = len( main.nodes ) # Number of nodes
1796 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1797 main.partition = [ 0 ] # ONOS node to partition, listed by index in main.nodes
1798 if n > 3:
1799 main.partition.append( p - 1 )
1800 # NOTE: This only works for cluster sizes of 3,5, or 7.
1801
1802 main.step( "Partitioning ONOS nodes" )
1803 nodeList = [ str( i + 1 ) for i in main.partition ]
1804 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1805 partitionResults = main.TRUE
1806 for i in range( 0, n ):
1807 this = main.nodes[i]
1808 if i not in main.partition:
1809 for j in main.partition:
1810 foe = main.nodes[j]
1811 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1812 #CMD HERE
1813 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1814 this.handle.sendline( cmdStr )
1815 this.handle.expect( "\$" )
1816 main.log.debug( this.handle.before )
1817 else:
1818 for j in range( 0, n ):
1819 if j not in main.partition:
1820 foe = main.nodes[j]
1821 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1822 #CMD HERE
1823 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1824 this.handle.sendline( cmdStr )
1825 this.handle.expect( "\$" )
1826 main.log.debug( this.handle.before )
1827 main.activeNodes.remove( i )
1828 # NOTE: When dynamic clustering is finished, we need to start checking
1829 # main.partion nodes still work when partitioned
1830 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1831 onpass="Firewall rules set successfully",
1832 onfail="Error setting firewall rules" )
1833
1834 main.log.step( "Sleeping 60 seconds" )
1835 time.sleep( 60 )
1836
1837 def CASE62( self, main ):
1838 """
1839 Healing Partition
1840 """
1841 import time
1842 assert main.numCtrls, "main.numCtrls not defined"
1843 assert main, "main not defined"
1844 assert utilities.assert_equals, "utilities.assert_equals not defined"
1845 assert main.CLIs, "main.CLIs not defined"
1846 assert main.nodes, "main.nodes not defined"
1847 assert main.partition, "main.partition not defined"
1848 main.case( "Healing Partition" )
1849
1850 main.step( "Deleteing firewall rules" )
1851 healResults = main.TRUE
1852 for node in main.nodes:
1853 cmdStr = "sudo iptables -F"
1854 node.handle.sendline( cmdStr )
1855 node.handle.expect( "\$" )
1856 main.log.debug( node.handle.before )
1857 utilities.assert_equals( expect=main.TRUE, actual=healResults,
1858 onpass="Firewall rules removed",
1859 onfail="Error removing firewall rules" )
1860
1861 for node in main.partition:
1862 main.activeNodes.append( node )
1863 main.activeNodes.sort()
1864 try:
1865 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1866 "List of active nodes has duplicates, this likely indicates something was run out of order"
1867 except AssertionError:
1868 main.log.exception( "" )
1869 main.cleanup()
1870 main.exit()
1871
    def CASE7( self, main ):
        """
        Check state after ONOS failure

        Verifies that the remaining active nodes still agree on cluster
        state: every device has a master, mastership and intents are
        consistent across nodes, switch flow tables are unchanged from the
        pre-failure snapshot, and the leadership election app still has a
        single, non-partitioned leader.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.partition is normally set by CASE61; default to an empty list
        # so this case can also run without a preceding partition
        try:
            main.partition
        except AttributeError:
            main.partition = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query each active node in parallel; TestON Thread stores the
        # call's return value in t.result after join()
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output is empty or contains "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Every node should report the same mastership as the first node
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's view to help debug the inconsistency
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

        # Try to make it easy to figure out what is happening
        #
        # Intent      ONOS1      ONOS2    ...
        # 0x01     INSTALLED  INSTALLING
        # ...        ...         ...
        # ...        ...         ...
        title = "   ID"
        for n in main.activeNodes:
            title += " " * 10 + "ONOS" + str( n + 1 )
        main.log.warn( title )
        # get all intent keys in the cluster
        keys = []
        for nodeStr in ONOSIntents:
            node = json.loads( nodeStr )
            for intent in node:
                keys.append( intent.get( 'id' ) )
        keys = set( keys )
        # One table row per intent id; one state column per node
        for key in keys:
            row = "%-13s" % key
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    if intent.get( 'id' ) == key:
                        row += "%-15s" % intent.get( 'state' )
            main.log.warn( row )
        # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a histogram of intent states per node
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # intentState is a shared-scope variable saved by an earlier case;
        # guard with NameError in case that case did not run
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before failure" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same count but different text: compare intent-by-intent
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before failure",
            onfail="The Intents changed during failure" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            # NOTE(review): `flows` is assumed to be the per-switch snapshot
            # saved by an earlier case (CASE5) — not defined in this case
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes that were cut off; the leader must not be one
        partitioned = []
        for i in main.partition:
            partitioned.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in partitioned:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was partitioned" )
                leaderResult = main.FALSE
        # All active nodes must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2194
2195 def CASE8( self, main ):
2196 """
2197 Compare topo
2198 """
2199 import json
2200 import time
2201 assert main.numCtrls, "main.numCtrls not defined"
2202 assert main, "main not defined"
2203 assert utilities.assert_equals, "utilities.assert_equals not defined"
2204 assert main.CLIs, "main.CLIs not defined"
2205 assert main.nodes, "main.nodes not defined"
2206
2207 main.case( "Compare ONOS Topology view to Mininet topology" )
2208 main.caseExplanation = "Compare topology objects between Mininet" +\
2209 " and ONOS"
2210 topoResult = main.FALSE
2211 topoFailMsg = "ONOS topology don't match Mininet"
2212 elapsed = 0
2213 count = 0
2214 main.step( "Comparing ONOS topology to MN topology" )
2215 startTime = time.time()
2216 # Give time for Gossip to work
2217 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2218 devicesResults = main.TRUE
2219 linksResults = main.TRUE
2220 hostsResults = main.TRUE
2221 hostAttachmentResults = True
2222 count += 1
2223 cliStart = time.time()
2224 devices = []
2225 threads = []
2226 for i in main.activeNodes:
2227 t = main.Thread( target=utilities.retry,
2228 name="devices-" + str( i ),
2229 args=[ main.CLIs[i].devices, [ None ] ],
2230 kwargs= { 'sleep': 5, 'attempts': 5,
2231 'randomTime': True } )
2232 threads.append( t )
2233 t.start()
2234
2235 for t in threads:
2236 t.join()
2237 devices.append( t.result )
2238 hosts = []
2239 ipResult = main.TRUE
2240 threads = []
2241 for i in main.activeNodes:
2242 t = main.Thread( target=utilities.retry,
2243 name="hosts-" + str( i ),
2244 args=[ main.CLIs[i].hosts, [ None ] ],
2245 kwargs= { 'sleep': 5, 'attempts': 5,
2246 'randomTime': True } )
2247 threads.append( t )
2248 t.start()
2249
2250 for t in threads:
2251 t.join()
2252 try:
2253 hosts.append( json.loads( t.result ) )
2254 except ( ValueError, TypeError ):
2255 main.log.exception( "Error parsing hosts results" )
2256 main.log.error( repr( t.result ) )
2257 hosts.append( None )
2258 for controller in range( 0, len( hosts ) ):
2259 controllerStr = str( main.activeNodes[controller] + 1 )
2260 if hosts[ controller ]:
2261 for host in hosts[ controller ]:
2262 if host is None or host.get( 'ipAddresses', [] ) == []:
2263 main.log.error(
2264 "Error with host ipAddresses on controller" +
2265 controllerStr + ": " + str( host ) )
2266 ipResult = main.FALSE
2267 ports = []
2268 threads = []
2269 for i in main.activeNodes:
2270 t = main.Thread( target=utilities.retry,
2271 name="ports-" + str( i ),
2272 args=[ main.CLIs[i].ports, [ None ] ],
2273 kwargs= { 'sleep': 5, 'attempts': 5,
2274 'randomTime': True } )
2275 threads.append( t )
2276 t.start()
2277
2278 for t in threads:
2279 t.join()
2280 ports.append( t.result )
2281 links = []
2282 threads = []
2283 for i in main.activeNodes:
2284 t = main.Thread( target=utilities.retry,
2285 name="links-" + str( i ),
2286 args=[ main.CLIs[i].links, [ None ] ],
2287 kwargs= { 'sleep': 5, 'attempts': 5,
2288 'randomTime': True } )
2289 threads.append( t )
2290 t.start()
2291
2292 for t in threads:
2293 t.join()
2294 links.append( t.result )
2295 clusters = []
2296 threads = []
2297 for i in main.activeNodes:
2298 t = main.Thread( target=utilities.retry,
2299 name="clusters-" + str( i ),
2300 args=[ main.CLIs[i].clusters, [ None ] ],
2301 kwargs= { 'sleep': 5, 'attempts': 5,
2302 'randomTime': True } )
2303 threads.append( t )
2304 t.start()
2305
2306 for t in threads:
2307 t.join()
2308 clusters.append( t.result )
2309
2310 elapsed = time.time() - startTime
2311 cliTime = time.time() - cliStart
2312 print "Elapsed time: " + str( elapsed )
2313 print "CLI time: " + str( cliTime )
2314
2315 if all( e is None for e in devices ) and\
2316 all( e is None for e in hosts ) and\
2317 all( e is None for e in ports ) and\
2318 all( e is None for e in links ) and\
2319 all( e is None for e in clusters ):
2320 topoFailMsg = "Could not get topology from ONOS"
2321 main.log.error( topoFailMsg )
2322 continue # Try again, No use trying to compare
2323
2324 mnSwitches = main.Mininet1.getSwitches()
2325 mnLinks = main.Mininet1.getLinks()
2326 mnHosts = main.Mininet1.getHosts()
2327 for controller in range( len( main.activeNodes ) ):
2328 controllerStr = str( main.activeNodes[controller] + 1 )
2329 if devices[ controller ] and ports[ controller ] and\
2330 "Error" not in devices[ controller ] and\
2331 "Error" not in ports[ controller ]:
2332
2333 try:
2334 currentDevicesResult = main.Mininet1.compareSwitches(
2335 mnSwitches,
2336 json.loads( devices[ controller ] ),
2337 json.loads( ports[ controller ] ) )
2338 except ( TypeError, ValueError ) as e:
2339 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2340 devices[ controller ], ports[ controller ] ) )
2341 else:
2342 currentDevicesResult = main.FALSE
2343 utilities.assert_equals( expect=main.TRUE,
2344 actual=currentDevicesResult,
2345 onpass="ONOS" + controllerStr +
2346 " Switches view is correct",
2347 onfail="ONOS" + controllerStr +
2348 " Switches view is incorrect" )
2349
2350 if links[ controller ] and "Error" not in links[ controller ]:
2351 currentLinksResult = main.Mininet1.compareLinks(
2352 mnSwitches, mnLinks,
2353 json.loads( links[ controller ] ) )
2354 else:
2355 currentLinksResult = main.FALSE
2356 utilities.assert_equals( expect=main.TRUE,
2357 actual=currentLinksResult,
2358 onpass="ONOS" + controllerStr +
2359 " links view is correct",
2360 onfail="ONOS" + controllerStr +
2361 " links view is incorrect" )
2362 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2363 currentHostsResult = main.Mininet1.compareHosts(
2364 mnHosts,
2365 hosts[ controller ] )
2366 elif hosts[ controller ] == []:
2367 currentHostsResult = main.TRUE
2368 else:
2369 currentHostsResult = main.FALSE
2370 utilities.assert_equals( expect=main.TRUE,
2371 actual=currentHostsResult,
2372 onpass="ONOS" + controllerStr +
2373 " hosts exist in Mininet",
2374 onfail="ONOS" + controllerStr +
2375 " hosts don't match Mininet" )
2376 # CHECKING HOST ATTACHMENT POINTS
2377 hostAttachment = True
2378 zeroHosts = False
2379 # FIXME: topo-HA/obelisk specific mappings:
2380 # key is mac and value is dpid
2381 mappings = {}
2382 for i in range( 1, 29 ): # hosts 1 through 28
2383 # set up correct variables:
2384 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2385 if i == 1:
2386 deviceId = "1000".zfill(16)
2387 elif i == 2:
2388 deviceId = "2000".zfill(16)
2389 elif i == 3:
2390 deviceId = "3000".zfill(16)
2391 elif i == 4:
2392 deviceId = "3004".zfill(16)
2393 elif i == 5:
2394 deviceId = "5000".zfill(16)
2395 elif i == 6:
2396 deviceId = "6000".zfill(16)
2397 elif i == 7:
2398 deviceId = "6007".zfill(16)
2399 elif i >= 8 and i <= 17:
2400 dpid = '3' + str( i ).zfill( 3 )
2401 deviceId = dpid.zfill(16)
2402 elif i >= 18 and i <= 27:
2403 dpid = '6' + str( i ).zfill( 3 )
2404 deviceId = dpid.zfill(16)
2405 elif i == 28:
2406 deviceId = "2800".zfill(16)
2407 mappings[ macId ] = deviceId
2408 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2409 if hosts[ controller ] == []:
2410 main.log.warn( "There are no hosts discovered" )
2411 zeroHosts = True
2412 else:
2413 for host in hosts[ controller ]:
2414 mac = None
2415 location = None
2416 device = None
2417 port = None
2418 try:
2419 mac = host.get( 'mac' )
2420 assert mac, "mac field could not be found for this host object"
2421
2422 location = host.get( 'location' )
2423 assert location, "location field could not be found for this host object"
2424
2425 # Trim the protocol identifier off deviceId
2426 device = str( location.get( 'elementId' ) ).split(':')[1]
2427 assert device, "elementId field could not be found for this host location object"
2428
2429 port = location.get( 'port' )
2430 assert port, "port field could not be found for this host location object"
2431
2432 # Now check if this matches where they should be
2433 if mac and device and port:
2434 if str( port ) != "1":
2435 main.log.error( "The attachment port is incorrect for " +
2436 "host " + str( mac ) +
2437 ". Expected: 1 Actual: " + str( port) )
2438 hostAttachment = False
2439 if device != mappings[ str( mac ) ]:
2440 main.log.error( "The attachment device is incorrect for " +
2441 "host " + str( mac ) +
2442 ". Expected: " + mappings[ str( mac ) ] +
2443 " Actual: " + device )
2444 hostAttachment = False
2445 else:
2446 hostAttachment = False
2447 except AssertionError:
2448 main.log.exception( "Json object not as expected" )
2449 main.log.error( repr( host ) )
2450 hostAttachment = False
2451 else:
2452 main.log.error( "No hosts json output or \"Error\"" +
2453 " in output. hosts = " +
2454 repr( hosts[ controller ] ) )
2455 if zeroHosts is False:
2456 hostAttachment = True
2457
2458 # END CHECKING HOST ATTACHMENT POINTS
2459 devicesResults = devicesResults and currentDevicesResult
2460 linksResults = linksResults and currentLinksResult
2461 hostsResults = hostsResults and currentHostsResult
2462 hostAttachmentResults = hostAttachmentResults and\
2463 hostAttachment
Jon Halla440e872016-03-31 15:15:50 -07002464 topoResult = ( devicesResults and linksResults
2465 and hostsResults and ipResult and
2466 hostAttachmentResults )
Jon Hall6e709752016-02-01 13:38:46 -08002467 utilities.assert_equals( expect=True,
2468 actual=topoResult,
2469 onpass="ONOS topology matches Mininet",
2470 onfail=topoFailMsg )
2471 # End of While loop to pull ONOS state
2472
2473 # Compare json objects for hosts and dataplane clusters
2474
2475 # hosts
2476 main.step( "Hosts view is consistent across all ONOS nodes" )
2477 consistentHostsResult = main.TRUE
2478 for controller in range( len( hosts ) ):
2479 controllerStr = str( main.activeNodes[controller] + 1 )
2480 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2481 if hosts[ controller ] == hosts[ 0 ]:
2482 continue
2483 else: # hosts not consistent
2484 main.log.error( "hosts from ONOS" + controllerStr +
2485 " is inconsistent with ONOS1" )
2486 main.log.warn( repr( hosts[ controller ] ) )
2487 consistentHostsResult = main.FALSE
2488
2489 else:
2490 main.log.error( "Error in getting ONOS hosts from ONOS" +
2491 controllerStr )
2492 consistentHostsResult = main.FALSE
2493 main.log.warn( "ONOS" + controllerStr +
2494 " hosts response: " +
2495 repr( hosts[ controller ] ) )
2496 utilities.assert_equals(
2497 expect=main.TRUE,
2498 actual=consistentHostsResult,
2499 onpass="Hosts view is consistent across all ONOS nodes",
2500 onfail="ONOS nodes have different views of hosts" )
2501
2502 main.step( "Hosts information is correct" )
2503 hostsResults = hostsResults and ipResult
2504 utilities.assert_equals(
2505 expect=main.TRUE,
2506 actual=hostsResults,
2507 onpass="Host information is correct",
2508 onfail="Host information is incorrect" )
2509
2510 main.step( "Host attachment points to the network" )
2511 utilities.assert_equals(
2512 expect=True,
2513 actual=hostAttachmentResults,
2514 onpass="Hosts are correctly attached to the network",
2515 onfail="ONOS did not correctly attach hosts to the network" )
2516
2517 # Strongly connected clusters of devices
2518 main.step( "Clusters view is consistent across all ONOS nodes" )
2519 consistentClustersResult = main.TRUE
2520 for controller in range( len( clusters ) ):
2521 controllerStr = str( main.activeNodes[controller] + 1 )
2522 if "Error" not in clusters[ controller ]:
2523 if clusters[ controller ] == clusters[ 0 ]:
2524 continue
2525 else: # clusters not consistent
2526 main.log.error( "clusters from ONOS" +
2527 controllerStr +
2528 " is inconsistent with ONOS1" )
2529 consistentClustersResult = main.FALSE
Jon Hall6e709752016-02-01 13:38:46 -08002530 else:
2531 main.log.error( "Error in getting dataplane clusters " +
2532 "from ONOS" + controllerStr )
2533 consistentClustersResult = main.FALSE
2534 main.log.warn( "ONOS" + controllerStr +
2535 " clusters response: " +
2536 repr( clusters[ controller ] ) )
2537 utilities.assert_equals(
2538 expect=main.TRUE,
2539 actual=consistentClustersResult,
2540 onpass="Clusters view is consistent across all ONOS nodes",
2541 onfail="ONOS nodes have different views of clusters" )
2542
2543 main.step( "There is only one SCC" )
2544 # there should always only be one cluster
2545 try:
2546 numClusters = len( json.loads( clusters[ 0 ] ) )
2547 except ( ValueError, TypeError ):
2548 main.log.exception( "Error parsing clusters[0]: " +
2549 repr( clusters[0] ) )
Jon Halla440e872016-03-31 15:15:50 -07002550 numClusters = "ERROR"
Jon Hall6e709752016-02-01 13:38:46 -08002551 clusterResults = main.FALSE
2552 if numClusters == 1:
2553 clusterResults = main.TRUE
2554 utilities.assert_equals(
2555 expect=1,
2556 actual=numClusters,
2557 onpass="ONOS shows 1 SCC",
2558 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2559
2560 topoResult = ( devicesResults and linksResults
2561 and hostsResults and consistentHostsResult
2562 and consistentClustersResult and clusterResults
2563 and ipResult and hostAttachmentResults )
2564
2565 topoResult = topoResult and int( count <= 2 )
2566 note = "note it takes about " + str( int( cliTime ) ) + \
2567 " seconds for the test to make all the cli calls to fetch " +\
2568 "the topology from each ONOS instance"
2569 main.log.info(
2570 "Very crass estimate for topology discovery/convergence( " +
2571 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2572 str( count ) + " tries" )
2573
2574 main.step( "Device information is correct" )
2575 utilities.assert_equals(
2576 expect=main.TRUE,
2577 actual=devicesResults,
2578 onpass="Device information is correct",
2579 onfail="Device information is incorrect" )
2580
2581 main.step( "Links are correct" )
2582 utilities.assert_equals(
2583 expect=main.TRUE,
2584 actual=linksResults,
2585 onpass="Link are correct",
2586 onfail="Links are incorrect" )
2587
Jon Halla440e872016-03-31 15:15:50 -07002588 main.step( "Hosts are correct" )
2589 utilities.assert_equals(
2590 expect=main.TRUE,
2591 actual=hostsResults,
2592 onpass="Hosts are correct",
2593 onfail="Hosts are incorrect" )
2594
Jon Hall6e709752016-02-01 13:38:46 -08002595 # FIXME: move this to an ONOS state case
2596 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -07002597 nodeResults = utilities.retry( main.HA.nodesCheck,
2598 False,
2599 args=[main.activeNodes],
2600 attempts=5 )
Jon Hall6e709752016-02-01 13:38:46 -08002601
Jon Hall41d39f12016-04-11 22:54:35 -07002602 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Hall6e709752016-02-01 13:38:46 -08002603 onpass="Nodes check successful",
2604 onfail="Nodes check NOT successful" )
Jon Halla440e872016-03-31 15:15:50 -07002605 if not nodeResults:
Jon Hall41d39f12016-04-11 22:54:35 -07002606 for i in main.activeNodes:
Jon Halla440e872016-03-31 15:15:50 -07002607 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hall41d39f12016-04-11 22:54:35 -07002608 main.CLIs[i].name,
2609 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -08002610
2611 def CASE9( self, main ):
2612 """
2613 Link s3-s28 down
2614 """
2615 import time
2616 assert main.numCtrls, "main.numCtrls not defined"
2617 assert main, "main not defined"
2618 assert utilities.assert_equals, "utilities.assert_equals not defined"
2619 assert main.CLIs, "main.CLIs not defined"
2620 assert main.nodes, "main.nodes not defined"
2621 # NOTE: You should probably run a topology check after this
2622
2623 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2624
2625 description = "Turn off a link to ensure that Link Discovery " +\
2626 "is working properly"
2627 main.case( description )
2628
2629 main.step( "Kill Link between s3 and s28" )
2630 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2631 main.log.info( "Waiting " + str( linkSleep ) +
2632 " seconds for link down to be discovered" )
2633 time.sleep( linkSleep )
2634 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2635 onpass="Link down successful",
2636 onfail="Failed to bring link down" )
2637 # TODO do some sort of check here
2638
2639 def CASE10( self, main ):
2640 """
2641 Link s3-s28 up
2642 """
2643 import time
2644 assert main.numCtrls, "main.numCtrls not defined"
2645 assert main, "main not defined"
2646 assert utilities.assert_equals, "utilities.assert_equals not defined"
2647 assert main.CLIs, "main.CLIs not defined"
2648 assert main.nodes, "main.nodes not defined"
2649 # NOTE: You should probably run a topology check after this
2650
2651 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2652
2653 description = "Restore a link to ensure that Link Discovery is " + \
2654 "working properly"
2655 main.case( description )
2656
2657 main.step( "Bring link between s3 and s28 back up" )
2658 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2659 main.log.info( "Waiting " + str( linkSleep ) +
2660 " seconds for link up to be discovered" )
2661 time.sleep( linkSleep )
2662 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2663 onpass="Link up successful",
2664 onfail="Failed to bring link up" )
2665 # TODO do some sort of check here
2666
2667 def CASE11( self, main ):
2668 """
2669 Switch Down
2670 """
2671 # NOTE: You should probably run a topology check after this
2672 import time
2673 assert main.numCtrls, "main.numCtrls not defined"
2674 assert main, "main not defined"
2675 assert utilities.assert_equals, "utilities.assert_equals not defined"
2676 assert main.CLIs, "main.CLIs not defined"
2677 assert main.nodes, "main.nodes not defined"
2678
2679 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2680
2681 description = "Killing a switch to ensure it is discovered correctly"
2682 onosCli = main.CLIs[ main.activeNodes[0] ]
2683 main.case( description )
2684 switch = main.params[ 'kill' ][ 'switch' ]
2685 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2686
2687 # TODO: Make this switch parameterizable
2688 main.step( "Kill " + switch )
2689 main.log.info( "Deleting " + switch )
2690 main.Mininet1.delSwitch( switch )
2691 main.log.info( "Waiting " + str( switchSleep ) +
2692 " seconds for switch down to be discovered" )
2693 time.sleep( switchSleep )
2694 device = onosCli.getDevice( dpid=switchDPID )
2695 # Peek at the deleted switch
2696 main.log.warn( str( device ) )
2697 result = main.FALSE
2698 if device and device[ 'available' ] is False:
2699 result = main.TRUE
2700 utilities.assert_equals( expect=main.TRUE, actual=result,
2701 onpass="Kill switch successful",
2702 onfail="Failed to kill switch?" )
2703
2704 def CASE12( self, main ):
2705 """
2706 Switch Up
2707 """
2708 # NOTE: You should probably run a topology check after this
2709 import time
2710 assert main.numCtrls, "main.numCtrls not defined"
2711 assert main, "main not defined"
2712 assert utilities.assert_equals, "utilities.assert_equals not defined"
2713 assert main.CLIs, "main.CLIs not defined"
2714 assert main.nodes, "main.nodes not defined"
2715 assert ONOS1Port, "ONOS1Port not defined"
2716 assert ONOS2Port, "ONOS2Port not defined"
2717 assert ONOS3Port, "ONOS3Port not defined"
2718 assert ONOS4Port, "ONOS4Port not defined"
2719 assert ONOS5Port, "ONOS5Port not defined"
2720 assert ONOS6Port, "ONOS6Port not defined"
2721 assert ONOS7Port, "ONOS7Port not defined"
2722
2723 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2724 switch = main.params[ 'kill' ][ 'switch' ]
2725 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2726 links = main.params[ 'kill' ][ 'links' ].split()
2727 onosCli = main.CLIs[ main.activeNodes[0] ]
2728 description = "Adding a switch to ensure it is discovered correctly"
2729 main.case( description )
2730
2731 main.step( "Add back " + switch )
2732 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2733 for peer in links:
2734 main.Mininet1.addLink( switch, peer )
2735 ipList = [ node.ip_address for node in main.nodes ]
2736 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2737 main.log.info( "Waiting " + str( switchSleep ) +
2738 " seconds for switch up to be discovered" )
2739 time.sleep( switchSleep )
2740 device = onosCli.getDevice( dpid=switchDPID )
2741 # Peek at the deleted switch
2742 main.log.warn( str( device ) )
2743 result = main.FALSE
2744 if device and device[ 'available' ]:
2745 result = main.TRUE
2746 utilities.assert_equals( expect=main.TRUE, actual=result,
2747 onpass="add switch successful",
2748 onfail="Failed to add switch?" )
2749
2750 def CASE13( self, main ):
2751 """
2752 Clean up
2753 """
2754 import os
2755 import time
2756 assert main.numCtrls, "main.numCtrls not defined"
2757 assert main, "main not defined"
2758 assert utilities.assert_equals, "utilities.assert_equals not defined"
2759 assert main.CLIs, "main.CLIs not defined"
2760 assert main.nodes, "main.nodes not defined"
2761
2762 # printing colors to terminal
2763 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2764 'blue': '\033[94m', 'green': '\033[92m',
2765 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2766 main.case( "Test Cleanup" )
2767 main.step( "Killing tcpdumps" )
2768 main.Mininet2.stopTcpdump()
2769
2770 testname = main.TEST
2771 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2772 main.step( "Copying MN pcap and ONOS log files to test station" )
2773 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2774 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2775 # NOTE: MN Pcap file is being saved to logdir.
2776 # We scp this file as MN and TestON aren't necessarily the same vm
2777
2778 # FIXME: To be replaced with a Jenkin's post script
2779 # TODO: Load these from params
2780 # NOTE: must end in /
2781 logFolder = "/opt/onos/log/"
2782 logFiles = [ "karaf.log", "karaf.log.1" ]
2783 # NOTE: must end in /
2784 for f in logFiles:
2785 for node in main.nodes:
2786 dstName = main.logdir + "/" + node.name + "-" + f
2787 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2788 logFolder + f, dstName )
2789 # std*.log's
2790 # NOTE: must end in /
2791 logFolder = "/opt/onos/var/"
2792 logFiles = [ "stderr.log", "stdout.log" ]
2793 # NOTE: must end in /
2794 for f in logFiles:
2795 for node in main.nodes:
2796 dstName = main.logdir + "/" + node.name + "-" + f
2797 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2798 logFolder + f, dstName )
2799 else:
2800 main.log.debug( "skipping saving log files" )
2801
2802 main.step( "Stopping Mininet" )
2803 mnResult = main.Mininet1.stopNet()
2804 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2805 onpass="Mininet stopped",
2806 onfail="MN cleanup NOT successful" )
2807
2808 main.step( "Checking ONOS Logs for errors" )
2809 for node in main.nodes:
2810 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2811 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2812
2813 try:
2814 timerLog = open( main.logdir + "/Timers.csv", 'w')
2815 # Overwrite with empty line and close
2816 labels = "Gossip Intents"
2817 data = str( gossipTime )
2818 timerLog.write( labels + "\n" + data )
2819 timerLog.close()
2820 except NameError, e:
2821 main.log.exception(e)
2822
2823 def CASE14( self, main ):
2824 """
2825 start election app on all onos nodes
2826 """
2827 assert main.numCtrls, "main.numCtrls not defined"
2828 assert main, "main not defined"
2829 assert utilities.assert_equals, "utilities.assert_equals not defined"
2830 assert main.CLIs, "main.CLIs not defined"
2831 assert main.nodes, "main.nodes not defined"
2832
2833 main.case("Start Leadership Election app")
2834 main.step( "Install leadership election app" )
2835 onosCli = main.CLIs[ main.activeNodes[0] ]
2836 appResult = onosCli.activateApp( "org.onosproject.election" )
2837 utilities.assert_equals(
2838 expect=main.TRUE,
2839 actual=appResult,
2840 onpass="Election app installed",
2841 onfail="Something went wrong with installing Leadership election" )
2842
2843 main.step( "Run for election on each node" )
Jon Hall6e709752016-02-01 13:38:46 -08002844 for i in main.activeNodes:
2845 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002846 time.sleep(5)
2847 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2848 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall6e709752016-02-01 13:38:46 -08002849 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002850 expect=True,
2851 actual=sameResult,
2852 onpass="All nodes see the same leaderboards",
2853 onfail="Inconsistent leaderboards" )
Jon Hall6e709752016-02-01 13:38:46 -08002854
Jon Hall25463a82016-04-13 14:03:52 -07002855 if sameResult:
2856 leader = leaders[ 0 ][ 0 ]
2857 if main.nodes[main.activeNodes[0]].ip_address in leader:
2858 correctLeader = True
2859 else:
2860 correctLeader = False
2861 main.step( "First node was elected leader" )
2862 utilities.assert_equals(
2863 expect=True,
2864 actual=correctLeader,
2865 onpass="Correct leader was elected",
2866 onfail="Incorrect leader" )
Jon Hall6e709752016-02-01 13:38:46 -08002867
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
            15.1 Run election on each node
            15.2 Check that each node has the same leaders and candidates
            15.3 Find current leader and withdraw
            15.4 Check that a new node was elected leader
            15.5 Check that that new leader was the candidate of old leader
            15.6 Run for election on old leader
            15.7 Check that oldLeader is a candidate, and leader if only 1 node
            15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
            withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each node's candidates before withdrawal
        newLeaders = []  # list of lists of each node's candidates after withdrawal
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        # Without the election app loaded, nothing below can work
        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # Boards agree: first entry of the first board is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader by matching its IP to a node
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no node matched the leader's address
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means a node reported that no leader is elected; that is
            # only acceptable when we expect no leader (single-node cluster)
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        # (index 0 was the leader, index 1 appears to repeat it -- TODO confirm
        # leaderboard layout against consistentLeaderboards())
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Not enough candidates on the old board to predict a successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this settle time
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                     str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3041
3042 def CASE16( self, main ):
3043 """
3044 Install Distributed Primitives app
3045 """
3046 import time
3047 assert main.numCtrls, "main.numCtrls not defined"
3048 assert main, "main not defined"
3049 assert utilities.assert_equals, "utilities.assert_equals not defined"
3050 assert main.CLIs, "main.CLIs not defined"
3051 assert main.nodes, "main.nodes not defined"
3052
3053 # Variables for the distributed primitives tests
3054 global pCounterName
Jon Hall6e709752016-02-01 13:38:46 -08003055 global pCounterValue
Jon Hall6e709752016-02-01 13:38:46 -08003056 global onosSet
3057 global onosSetName
3058 pCounterName = "TestON-Partitions"
Jon Hall6e709752016-02-01 13:38:46 -08003059 pCounterValue = 0
Jon Hall6e709752016-02-01 13:38:46 -08003060 onosSet = set([])
3061 onosSetName = "TestON-set"
3062
3063 description = "Install Primitives app"
3064 main.case( description )
3065 main.step( "Install Primitives app" )
3066 appName = "org.onosproject.distributedprimitives"
3067 node = main.activeNodes[0]
3068 appResults = main.CLIs[node].activateApp( appName )
3069 utilities.assert_equals( expect=main.TRUE,
3070 actual=appResults,
3071 onpass="Primitives app activated",
3072 onfail="Primitives app not activated" )
3073 time.sleep( 5 ) # To allow all nodes to activate
3074
3075 def CASE17( self, main ):
3076 """
3077 Check for basic functionality with distributed primitives
3078 """
3079 # Make sure variables are defined/set
3080 assert main.numCtrls, "main.numCtrls not defined"
3081 assert main, "main not defined"
3082 assert utilities.assert_equals, "utilities.assert_equals not defined"
3083 assert main.CLIs, "main.CLIs not defined"
3084 assert main.nodes, "main.nodes not defined"
3085 assert pCounterName, "pCounterName not defined"
Jon Hall6e709752016-02-01 13:38:46 -08003086 assert onosSetName, "onosSetName not defined"
3087 # NOTE: assert fails if value is 0/None/Empty/False
3088 try:
3089 pCounterValue
3090 except NameError:
3091 main.log.error( "pCounterValue not defined, setting to 0" )
3092 pCounterValue = 0
3093 try:
Jon Hall6e709752016-02-01 13:38:46 -08003094 onosSet
3095 except NameError:
3096 main.log.error( "onosSet not defined, setting to empty Set" )
3097 onosSet = set([])
3098 # Variables for the distributed primitives tests. These are local only
3099 addValue = "a"
3100 addAllValue = "a b c d e f"
3101 retainValue = "c d e f"
3102
3103 description = "Check for basic functionality with distributed " +\
3104 "primitives"
3105 main.case( description )
3106 main.caseExplanation = "Test the methods of the distributed " +\
3107 "primitives (counters and sets) throught the cli"
3108 # DISTRIBUTED ATOMIC COUNTERS
3109 # Partitioned counters
3110 main.step( "Increment then get a default counter on each node" )
3111 pCounters = []
3112 threads = []
3113 addedPValues = []
3114 for i in main.activeNodes:
3115 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3116 name="counterAddAndGet-" + str( i ),
3117 args=[ pCounterName ] )
3118 pCounterValue += 1
3119 addedPValues.append( pCounterValue )
3120 threads.append( t )
3121 t.start()
3122
3123 for t in threads:
3124 t.join()
3125 pCounters.append( t.result )
3126 # Check that counter incremented numController times
3127 pCounterResults = True
3128 for i in addedPValues:
3129 tmpResult = i in pCounters
3130 pCounterResults = pCounterResults and tmpResult
3131 if not tmpResult:
3132 main.log.error( str( i ) + " is not in partitioned "
3133 "counter incremented results" )
3134 utilities.assert_equals( expect=True,
3135 actual=pCounterResults,
3136 onpass="Default counter incremented",
3137 onfail="Error incrementing default" +
3138 " counter" )
3139
3140 main.step( "Get then Increment a default counter on each node" )
3141 pCounters = []
3142 threads = []
3143 addedPValues = []
3144 for i in main.activeNodes:
3145 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3146 name="counterGetAndAdd-" + str( i ),
3147 args=[ pCounterName ] )
3148 addedPValues.append( pCounterValue )
3149 pCounterValue += 1
3150 threads.append( t )
3151 t.start()
3152
3153 for t in threads:
3154 t.join()
3155 pCounters.append( t.result )
3156 # Check that counter incremented numController times
3157 pCounterResults = True
3158 for i in addedPValues:
3159 tmpResult = i in pCounters
3160 pCounterResults = pCounterResults and tmpResult
3161 if not tmpResult:
3162 main.log.error( str( i ) + " is not in partitioned "
3163 "counter incremented results" )
3164 utilities.assert_equals( expect=True,
3165 actual=pCounterResults,
3166 onpass="Default counter incremented",
3167 onfail="Error incrementing default" +
3168 " counter" )
3169
3170 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003171 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003172 utilities.assert_equals( expect=main.TRUE,
3173 actual=incrementCheck,
3174 onpass="Added counters are correct",
3175 onfail="Added counters are incorrect" )
3176
3177 main.step( "Add -8 to then get a default counter on each node" )
3178 pCounters = []
3179 threads = []
3180 addedPValues = []
3181 for i in main.activeNodes:
3182 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3183 name="counterIncrement-" + str( i ),
3184 args=[ pCounterName ],
3185 kwargs={ "delta": -8 } )
3186 pCounterValue += -8
3187 addedPValues.append( pCounterValue )
3188 threads.append( t )
3189 t.start()
3190
3191 for t in threads:
3192 t.join()
3193 pCounters.append( t.result )
3194 # Check that counter incremented numController times
3195 pCounterResults = True
3196 for i in addedPValues:
3197 tmpResult = i in pCounters
3198 pCounterResults = pCounterResults and tmpResult
3199 if not tmpResult:
3200 main.log.error( str( i ) + " is not in partitioned "
3201 "counter incremented results" )
3202 utilities.assert_equals( expect=True,
3203 actual=pCounterResults,
3204 onpass="Default counter incremented",
3205 onfail="Error incrementing default" +
3206 " counter" )
3207
3208 main.step( "Add 5 to then get a default counter on each node" )
3209 pCounters = []
3210 threads = []
3211 addedPValues = []
3212 for i in main.activeNodes:
3213 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3214 name="counterIncrement-" + str( i ),
3215 args=[ pCounterName ],
3216 kwargs={ "delta": 5 } )
3217 pCounterValue += 5
3218 addedPValues.append( pCounterValue )
3219 threads.append( t )
3220 t.start()
3221
3222 for t in threads:
3223 t.join()
3224 pCounters.append( t.result )
3225 # Check that counter incremented numController times
3226 pCounterResults = True
3227 for i in addedPValues:
3228 tmpResult = i in pCounters
3229 pCounterResults = pCounterResults and tmpResult
3230 if not tmpResult:
3231 main.log.error( str( i ) + " is not in partitioned "
3232 "counter incremented results" )
3233 utilities.assert_equals( expect=True,
3234 actual=pCounterResults,
3235 onpass="Default counter incremented",
3236 onfail="Error incrementing default" +
3237 " counter" )
3238
3239 main.step( "Get then add 5 to a default counter on each node" )
3240 pCounters = []
3241 threads = []
3242 addedPValues = []
3243 for i in main.activeNodes:
3244 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3245 name="counterIncrement-" + str( i ),
3246 args=[ pCounterName ],
3247 kwargs={ "delta": 5 } )
3248 addedPValues.append( pCounterValue )
3249 pCounterValue += 5
3250 threads.append( t )
3251 t.start()
3252
3253 for t in threads:
3254 t.join()
3255 pCounters.append( t.result )
3256 # Check that counter incremented numController times
3257 pCounterResults = True
3258 for i in addedPValues:
3259 tmpResult = i in pCounters
3260 pCounterResults = pCounterResults and tmpResult
3261 if not tmpResult:
3262 main.log.error( str( i ) + " is not in partitioned "
3263 "counter incremented results" )
3264 utilities.assert_equals( expect=True,
3265 actual=pCounterResults,
3266 onpass="Default counter incremented",
3267 onfail="Error incrementing default" +
3268 " counter" )
3269
3270 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003271 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003272 utilities.assert_equals( expect=main.TRUE,
3273 actual=incrementCheck,
3274 onpass="Added counters are correct",
3275 onfail="Added counters are incorrect" )
3276
Jon Hall6e709752016-02-01 13:38:46 -08003277 # DISTRIBUTED SETS
3278 main.step( "Distributed Set get" )
3279 size = len( onosSet )
3280 getResponses = []
3281 threads = []
3282 for i in main.activeNodes:
3283 t = main.Thread( target=main.CLIs[i].setTestGet,
3284 name="setTestGet-" + str( i ),
3285 args=[ onosSetName ] )
3286 threads.append( t )
3287 t.start()
3288 for t in threads:
3289 t.join()
3290 getResponses.append( t.result )
3291
3292 getResults = main.TRUE
3293 for i in range( len( main.activeNodes ) ):
3294 node = str( main.activeNodes[i] + 1 )
3295 if isinstance( getResponses[ i ], list):
3296 current = set( getResponses[ i ] )
3297 if len( current ) == len( getResponses[ i ] ):
3298 # no repeats
3299 if onosSet != current:
3300 main.log.error( "ONOS" + node +
3301 " has incorrect view" +
3302 " of set " + onosSetName + ":\n" +
3303 str( getResponses[ i ] ) )
3304 main.log.debug( "Expected: " + str( onosSet ) )
3305 main.log.debug( "Actual: " + str( current ) )
3306 getResults = main.FALSE
3307 else:
3308 # error, set is not a set
3309 main.log.error( "ONOS" + node +
3310 " has repeat elements in" +
3311 " set " + onosSetName + ":\n" +
3312 str( getResponses[ i ] ) )
3313 getResults = main.FALSE
3314 elif getResponses[ i ] == main.ERROR:
3315 getResults = main.FALSE
3316 utilities.assert_equals( expect=main.TRUE,
3317 actual=getResults,
3318 onpass="Set elements are correct",
3319 onfail="Set elements are incorrect" )
3320
3321 main.step( "Distributed Set size" )
3322 sizeResponses = []
3323 threads = []
3324 for i in main.activeNodes:
3325 t = main.Thread( target=main.CLIs[i].setTestSize,
3326 name="setTestSize-" + str( i ),
3327 args=[ onosSetName ] )
3328 threads.append( t )
3329 t.start()
3330 for t in threads:
3331 t.join()
3332 sizeResponses.append( t.result )
3333
3334 sizeResults = main.TRUE
3335 for i in range( len( main.activeNodes ) ):
3336 node = str( main.activeNodes[i] + 1 )
3337 if size != sizeResponses[ i ]:
3338 sizeResults = main.FALSE
3339 main.log.error( "ONOS" + node +
3340 " expected a size of " + str( size ) +
3341 " for set " + onosSetName +
3342 " but got " + str( sizeResponses[ i ] ) )
3343 utilities.assert_equals( expect=main.TRUE,
3344 actual=sizeResults,
3345 onpass="Set sizes are correct",
3346 onfail="Set sizes are incorrect" )
3347
3348 main.step( "Distributed Set add()" )
3349 onosSet.add( addValue )
3350 addResponses = []
3351 threads = []
3352 for i in main.activeNodes:
3353 t = main.Thread( target=main.CLIs[i].setTestAdd,
3354 name="setTestAdd-" + str( i ),
3355 args=[ onosSetName, addValue ] )
3356 threads.append( t )
3357 t.start()
3358 for t in threads:
3359 t.join()
3360 addResponses.append( t.result )
3361
3362 # main.TRUE = successfully changed the set
3363 # main.FALSE = action resulted in no change in set
3364 # main.ERROR - Some error in executing the function
3365 addResults = main.TRUE
3366 for i in range( len( main.activeNodes ) ):
3367 if addResponses[ i ] == main.TRUE:
3368 # All is well
3369 pass
3370 elif addResponses[ i ] == main.FALSE:
3371 # Already in set, probably fine
3372 pass
3373 elif addResponses[ i ] == main.ERROR:
3374 # Error in execution
3375 addResults = main.FALSE
3376 else:
3377 # unexpected result
3378 addResults = main.FALSE
3379 if addResults != main.TRUE:
3380 main.log.error( "Error executing set add" )
3381
3382 # Check if set is still correct
3383 size = len( onosSet )
3384 getResponses = []
3385 threads = []
3386 for i in main.activeNodes:
3387 t = main.Thread( target=main.CLIs[i].setTestGet,
3388 name="setTestGet-" + str( i ),
3389 args=[ onosSetName ] )
3390 threads.append( t )
3391 t.start()
3392 for t in threads:
3393 t.join()
3394 getResponses.append( t.result )
3395 getResults = main.TRUE
3396 for i in range( len( main.activeNodes ) ):
3397 node = str( main.activeNodes[i] + 1 )
3398 if isinstance( getResponses[ i ], list):
3399 current = set( getResponses[ i ] )
3400 if len( current ) == len( getResponses[ i ] ):
3401 # no repeats
3402 if onosSet != current:
3403 main.log.error( "ONOS" + node + " has incorrect view" +
3404 " of set " + onosSetName + ":\n" +
3405 str( getResponses[ i ] ) )
3406 main.log.debug( "Expected: " + str( onosSet ) )
3407 main.log.debug( "Actual: " + str( current ) )
3408 getResults = main.FALSE
3409 else:
3410 # error, set is not a set
3411 main.log.error( "ONOS" + node + " has repeat elements in" +
3412 " set " + onosSetName + ":\n" +
3413 str( getResponses[ i ] ) )
3414 getResults = main.FALSE
3415 elif getResponses[ i ] == main.ERROR:
3416 getResults = main.FALSE
3417 sizeResponses = []
3418 threads = []
3419 for i in main.activeNodes:
3420 t = main.Thread( target=main.CLIs[i].setTestSize,
3421 name="setTestSize-" + str( i ),
3422 args=[ onosSetName ] )
3423 threads.append( t )
3424 t.start()
3425 for t in threads:
3426 t.join()
3427 sizeResponses.append( t.result )
3428 sizeResults = main.TRUE
3429 for i in range( len( main.activeNodes ) ):
3430 node = str( main.activeNodes[i] + 1 )
3431 if size != sizeResponses[ i ]:
3432 sizeResults = main.FALSE
3433 main.log.error( "ONOS" + node +
3434 " expected a size of " + str( size ) +
3435 " for set " + onosSetName +
3436 " but got " + str( sizeResponses[ i ] ) )
3437 addResults = addResults and getResults and sizeResults
3438 utilities.assert_equals( expect=main.TRUE,
3439 actual=addResults,
3440 onpass="Set add correct",
3441 onfail="Set add was incorrect" )
3442
3443 main.step( "Distributed Set addAll()" )
3444 onosSet.update( addAllValue.split() )
3445 addResponses = []
3446 threads = []
3447 for i in main.activeNodes:
3448 t = main.Thread( target=main.CLIs[i].setTestAdd,
3449 name="setTestAddAll-" + str( i ),
3450 args=[ onosSetName, addAllValue ] )
3451 threads.append( t )
3452 t.start()
3453 for t in threads:
3454 t.join()
3455 addResponses.append( t.result )
3456
3457 # main.TRUE = successfully changed the set
3458 # main.FALSE = action resulted in no change in set
3459 # main.ERROR - Some error in executing the function
3460 addAllResults = main.TRUE
3461 for i in range( len( main.activeNodes ) ):
3462 if addResponses[ i ] == main.TRUE:
3463 # All is well
3464 pass
3465 elif addResponses[ i ] == main.FALSE:
3466 # Already in set, probably fine
3467 pass
3468 elif addResponses[ i ] == main.ERROR:
3469 # Error in execution
3470 addAllResults = main.FALSE
3471 else:
3472 # unexpected result
3473 addAllResults = main.FALSE
3474 if addAllResults != main.TRUE:
3475 main.log.error( "Error executing set addAll" )
3476
3477 # Check if set is still correct
3478 size = len( onosSet )
3479 getResponses = []
3480 threads = []
3481 for i in main.activeNodes:
3482 t = main.Thread( target=main.CLIs[i].setTestGet,
3483 name="setTestGet-" + str( i ),
3484 args=[ onosSetName ] )
3485 threads.append( t )
3486 t.start()
3487 for t in threads:
3488 t.join()
3489 getResponses.append( t.result )
3490 getResults = main.TRUE
3491 for i in range( len( main.activeNodes ) ):
3492 node = str( main.activeNodes[i] + 1 )
3493 if isinstance( getResponses[ i ], list):
3494 current = set( getResponses[ i ] )
3495 if len( current ) == len( getResponses[ i ] ):
3496 # no repeats
3497 if onosSet != current:
3498 main.log.error( "ONOS" + node +
3499 " has incorrect view" +
3500 " of set " + onosSetName + ":\n" +
3501 str( getResponses[ i ] ) )
3502 main.log.debug( "Expected: " + str( onosSet ) )
3503 main.log.debug( "Actual: " + str( current ) )
3504 getResults = main.FALSE
3505 else:
3506 # error, set is not a set
3507 main.log.error( "ONOS" + node +
3508 " has repeat elements in" +
3509 " set " + onosSetName + ":\n" +
3510 str( getResponses[ i ] ) )
3511 getResults = main.FALSE
3512 elif getResponses[ i ] == main.ERROR:
3513 getResults = main.FALSE
3514 sizeResponses = []
3515 threads = []
3516 for i in main.activeNodes:
3517 t = main.Thread( target=main.CLIs[i].setTestSize,
3518 name="setTestSize-" + str( i ),
3519 args=[ onosSetName ] )
3520 threads.append( t )
3521 t.start()
3522 for t in threads:
3523 t.join()
3524 sizeResponses.append( t.result )
3525 sizeResults = main.TRUE
3526 for i in range( len( main.activeNodes ) ):
3527 node = str( main.activeNodes[i] + 1 )
3528 if size != sizeResponses[ i ]:
3529 sizeResults = main.FALSE
3530 main.log.error( "ONOS" + node +
3531 " expected a size of " + str( size ) +
3532 " for set " + onosSetName +
3533 " but got " + str( sizeResponses[ i ] ) )
3534 addAllResults = addAllResults and getResults and sizeResults
3535 utilities.assert_equals( expect=main.TRUE,
3536 actual=addAllResults,
3537 onpass="Set addAll correct",
3538 onfail="Set addAll was incorrect" )
3539
        main.step( "Distributed Set contains()" )
        # Ask each node whether the set contains the single value added earlier
        containsResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setContains-" + str( i ),
                             args=[ onosSetName ],
                             kwargs={ "values": addValue } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            # NOTE: This is the tuple
            containsResponses.append( t.result )

        containsResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if containsResponses[ i ] == main.ERROR:
                containsResults = main.FALSE
            else:
                # second element of the tuple is the contains() result
                containsResults = containsResults and\
                                  containsResponses[ i ][ 1 ]
        utilities.assert_equals( expect=main.TRUE,
                                 actual=containsResults,
                                 onpass="Set contains is functional",
                                 onfail="Set contains failed" )
3566
3567 main.step( "Distributed Set containsAll()" )
3568 containsAllResponses = []
3569 threads = []
3570 for i in main.activeNodes:
3571 t = main.Thread( target=main.CLIs[i].setTestGet,
3572 name="setContainsAll-" + str( i ),
3573 args=[ onosSetName ],
3574 kwargs={ "values": addAllValue } )
3575 threads.append( t )
3576 t.start()
3577 for t in threads:
3578 t.join()
3579 # NOTE: This is the tuple
3580 containsAllResponses.append( t.result )
3581
3582 containsAllResults = main.TRUE
3583 for i in range( len( main.activeNodes ) ):
3584 if containsResponses[ i ] == main.ERROR:
3585 containsResults = main.FALSE
3586 else:
3587 containsResults = containsResults and\
3588 containsResponses[ i ][ 1 ]
3589 utilities.assert_equals( expect=main.TRUE,
3590 actual=containsAllResults,
3591 onpass="Set containsAll is functional",
3592 onfail="Set containsAll failed" )
3593
        main.step( "Distributed Set remove()" )
        # Remove the single value from the reference set and from ONOS
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported set size on every node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3690
        main.step( "Distributed Set removeAll()" )
        # Remove all of the addAll values from the reference set and from ONOS
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported set size on every node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3790
        main.step( "Distributed Set addAll()" )
        # Re-add the full value list to the (now reduced) set on every node
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported set size on every node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3887
        main.step( "Distributed Set clear()" )
        # Empty the reference set and clear the distributed set on every node.
        # clear is implemented via setTestRemove with the "clear" flag set.
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported set size ( should be 0 after clear )
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
3985
        main.step( "Distributed Set addAll()" )
        # Repopulate the cleared set with the full value list on every node
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported set size on every node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4082
        main.step( "Distributed Set retain()" )
        # Keep only the retain values; retain is implemented via setTestRemove
        # with the "retain" flag set.
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the reported set size on every node
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4179
4180 # Transactional maps
4181 main.step( "Partitioned Transactional maps put" )
4182 tMapValue = "Testing"
4183 numKeys = 100
4184 putResult = True
4185 node = main.activeNodes[0]
4186 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4187 if putResponses and len( putResponses ) == 100:
4188 for i in putResponses:
4189 if putResponses[ i ][ 'value' ] != tMapValue:
4190 putResult = False
4191 else:
4192 putResult = False
4193 if not putResult:
4194 main.log.debug( "Put response values: " + str( putResponses ) )
4195 utilities.assert_equals( expect=True,
4196 actual=putResult,
4197 onpass="Partitioned Transactional Map put successful",
4198 onfail="Partitioned Transactional Map put values are incorrect" )
4199
        main.step( "Partitioned Transactional maps get" )
        # FIXME: is this sleep needed?
        time.sleep( 5 )

        # For each key written in the put step, read it back from every active
        # node in parallel and require all nodes to return the same value.
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )