blob: d4cf53d6fd04ca9cb339dfbfc10268e6f683d7e7 [file] [log] [blame]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001"""
2Description: This test is to determine if ONOS can handle
3 dynamic scaling of the cluster size.
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE6: The scaling case.
13CASE7: Check state after control plane failure
14CASE8: Compare topo
15CASE9: Link s3-s28 down
16CASE10: Link s3-s28 up
17CASE11: Switch down
18CASE12: Switch up
19CASE13: Clean up
20CASE14: start election app on all onos nodes
21CASE15: Check that Leadership Election is still functional
22CASE16: Install Distributed Primitives app
23CASE17: Check for basic functionality with distributed primitives
24"""
25
26
27class HAscaling:
28
29 def __init__( self ):
30 self.default = ''
31
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import time
        import os
        import re
        main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        # Clamp the requested cluster size to the number of nodes the
        # bench actually knows about.
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # Import shared HA helpers and the metadata-file HTTP server helper;
        # abort the whole test run if either dependency is missing.
        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
            from tests.HA.HAscaling.dependencies.Server import Server
            main.Server = Server()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        # Collect CLI/node driver handles (ONOScli1..N, ONOS1..N) created by
        # the TestON topology file; stop at the first missing attribute.
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )

        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        # NOTE(review): "successfull"/"Failled" typos below are preserved;
        # they are runtime report strings, not comments.
        utilities.assert_equals( expect=main.TRUE, actual=cellResult,
                                 onpass="Set cell successfull",
                                 onfail="Failled to set cell" )

        main.step( "Verify connectivity to cell" )
        verifyResult = main.ONOSbench.verifyCell()
        utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
                                 onpass="Verify cell passed",
                                 onfail="Failled to verify cell" )

        # FIXME:this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        # Serve the cluster metadata file over HTTP so nodes can be scaled
        # by regenerating the file (see CASE6).
        main.step( "Setup server for cluster metadata file" )
        port = main.params['server']['port']
        rootDir = os.path.dirname( main.testFile ) + "/dependencies"
        main.log.debug( "Root dir: {}".format( rootDir ) )
        status = main.Server.start( main.ONOSbench,
                                    rootDir,
                                    port=port,
                                    logDir=main.logdir + "/server.log" )
        utilities.assert_equals( expect=main.TRUE, actual=status,
                                 onpass="Server started",
                                 onfail="Failled to start SimpleHTTPServer" )

        main.step( "Generate initial metadata file" )
        # 'scaling' param is a comma-separated list of sizes; an "e" suffix
        # presumably marks an equal-partition layout — confirm in Server.py.
        main.scaling = main.params['scaling'].split( "," )
        main.log.debug( main.scaling )
        scale = main.scaling.pop(0)
        main.log.debug( scale)
        if "e" in scale:
            equal = True
        else:
            equal = False
        main.log.debug( equal)
        main.numCtrls = int( re.search( "\d+", scale ).group(0) )
        genResult = main.Server.generateFile( main.numCtrls, equal=equal )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failled to generate new metadata file" )

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet( )
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAscaling"
        plotName = "Plot-HA"
        index = "1"
        # Confluence wiki macro embedding the Jenkins plot for this job.
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki(graphs)

        # Keep a pristine copy of onos-service; it is restored after install.
        main.step( "Copying backup config files" )
        path = "~/onos/tools/package/bin/onos-service"
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 path,
                                 path + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params['server'].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # Escaped slashes: this string is substituted into a sed expression.
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # Inject a JAVA_OPTS export into onos-service so every node fetches
        # cluster metadata from the HTTP server started above.
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        # Consume the echoed command up to the shell prompt for debugging.
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )

        main.step( "Creating ONOS package" )
        packageResult = main.ONOSbench.buckBuild()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )
        if not packageResult:
            main.cleanup()
            main.exit()

        # Install on ALL bench nodes; only the first numCtrls are started
        # now ("-f"), the rest are installed but left stopped ("-nf") so the
        # scaling case can bring them up later.
        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for i in range( main.ONOSbench.maxNodes ):
            node = main.nodes[i]
            options = "-f"
            if i >= main.numCtrls:
                options = "-nf"  # Don't start more than the current scale
            tmpResult = main.ONOSbench.onosInstall( options=options,
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )

        # Cleanup custom onos-service file
        main.ONOSbench.scp( main.ONOSbench,
                            path + ".backup",
                            path,
                            direction="to" )

        main.step( "Set up ONOS secure SSH" )
        secureSshResult = main.TRUE
        for node in main.nodes:
            secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
        utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
                                 onpass="Test step PASS",
                                 onfail="Test step FAIL" )

        main.step( "Checking if ONOS is up yet" )
        # Up to two passes over all running nodes.
        # NOTE(review): the inner loop reuses 'i', shadowing the retry
        # counter; harmless here but worth renaming.
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for i in range( main.numCtrls ):
                node = main.nodes[i]
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        # Start one CLI session per running node in parallel.
        main.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        # If any node is unhealthy, dump its inactive OSGi components and
        # abort — later cases cannot run against a broken cluster.
        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split(',')
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # 'ONOS_Configuration' is a nested params section:
        # component -> setting -> value, pushed via cfg set on node 0.
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[component]:
                    value = config[component][setting]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        # Verify app name <-> numeric id mappings agree on every node.
        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
409
410 def CASE2( self, main ):
411 """
412 Assign devices to controllers
413 """
414 import re
415 assert main.numCtrls, "main.numCtrls not defined"
416 assert main, "main not defined"
417 assert utilities.assert_equals, "utilities.assert_equals not defined"
418 assert main.CLIs, "main.CLIs not defined"
419 assert main.nodes, "main.nodes not defined"
420
421 main.case( "Assigning devices to controllers" )
422 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
423 "and check that an ONOS node becomes the " +\
424 "master of the device."
425 main.step( "Assign switches to controllers" )
426
427 ipList = []
428 for i in range( main.ONOSbench.maxNodes ):
429 ipList.append( main.nodes[ i ].ip_address )
430 swList = []
431 for i in range( 1, 29 ):
432 swList.append( "s" + str( i ) )
433 main.Mininet1.assignSwController( sw=swList, ip=ipList )
434
435 mastershipCheck = main.TRUE
436 for i in range( 1, 29 ):
437 response = main.Mininet1.getSwController( "s" + str( i ) )
438 try:
439 main.log.info( str( response ) )
440 except Exception:
441 main.log.info( repr( response ) )
442 for node in main.nodes:
443 if re.search( "tcp:" + node.ip_address, response ):
444 mastershipCheck = mastershipCheck and main.TRUE
445 else:
446 main.log.error( "Error, node " + node.ip_address + " is " +
447 "not in the list of controllers s" +
448 str( i ) + " is connecting to." )
449 mastershipCheck = main.FALSE
450 utilities.assert_equals(
451 expect=main.TRUE,
452 actual=mastershipCheck,
453 onpass="Switch mastership assigned correctly",
454 onfail="Switches not assigned correctly to controllers" )
455
456 def CASE21( self, main ):
457 """
458 Assign mastership to controllers
459 """
460 import time
461 assert main.numCtrls, "main.numCtrls not defined"
462 assert main, "main not defined"
463 assert utilities.assert_equals, "utilities.assert_equals not defined"
464 assert main.CLIs, "main.CLIs not defined"
465 assert main.nodes, "main.nodes not defined"
466
467 main.case( "Assigning Controller roles for switches" )
468 main.caseExplanation = "Check that ONOS is connected to each " +\
469 "device. Then manually assign" +\
470 " mastership to specific ONOS nodes using" +\
471 " 'device-role'"
472 main.step( "Assign mastership of switches to specific controllers" )
473 # Manually assign mastership to the controller we want
474 roleCall = main.TRUE
475
476 ipList = [ ]
477 deviceList = []
478 onosCli = main.CLIs[ main.activeNodes[0] ]
479 try:
480 # Assign mastership to specific controllers. This assignment was
481 # determined for a 7 node cluser, but will work with any sized
482 # cluster
483 for i in range( 1, 29 ): # switches 1 through 28
484 # set up correct variables:
485 if i == 1:
486 c = 0
487 ip = main.nodes[ c ].ip_address # ONOS1
488 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
489 elif i == 2:
490 c = 1 % main.numCtrls
491 ip = main.nodes[ c ].ip_address # ONOS2
492 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
493 elif i == 3:
494 c = 1 % main.numCtrls
495 ip = main.nodes[ c ].ip_address # ONOS2
496 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
497 elif i == 4:
498 c = 3 % main.numCtrls
499 ip = main.nodes[ c ].ip_address # ONOS4
500 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
501 elif i == 5:
502 c = 2 % main.numCtrls
503 ip = main.nodes[ c ].ip_address # ONOS3
504 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
505 elif i == 6:
506 c = 2 % main.numCtrls
507 ip = main.nodes[ c ].ip_address # ONOS3
508 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
509 elif i == 7:
510 c = 5 % main.numCtrls
511 ip = main.nodes[ c ].ip_address # ONOS6
512 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
513 elif i >= 8 and i <= 17:
514 c = 4 % main.numCtrls
515 ip = main.nodes[ c ].ip_address # ONOS5
516 dpid = '3' + str( i ).zfill( 3 )
517 deviceId = onosCli.getDevice( dpid ).get( 'id' )
518 elif i >= 18 and i <= 27:
519 c = 6 % main.numCtrls
520 ip = main.nodes[ c ].ip_address # ONOS7
521 dpid = '6' + str( i ).zfill( 3 )
522 deviceId = onosCli.getDevice( dpid ).get( 'id' )
523 elif i == 28:
524 c = 0
525 ip = main.nodes[ c ].ip_address # ONOS1
526 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
527 else:
528 main.log.error( "You didn't write an else statement for " +
529 "switch s" + str( i ) )
530 roleCall = main.FALSE
531 # Assign switch
532 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
533 # TODO: make this controller dynamic
534 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
535 ipList.append( ip )
536 deviceList.append( deviceId )
537 except ( AttributeError, AssertionError ):
538 main.log.exception( "Something is wrong with ONOS device view" )
539 main.log.info( onosCli.devices() )
540 utilities.assert_equals(
541 expect=main.TRUE,
542 actual=roleCall,
543 onpass="Re-assigned switch mastership to designated controller",
544 onfail="Something wrong with deviceRole calls" )
545
546 main.step( "Check mastership was correctly assigned" )
547 roleCheck = main.TRUE
548 # NOTE: This is due to the fact that device mastership change is not
549 # atomic and is actually a multi step process
550 time.sleep( 5 )
551 for i in range( len( ipList ) ):
552 ip = ipList[i]
553 deviceId = deviceList[i]
554 # Check assignment
555 master = onosCli.getRole( deviceId ).get( 'master' )
556 if ip in master:
557 roleCheck = roleCheck and main.TRUE
558 else:
559 roleCheck = roleCheck and main.FALSE
560 main.log.error( "Error, controller " + ip + " is not" +
561 " master " + "of device " +
562 str( deviceId ) + ". Master is " +
563 repr( master ) + "." )
564 utilities.assert_equals(
565 expect=main.TRUE,
566 actual=roleCheck,
567 onpass="Switches were successfully reassigned to designated " +
568 "controller",
569 onfail="Switches were not successfully reassigned" )
570
571 def CASE3( self, main ):
572 """
573 Assign intents
574 """
575 import time
576 import json
577 assert main.numCtrls, "main.numCtrls not defined"
578 assert main, "main not defined"
579 assert utilities.assert_equals, "utilities.assert_equals not defined"
580 assert main.CLIs, "main.CLIs not defined"
581 assert main.nodes, "main.nodes not defined"
582 try:
583 labels
584 except NameError:
585 main.log.error( "labels not defined, setting to []" )
586 labels = []
587 try:
588 data
589 except NameError:
590 main.log.error( "data not defined, setting to []" )
591 data = []
592 # NOTE: we must reinstall intents until we have a persistant intent
593 # datastore!
594 main.case( "Adding host Intents" )
595 main.caseExplanation = "Discover hosts by using pingall then " +\
596 "assign predetermined host-to-host intents." +\
597 " After installation, check that the intent" +\
598 " is distributed to all nodes and the state" +\
599 " is INSTALLED"
600
601 # install onos-app-fwd
602 main.step( "Install reactive forwarding app" )
603 onosCli = main.CLIs[ main.activeNodes[0] ]
604 installResults = onosCli.activateApp( "org.onosproject.fwd" )
605 utilities.assert_equals( expect=main.TRUE, actual=installResults,
606 onpass="Install fwd successful",
607 onfail="Install fwd failed" )
608
609 main.step( "Check app ids" )
610 appCheck = main.TRUE
611 threads = []
612 for i in main.activeNodes:
613 t = main.Thread( target=main.CLIs[i].appToIDCheck,
614 name="appToIDCheck-" + str( i ),
615 args=[] )
616 threads.append( t )
617 t.start()
618
619 for t in threads:
620 t.join()
621 appCheck = appCheck and t.result
622 if appCheck != main.TRUE:
623 main.log.warn( onosCli.apps() )
624 main.log.warn( onosCli.appIDs() )
625 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
626 onpass="App Ids seem to be correct",
627 onfail="Something is wrong with app Ids" )
628
629 main.step( "Discovering Hosts( Via pingall for now )" )
630 # FIXME: Once we have a host discovery mechanism, use that instead
631 # REACTIVE FWD test
632 pingResult = main.FALSE
633 passMsg = "Reactive Pingall test passed"
634 time1 = time.time()
635 pingResult = main.Mininet1.pingall()
636 time2 = time.time()
637 if not pingResult:
638 main.log.warn("First pingall failed. Trying again...")
639 pingResult = main.Mininet1.pingall()
640 passMsg += " on the second try"
641 utilities.assert_equals(
642 expect=main.TRUE,
643 actual=pingResult,
644 onpass= passMsg,
645 onfail="Reactive Pingall failed, " +
646 "one or more ping pairs failed" )
647 main.log.info( "Time for pingall: %2f seconds" %
648 ( time2 - time1 ) )
649 # timeout for fwd flows
650 time.sleep( 11 )
651 # uninstall onos-app-fwd
652 main.step( "Uninstall reactive forwarding app" )
653 node = main.activeNodes[0]
654 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
655 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
656 onpass="Uninstall fwd successful",
657 onfail="Uninstall fwd failed" )
658
659 main.step( "Check app ids" )
660 threads = []
661 appCheck2 = main.TRUE
662 for i in main.activeNodes:
663 t = main.Thread( target=main.CLIs[i].appToIDCheck,
664 name="appToIDCheck-" + str( i ),
665 args=[] )
666 threads.append( t )
667 t.start()
668
669 for t in threads:
670 t.join()
671 appCheck2 = appCheck2 and t.result
672 if appCheck2 != main.TRUE:
673 node = main.activeNodes[0]
674 main.log.warn( main.CLIs[node].apps() )
675 main.log.warn( main.CLIs[node].appIDs() )
676 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
677 onpass="App Ids seem to be correct",
678 onfail="Something is wrong with app Ids" )
679
680 main.step( "Add host intents via cli" )
681 intentIds = []
682 # TODO: move the host numbers to params
683 # Maybe look at all the paths we ping?
684 intentAddResult = True
685 hostResult = main.TRUE
686 for i in range( 8, 18 ):
687 main.log.info( "Adding host intent between h" + str( i ) +
688 " and h" + str( i + 10 ) )
689 host1 = "00:00:00:00:00:" + \
690 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
691 host2 = "00:00:00:00:00:" + \
692 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
693 # NOTE: getHost can return None
694 host1Dict = onosCli.getHost( host1 )
695 host2Dict = onosCli.getHost( host2 )
696 host1Id = None
697 host2Id = None
698 if host1Dict and host2Dict:
699 host1Id = host1Dict.get( 'id', None )
700 host2Id = host2Dict.get( 'id', None )
701 if host1Id and host2Id:
702 nodeNum = ( i % len( main.activeNodes ) )
703 node = main.activeNodes[nodeNum]
704 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
705 if tmpId:
706 main.log.info( "Added intent with id: " + tmpId )
707 intentIds.append( tmpId )
708 else:
709 main.log.error( "addHostIntent returned: " +
710 repr( tmpId ) )
711 else:
712 main.log.error( "Error, getHost() failed for h" + str( i ) +
713 " and/or h" + str( i + 10 ) )
714 node = main.activeNodes[0]
715 hosts = main.CLIs[node].hosts()
716 main.log.warn( "Hosts output: " )
717 try:
718 main.log.warn( json.dumps( json.loads( hosts ),
719 sort_keys=True,
720 indent=4,
721 separators=( ',', ': ' ) ) )
722 except ( ValueError, TypeError ):
723 main.log.warn( repr( hosts ) )
724 hostResult = main.FALSE
725 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
726 onpass="Found a host id for each host",
727 onfail="Error looking up host ids" )
728
729 intentStart = time.time()
730 onosIds = onosCli.getAllIntentsId()
731 main.log.info( "Submitted intents: " + str( intentIds ) )
732 main.log.info( "Intents in ONOS: " + str( onosIds ) )
733 for intent in intentIds:
734 if intent in onosIds:
735 pass # intent submitted is in onos
736 else:
737 intentAddResult = False
738 if intentAddResult:
739 intentStop = time.time()
740 else:
741 intentStop = None
742 # Print the intent states
743 intents = onosCli.intents()
744 intentStates = []
745 installedCheck = True
746 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
747 count = 0
748 try:
749 for intent in json.loads( intents ):
750 state = intent.get( 'state', None )
751 if "INSTALLED" not in state:
752 installedCheck = False
753 intentId = intent.get( 'id', None )
754 intentStates.append( ( intentId, state ) )
755 except ( ValueError, TypeError ):
756 main.log.exception( "Error parsing intents" )
757 # add submitted intents not in the store
758 tmplist = [ i for i, s in intentStates ]
759 missingIntents = False
760 for i in intentIds:
761 if i not in tmplist:
762 intentStates.append( ( i, " - " ) )
763 missingIntents = True
764 intentStates.sort()
765 for i, s in intentStates:
766 count += 1
767 main.log.info( "%-6s%-15s%-15s" %
768 ( str( count ), str( i ), str( s ) ) )
769 leaders = onosCli.leaders()
770 try:
771 missing = False
772 if leaders:
773 parsedLeaders = json.loads( leaders )
774 main.log.warn( json.dumps( parsedLeaders,
775 sort_keys=True,
776 indent=4,
777 separators=( ',', ': ' ) ) )
778 # check for all intent partitions
779 topics = []
780 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700781 topics.append( "work-partition-" + str( i ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700782 main.log.debug( topics )
783 ONOStopics = [ j['topic'] for j in parsedLeaders ]
784 for topic in topics:
785 if topic not in ONOStopics:
786 main.log.error( "Error: " + topic +
787 " not in leaders" )
788 missing = True
789 else:
790 main.log.error( "leaders() returned None" )
791 except ( ValueError, TypeError ):
792 main.log.exception( "Error parsing leaders" )
793 main.log.error( repr( leaders ) )
794 # Check all nodes
795 if missing:
796 for i in main.activeNodes:
797 response = main.CLIs[i].leaders( jsonFormat=False)
798 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
799 str( response ) )
800
801 partitions = onosCli.partitions()
802 try:
803 if partitions :
804 parsedPartitions = json.loads( partitions )
805 main.log.warn( json.dumps( parsedPartitions,
806 sort_keys=True,
807 indent=4,
808 separators=( ',', ': ' ) ) )
809 # TODO check for a leader in all paritions
810 # TODO check for consistency among nodes
811 else:
812 main.log.error( "partitions() returned None" )
813 except ( ValueError, TypeError ):
814 main.log.exception( "Error parsing partitions" )
815 main.log.error( repr( partitions ) )
816 pendingMap = onosCli.pendingMap()
817 try:
818 if pendingMap :
819 parsedPending = json.loads( pendingMap )
820 main.log.warn( json.dumps( parsedPending,
821 sort_keys=True,
822 indent=4,
823 separators=( ',', ': ' ) ) )
824 # TODO check something here?
825 else:
826 main.log.error( "pendingMap() returned None" )
827 except ( ValueError, TypeError ):
828 main.log.exception( "Error parsing pending map" )
829 main.log.error( repr( pendingMap ) )
830
831 intentAddResult = bool( intentAddResult and not missingIntents and
832 installedCheck )
833 if not intentAddResult:
834 main.log.error( "Error in pushing host intents to ONOS" )
835
836 main.step( "Intent Anti-Entropy dispersion" )
837 for j in range(100):
838 correct = True
839 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
840 for i in main.activeNodes:
841 onosIds = []
842 ids = main.CLIs[i].getAllIntentsId()
843 onosIds.append( ids )
844 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
845 str( sorted( onosIds ) ) )
846 if sorted( ids ) != sorted( intentIds ):
847 main.log.warn( "Set of intent IDs doesn't match" )
848 correct = False
849 break
850 else:
851 intents = json.loads( main.CLIs[i].intents() )
852 for intent in intents:
853 if intent[ 'state' ] != "INSTALLED":
854 main.log.warn( "Intent " + intent[ 'id' ] +
855 " is " + intent[ 'state' ] )
856 correct = False
857 break
858 if correct:
859 break
860 else:
861 time.sleep(1)
862 if not intentStop:
863 intentStop = time.time()
864 global gossipTime
865 gossipTime = intentStop - intentStart
866 main.log.info( "It took about " + str( gossipTime ) +
867 " seconds for all intents to appear in each node" )
868 append = False
869 title = "Gossip Intents"
870 count = 1
871 while append is False:
872 curTitle = title + str( count )
873 if curTitle not in labels:
874 labels.append( curTitle )
875 data.append( str( gossipTime ) )
876 append = True
877 else:
878 count += 1
879 gossipPeriod = int( main.params['timers']['gossip'] )
880 maxGossipTime = gossipPeriod * len( main.activeNodes )
881 utilities.assert_greater_equals(
882 expect=maxGossipTime, actual=gossipTime,
883 onpass="ECM anti-entropy for intents worked within " +
884 "expected time",
885 onfail="Intent ECM anti-entropy took too long. " +
886 "Expected time:{}, Actual time:{}".format( maxGossipTime,
887 gossipTime ) )
888 if gossipTime <= maxGossipTime:
889 intentAddResult = True
890
891 if not intentAddResult or "key" in pendingMap:
892 import time
893 installedCheck = True
894 main.log.info( "Sleeping 60 seconds to see if intents are found" )
895 time.sleep( 60 )
896 onosIds = onosCli.getAllIntentsId()
897 main.log.info( "Submitted intents: " + str( intentIds ) )
898 main.log.info( "Intents in ONOS: " + str( onosIds ) )
899 # Print the intent states
900 intents = onosCli.intents()
901 intentStates = []
902 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
903 count = 0
904 try:
905 for intent in json.loads( intents ):
906 # Iter through intents of a node
907 state = intent.get( 'state', None )
908 if "INSTALLED" not in state:
909 installedCheck = False
910 intentId = intent.get( 'id', None )
911 intentStates.append( ( intentId, state ) )
912 except ( ValueError, TypeError ):
913 main.log.exception( "Error parsing intents" )
914 # add submitted intents not in the store
915 tmplist = [ i for i, s in intentStates ]
916 for i in intentIds:
917 if i not in tmplist:
918 intentStates.append( ( i, " - " ) )
919 intentStates.sort()
920 for i, s in intentStates:
921 count += 1
922 main.log.info( "%-6s%-15s%-15s" %
923 ( str( count ), str( i ), str( s ) ) )
924 leaders = onosCli.leaders()
925 try:
926 missing = False
927 if leaders:
928 parsedLeaders = json.loads( leaders )
929 main.log.warn( json.dumps( parsedLeaders,
930 sort_keys=True,
931 indent=4,
932 separators=( ',', ': ' ) ) )
933 # check for all intent partitions
934 # check for election
935 topics = []
936 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700937 topics.append( "work-partition-" + str( i ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700938 # FIXME: this should only be after we start the app
939 topics.append( "org.onosproject.election" )
940 main.log.debug( topics )
941 ONOStopics = [ j['topic'] for j in parsedLeaders ]
942 for topic in topics:
943 if topic not in ONOStopics:
944 main.log.error( "Error: " + topic +
945 " not in leaders" )
946 missing = True
947 else:
948 main.log.error( "leaders() returned None" )
949 except ( ValueError, TypeError ):
950 main.log.exception( "Error parsing leaders" )
951 main.log.error( repr( leaders ) )
952 # Check all nodes
953 if missing:
954 for i in main.activeNodes:
955 node = main.CLIs[i]
956 response = node.leaders( jsonFormat=False)
957 main.log.warn( str( node.name ) + " leaders output: \n" +
958 str( response ) )
959
960 partitions = onosCli.partitions()
961 try:
962 if partitions :
963 parsedPartitions = json.loads( partitions )
964 main.log.warn( json.dumps( parsedPartitions,
965 sort_keys=True,
966 indent=4,
967 separators=( ',', ': ' ) ) )
968 # TODO check for a leader in all paritions
969 # TODO check for consistency among nodes
970 else:
971 main.log.error( "partitions() returned None" )
972 except ( ValueError, TypeError ):
973 main.log.exception( "Error parsing partitions" )
974 main.log.error( repr( partitions ) )
975 pendingMap = onosCli.pendingMap()
976 try:
977 if pendingMap :
978 parsedPending = json.loads( pendingMap )
979 main.log.warn( json.dumps( parsedPending,
980 sort_keys=True,
981 indent=4,
982 separators=( ',', ': ' ) ) )
983 # TODO check something here?
984 else:
985 main.log.error( "pendingMap() returned None" )
986 except ( ValueError, TypeError ):
987 main.log.exception( "Error parsing pending map" )
988 main.log.error( repr( pendingMap ) )
989
    def CASE4( self, main ):
        """
        Ping across added host intents

        Polls the intent store through one node's CLI (up to ~40 one-second
        retries) until every intent reports an INSTALLED state, then pings
        each intent's host pair (h8-h18, h9-h19, ... h17-h27) in Mininet.
        Also dumps topic leadership, partitions, and the intent pending map
        for debugging. If the intents never reached INSTALLED, waits another
        60 seconds, re-dumps state and flow rules, and pings again before
        the final assertion.
        """
        import json
        import time
        # Sanity-check that the test framework populated the shared state
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All reads below go through a single node's CLI handle
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until every intent is INSTALLED, or give up after 40 tries
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host pairs follow the intent setup: hN is paired with h(N+10)
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                # NOTE(review): 14 work partitions is hard-coded here and in
                # the retry block below — confirm against cluster config
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # Retry path: give intents another minute, re-dump everything, and
        # ping again before making the final assertion
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # If a topic had no leader, get each node's raw view for debugging
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1271
1272 def CASE5( self, main ):
1273 """
1274 Reading state of ONOS
1275 """
1276 import json
1277 import time
1278 assert main.numCtrls, "main.numCtrls not defined"
1279 assert main, "main not defined"
1280 assert utilities.assert_equals, "utilities.assert_equals not defined"
1281 assert main.CLIs, "main.CLIs not defined"
1282 assert main.nodes, "main.nodes not defined"
1283
1284 main.case( "Setting up and gathering data for current state" )
1285 # The general idea for this test case is to pull the state of
1286 # ( intents,flows, topology,... ) from each ONOS node
1287 # We can then compare them with each other and also with past states
1288
1289 main.step( "Check that each switch has a master" )
1290 global mastershipState
1291 mastershipState = '[]'
1292
1293 # Assert that each device has a master
1294 rolesNotNull = main.TRUE
1295 threads = []
1296 for i in main.activeNodes:
1297 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1298 name="rolesNotNull-" + str( i ),
1299 args=[] )
1300 threads.append( t )
1301 t.start()
1302
1303 for t in threads:
1304 t.join()
1305 rolesNotNull = rolesNotNull and t.result
1306 utilities.assert_equals(
1307 expect=main.TRUE,
1308 actual=rolesNotNull,
1309 onpass="Each device has a master",
1310 onfail="Some devices don't have a master assigned" )
1311
1312 main.step( "Get the Mastership of each switch from each controller" )
1313 ONOSMastership = []
1314 consistentMastership = True
1315 rolesResults = True
1316 threads = []
1317 for i in main.activeNodes:
1318 t = main.Thread( target=main.CLIs[i].roles,
1319 name="roles-" + str( i ),
1320 args=[] )
1321 threads.append( t )
1322 t.start()
1323
1324 for t in threads:
1325 t.join()
1326 ONOSMastership.append( t.result )
1327
1328 for i in range( len( ONOSMastership ) ):
1329 node = str( main.activeNodes[i] + 1 )
1330 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1331 main.log.error( "Error in getting ONOS" + node + " roles" )
1332 main.log.warn( "ONOS" + node + " mastership response: " +
1333 repr( ONOSMastership[i] ) )
1334 rolesResults = False
1335 utilities.assert_equals(
1336 expect=True,
1337 actual=rolesResults,
1338 onpass="No error in reading roles output",
1339 onfail="Error in reading roles from ONOS" )
1340
1341 main.step( "Check for consistency in roles from each controller" )
1342 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1343 main.log.info(
1344 "Switch roles are consistent across all ONOS nodes" )
1345 else:
1346 consistentMastership = False
1347 utilities.assert_equals(
1348 expect=True,
1349 actual=consistentMastership,
1350 onpass="Switch roles are consistent across all ONOS nodes",
1351 onfail="ONOS nodes have different views of switch roles" )
1352
1353 if rolesResults and not consistentMastership:
1354 for i in range( len( main.activeNodes ) ):
1355 node = str( main.activeNodes[i] + 1 )
1356 try:
1357 main.log.warn(
1358 "ONOS" + node + " roles: ",
1359 json.dumps(
1360 json.loads( ONOSMastership[ i ] ),
1361 sort_keys=True,
1362 indent=4,
1363 separators=( ',', ': ' ) ) )
1364 except ( ValueError, TypeError ):
1365 main.log.warn( repr( ONOSMastership[ i ] ) )
1366 elif rolesResults and consistentMastership:
1367 mastershipState = ONOSMastership[ 0 ]
1368
1369 main.step( "Get the intents from each controller" )
1370 global intentState
1371 intentState = []
1372 ONOSIntents = []
1373 consistentIntents = True # Are Intents consistent across nodes?
1374 intentsResults = True # Could we read Intents from ONOS?
1375 threads = []
1376 for i in main.activeNodes:
1377 t = main.Thread( target=main.CLIs[i].intents,
1378 name="intents-" + str( i ),
1379 args=[],
1380 kwargs={ 'jsonFormat': True } )
1381 threads.append( t )
1382 t.start()
1383
1384 for t in threads:
1385 t.join()
1386 ONOSIntents.append( t.result )
1387
1388 for i in range( len( ONOSIntents ) ):
1389 node = str( main.activeNodes[i] + 1 )
1390 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1391 main.log.error( "Error in getting ONOS" + node + " intents" )
1392 main.log.warn( "ONOS" + node + " intents response: " +
1393 repr( ONOSIntents[ i ] ) )
1394 intentsResults = False
1395 utilities.assert_equals(
1396 expect=True,
1397 actual=intentsResults,
1398 onpass="No error in reading intents output",
1399 onfail="Error in reading intents from ONOS" )
1400
1401 main.step( "Check for consistency in Intents from each controller" )
1402 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1403 main.log.info( "Intents are consistent across all ONOS " +
1404 "nodes" )
1405 else:
1406 consistentIntents = False
1407 main.log.error( "Intents not consistent" )
1408 utilities.assert_equals(
1409 expect=True,
1410 actual=consistentIntents,
1411 onpass="Intents are consistent across all ONOS nodes",
1412 onfail="ONOS nodes have different views of intents" )
1413
1414 if intentsResults:
1415 # Try to make it easy to figure out what is happening
1416 #
1417 # Intent ONOS1 ONOS2 ...
1418 # 0x01 INSTALLED INSTALLING
1419 # ... ... ...
1420 # ... ... ...
1421 title = " Id"
1422 for n in main.activeNodes:
1423 title += " " * 10 + "ONOS" + str( n + 1 )
1424 main.log.warn( title )
1425 # get all intent keys in the cluster
1426 keys = []
1427 try:
1428 # Get the set of all intent keys
1429 for nodeStr in ONOSIntents:
1430 node = json.loads( nodeStr )
1431 for intent in node:
1432 keys.append( intent.get( 'id' ) )
1433 keys = set( keys )
1434 # For each intent key, print the state on each node
1435 for key in keys:
1436 row = "%-13s" % key
1437 for nodeStr in ONOSIntents:
1438 node = json.loads( nodeStr )
1439 for intent in node:
1440 if intent.get( 'id', "Error" ) == key:
1441 row += "%-15s" % intent.get( 'state' )
1442 main.log.warn( row )
1443 # End of intent state table
1444 except ValueError as e:
1445 main.log.exception( e )
1446 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1447
1448 if intentsResults and not consistentIntents:
1449 # print the json objects
1450 n = str( main.activeNodes[-1] + 1 )
1451 main.log.debug( "ONOS" + n + " intents: " )
1452 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1453 sort_keys=True,
1454 indent=4,
1455 separators=( ',', ': ' ) ) )
1456 for i in range( len( ONOSIntents ) ):
1457 node = str( main.activeNodes[i] + 1 )
1458 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1459 main.log.debug( "ONOS" + node + " intents: " )
1460 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1461 sort_keys=True,
1462 indent=4,
1463 separators=( ',', ': ' ) ) )
1464 else:
1465 main.log.debug( "ONOS" + node + " intents match ONOS" +
1466 n + " intents" )
1467 elif intentsResults and consistentIntents:
1468 intentState = ONOSIntents[ 0 ]
1469
1470 main.step( "Get the flows from each controller" )
1471 global flowState
1472 flowState = []
1473 ONOSFlows = []
1474 ONOSFlowsJson = []
1475 flowCheck = main.FALSE
1476 consistentFlows = True
1477 flowsResults = True
1478 threads = []
1479 for i in main.activeNodes:
1480 t = main.Thread( target=main.CLIs[i].flows,
1481 name="flows-" + str( i ),
1482 args=[],
1483 kwargs={ 'jsonFormat': True } )
1484 threads.append( t )
1485 t.start()
1486
1487 # NOTE: Flows command can take some time to run
1488 time.sleep(30)
1489 for t in threads:
1490 t.join()
1491 result = t.result
1492 ONOSFlows.append( result )
1493
1494 for i in range( len( ONOSFlows ) ):
1495 num = str( main.activeNodes[i] + 1 )
1496 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1497 main.log.error( "Error in getting ONOS" + num + " flows" )
1498 main.log.warn( "ONOS" + num + " flows response: " +
1499 repr( ONOSFlows[ i ] ) )
1500 flowsResults = False
1501 ONOSFlowsJson.append( None )
1502 else:
1503 try:
1504 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1505 except ( ValueError, TypeError ):
1506 # FIXME: change this to log.error?
1507 main.log.exception( "Error in parsing ONOS" + num +
1508 " response as json." )
1509 main.log.error( repr( ONOSFlows[ i ] ) )
1510 ONOSFlowsJson.append( None )
1511 flowsResults = False
1512 utilities.assert_equals(
1513 expect=True,
1514 actual=flowsResults,
1515 onpass="No error in reading flows output",
1516 onfail="Error in reading flows from ONOS" )
1517
1518 main.step( "Check for consistency in Flows from each controller" )
1519 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1520 if all( tmp ):
1521 main.log.info( "Flow count is consistent across all ONOS nodes" )
1522 else:
1523 consistentFlows = False
1524 utilities.assert_equals(
1525 expect=True,
1526 actual=consistentFlows,
1527 onpass="The flow count is consistent across all ONOS nodes",
1528 onfail="ONOS nodes have different flow counts" )
1529
1530 if flowsResults and not consistentFlows:
1531 for i in range( len( ONOSFlows ) ):
1532 node = str( main.activeNodes[i] + 1 )
1533 try:
1534 main.log.warn(
1535 "ONOS" + node + " flows: " +
1536 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1537 indent=4, separators=( ',', ': ' ) ) )
1538 except ( ValueError, TypeError ):
1539 main.log.warn( "ONOS" + node + " flows: " +
1540 repr( ONOSFlows[ i ] ) )
1541 elif flowsResults and consistentFlows:
1542 flowCheck = main.TRUE
1543 flowState = ONOSFlows[ 0 ]
1544
1545 main.step( "Get the OF Table entries" )
1546 global flows
1547 flows = []
1548 for i in range( 1, 29 ):
1549 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1550 if flowCheck == main.FALSE:
1551 for table in flows:
1552 main.log.warn( table )
1553 # TODO: Compare switch flow tables with ONOS flow tables
1554
1555 main.step( "Start continuous pings" )
1556 main.Mininet2.pingLong(
1557 src=main.params[ 'PING' ][ 'source1' ],
1558 target=main.params[ 'PING' ][ 'target1' ],
1559 pingTime=500 )
1560 main.Mininet2.pingLong(
1561 src=main.params[ 'PING' ][ 'source2' ],
1562 target=main.params[ 'PING' ][ 'target2' ],
1563 pingTime=500 )
1564 main.Mininet2.pingLong(
1565 src=main.params[ 'PING' ][ 'source3' ],
1566 target=main.params[ 'PING' ][ 'target3' ],
1567 pingTime=500 )
1568 main.Mininet2.pingLong(
1569 src=main.params[ 'PING' ][ 'source4' ],
1570 target=main.params[ 'PING' ][ 'target4' ],
1571 pingTime=500 )
1572 main.Mininet2.pingLong(
1573 src=main.params[ 'PING' ][ 'source5' ],
1574 target=main.params[ 'PING' ][ 'target5' ],
1575 pingTime=500 )
1576 main.Mininet2.pingLong(
1577 src=main.params[ 'PING' ][ 'source6' ],
1578 target=main.params[ 'PING' ][ 'target6' ],
1579 pingTime=500 )
1580 main.Mininet2.pingLong(
1581 src=main.params[ 'PING' ][ 'source7' ],
1582 target=main.params[ 'PING' ][ 'target7' ],
1583 pingTime=500 )
1584 main.Mininet2.pingLong(
1585 src=main.params[ 'PING' ][ 'source8' ],
1586 target=main.params[ 'PING' ][ 'target8' ],
1587 pingTime=500 )
1588 main.Mininet2.pingLong(
1589 src=main.params[ 'PING' ][ 'source9' ],
1590 target=main.params[ 'PING' ][ 'target9' ],
1591 pingTime=500 )
1592 main.Mininet2.pingLong(
1593 src=main.params[ 'PING' ][ 'source10' ],
1594 target=main.params[ 'PING' ][ 'target10' ],
1595 pingTime=500 )
1596
1597 main.step( "Collecting topology information from ONOS" )
1598 devices = []
1599 threads = []
1600 for i in main.activeNodes:
1601 t = main.Thread( target=main.CLIs[i].devices,
1602 name="devices-" + str( i ),
1603 args=[ ] )
1604 threads.append( t )
1605 t.start()
1606
1607 for t in threads:
1608 t.join()
1609 devices.append( t.result )
1610 hosts = []
1611 threads = []
1612 for i in main.activeNodes:
1613 t = main.Thread( target=main.CLIs[i].hosts,
1614 name="hosts-" + str( i ),
1615 args=[ ] )
1616 threads.append( t )
1617 t.start()
1618
1619 for t in threads:
1620 t.join()
1621 try:
1622 hosts.append( json.loads( t.result ) )
1623 except ( ValueError, TypeError ):
1624 # FIXME: better handling of this, print which node
1625 # Maybe use thread name?
1626 main.log.exception( "Error parsing json output of hosts" )
1627 main.log.warn( repr( t.result ) )
1628 hosts.append( None )
1629
1630 ports = []
1631 threads = []
1632 for i in main.activeNodes:
1633 t = main.Thread( target=main.CLIs[i].ports,
1634 name="ports-" + str( i ),
1635 args=[ ] )
1636 threads.append( t )
1637 t.start()
1638
1639 for t in threads:
1640 t.join()
1641 ports.append( t.result )
1642 links = []
1643 threads = []
1644 for i in main.activeNodes:
1645 t = main.Thread( target=main.CLIs[i].links,
1646 name="links-" + str( i ),
1647 args=[ ] )
1648 threads.append( t )
1649 t.start()
1650
1651 for t in threads:
1652 t.join()
1653 links.append( t.result )
1654 clusters = []
1655 threads = []
1656 for i in main.activeNodes:
1657 t = main.Thread( target=main.CLIs[i].clusters,
1658 name="clusters-" + str( i ),
1659 args=[ ] )
1660 threads.append( t )
1661 t.start()
1662
1663 for t in threads:
1664 t.join()
1665 clusters.append( t.result )
1666 # Compare json objects for hosts and dataplane clusters
1667
1668 # hosts
1669 main.step( "Host view is consistent across ONOS nodes" )
1670 consistentHostsResult = main.TRUE
1671 for controller in range( len( hosts ) ):
1672 controllerStr = str( main.activeNodes[controller] + 1 )
1673 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1674 if hosts[ controller ] == hosts[ 0 ]:
1675 continue
1676 else: # hosts not consistent
1677 main.log.error( "hosts from ONOS" +
1678 controllerStr +
1679 " is inconsistent with ONOS1" )
1680 main.log.warn( repr( hosts[ controller ] ) )
1681 consistentHostsResult = main.FALSE
1682
1683 else:
1684 main.log.error( "Error in getting ONOS hosts from ONOS" +
1685 controllerStr )
1686 consistentHostsResult = main.FALSE
1687 main.log.warn( "ONOS" + controllerStr +
1688 " hosts response: " +
1689 repr( hosts[ controller ] ) )
1690 utilities.assert_equals(
1691 expect=main.TRUE,
1692 actual=consistentHostsResult,
1693 onpass="Hosts view is consistent across all ONOS nodes",
1694 onfail="ONOS nodes have different views of hosts" )
1695
1696 main.step( "Each host has an IP address" )
1697 ipResult = main.TRUE
1698 for controller in range( 0, len( hosts ) ):
1699 controllerStr = str( main.activeNodes[controller] + 1 )
1700 if hosts[ controller ]:
1701 for host in hosts[ controller ]:
1702 if not host.get( 'ipAddresses', [ ] ):
1703 main.log.error( "Error with host ips on controller" +
1704 controllerStr + ": " + str( host ) )
1705 ipResult = main.FALSE
1706 utilities.assert_equals(
1707 expect=main.TRUE,
1708 actual=ipResult,
1709 onpass="The ips of the hosts aren't empty",
1710 onfail="The ip of at least one host is missing" )
1711
1712 # Strongly connected clusters of devices
1713 main.step( "Cluster view is consistent across ONOS nodes" )
1714 consistentClustersResult = main.TRUE
1715 for controller in range( len( clusters ) ):
1716 controllerStr = str( main.activeNodes[controller] + 1 )
1717 if "Error" not in clusters[ controller ]:
1718 if clusters[ controller ] == clusters[ 0 ]:
1719 continue
1720 else: # clusters not consistent
1721 main.log.error( "clusters from ONOS" + controllerStr +
1722 " is inconsistent with ONOS1" )
1723 consistentClustersResult = main.FALSE
1724
1725 else:
1726 main.log.error( "Error in getting dataplane clusters " +
1727 "from ONOS" + controllerStr )
1728 consistentClustersResult = main.FALSE
1729 main.log.warn( "ONOS" + controllerStr +
1730 " clusters response: " +
1731 repr( clusters[ controller ] ) )
1732 utilities.assert_equals(
1733 expect=main.TRUE,
1734 actual=consistentClustersResult,
1735 onpass="Clusters view is consistent across all ONOS nodes",
1736 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001737 if not consistentClustersResult:
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001738 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001739
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001740 # there should always only be one cluster
1741 main.step( "Cluster view correct across ONOS nodes" )
1742 try:
1743 numClusters = len( json.loads( clusters[ 0 ] ) )
1744 except ( ValueError, TypeError ):
1745 main.log.exception( "Error parsing clusters[0]: " +
1746 repr( clusters[ 0 ] ) )
1747 numClusters = "ERROR"
1748 utilities.assert_equals(
1749 expect=1,
1750 actual=numClusters,
1751 onpass="ONOS shows 1 SCC",
1752 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1753
1754 main.step( "Comparing ONOS topology to MN" )
1755 devicesResults = main.TRUE
1756 linksResults = main.TRUE
1757 hostsResults = main.TRUE
1758 mnSwitches = main.Mininet1.getSwitches()
1759 mnLinks = main.Mininet1.getLinks()
1760 mnHosts = main.Mininet1.getHosts()
1761 for controller in main.activeNodes:
1762 controllerStr = str( main.activeNodes[controller] + 1 )
1763 if devices[ controller ] and ports[ controller ] and\
1764 "Error" not in devices[ controller ] and\
1765 "Error" not in ports[ controller ]:
1766 currentDevicesResult = main.Mininet1.compareSwitches(
1767 mnSwitches,
1768 json.loads( devices[ controller ] ),
1769 json.loads( ports[ controller ] ) )
1770 else:
1771 currentDevicesResult = main.FALSE
1772 utilities.assert_equals( expect=main.TRUE,
1773 actual=currentDevicesResult,
1774 onpass="ONOS" + controllerStr +
1775 " Switches view is correct",
1776 onfail="ONOS" + controllerStr +
1777 " Switches view is incorrect" )
1778 if links[ controller ] and "Error" not in links[ controller ]:
1779 currentLinksResult = main.Mininet1.compareLinks(
1780 mnSwitches, mnLinks,
1781 json.loads( links[ controller ] ) )
1782 else:
1783 currentLinksResult = main.FALSE
1784 utilities.assert_equals( expect=main.TRUE,
1785 actual=currentLinksResult,
1786 onpass="ONOS" + controllerStr +
1787 " links view is correct",
1788 onfail="ONOS" + controllerStr +
1789 " links view is incorrect" )
1790
1791 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1792 currentHostsResult = main.Mininet1.compareHosts(
1793 mnHosts,
1794 hosts[ controller ] )
1795 else:
1796 currentHostsResult = main.FALSE
1797 utilities.assert_equals( expect=main.TRUE,
1798 actual=currentHostsResult,
1799 onpass="ONOS" + controllerStr +
1800 " hosts exist in Mininet",
1801 onfail="ONOS" + controllerStr +
1802 " hosts don't match Mininet" )
1803
1804 devicesResults = devicesResults and currentDevicesResult
1805 linksResults = linksResults and currentLinksResult
1806 hostsResults = hostsResults and currentHostsResult
1807
1808 main.step( "Device information is correct" )
1809 utilities.assert_equals(
1810 expect=main.TRUE,
1811 actual=devicesResults,
1812 onpass="Device information is correct",
1813 onfail="Device information is incorrect" )
1814
1815 main.step( "Links are correct" )
1816 utilities.assert_equals(
1817 expect=main.TRUE,
1818 actual=linksResults,
1819 onpass="Link are correct",
1820 onfail="Links are incorrect" )
1821
1822 main.step( "Hosts are correct" )
1823 utilities.assert_equals(
1824 expect=main.TRUE,
1825 actual=hostsResults,
1826 onpass="Hosts are correct",
1827 onfail="Hosts are incorrect" )
1828
1829 def CASE6( self, main ):
1830 """
1831 The Scaling case.
1832 """
1833 import time
1834 import re
1835 assert main.numCtrls, "main.numCtrls not defined"
1836 assert main, "main not defined"
1837 assert utilities.assert_equals, "utilities.assert_equals not defined"
1838 assert main.CLIs, "main.CLIs not defined"
1839 assert main.nodes, "main.nodes not defined"
1840 try:
1841 labels
1842 except NameError:
1843 main.log.error( "labels not defined, setting to []" )
1844 global labels
1845 labels = []
1846 try:
1847 data
1848 except NameError:
1849 main.log.error( "data not defined, setting to []" )
1850 global data
1851 data = []
1852
Jon Hall69b2b982016-05-11 12:04:59 -07001853 main.case( "Scale the number of nodes in the ONOS cluster" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001854
1855 main.step( "Checking ONOS Logs for errors" )
1856 for i in main.activeNodes:
1857 node = main.nodes[i]
1858 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1859 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1860
1861 """
1862 pop # of nodes from a list, might look like 1,3b,3,5b,5,7b,7,7b,5,5b,3...
1863 modify cluster.json file appropriately
1864 install/deactivate node as needed
1865 """
1866
1867 try:
1868 prevNodes = main.activeNodes
1869 scale = main.scaling.pop(0)
1870 if "e" in scale:
1871 equal = True
1872 else:
1873 equal = False
1874 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
1875 main.log.info( "Scaling to {} nodes".format( main.numCtrls ) )
1876 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
1877 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1878 onpass="New cluster metadata file generated",
1879 onfail="Failled to generate new metadata file" )
1880 time.sleep( 5 ) # Give time for nodes to read new file
1881 except IndexError:
1882 main.cleanup()
1883 main.exit()
1884
1885 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
1886 newNodes = [ x for x in main.activeNodes if x not in prevNodes ]
1887
1888 main.step( "Start new nodes" ) # OR stop old nodes?
1889 started = main.TRUE
1890 for i in newNodes:
1891 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1892 utilities.assert_equals( expect=main.TRUE, actual=started,
1893 onpass="ONOS started",
1894 onfail="ONOS start NOT successful" )
1895
1896 main.step( "Checking if ONOS is up yet" )
1897 for i in range( 2 ):
1898 onosIsupResult = main.TRUE
1899 for i in main.activeNodes:
1900 node = main.nodes[i]
1901 started = main.ONOSbench.isup( node.ip_address )
1902 if not started:
1903 main.log.error( node.name + " didn't start!" )
1904 onosIsupResult = onosIsupResult and started
1905 if onosIsupResult == main.TRUE:
1906 break
1907 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1908 onpass="ONOS started",
1909 onfail="ONOS start NOT successful" )
1910
Jon Hall6509dbf2016-06-21 17:01:17 -07001911 main.step( "Starting ONOS CLI sessions" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001912 cliResults = main.TRUE
1913 threads = []
1914 for i in main.activeNodes:
1915 t = main.Thread( target=main.CLIs[i].startOnosCli,
1916 name="startOnosCli-" + str( i ),
1917 args=[main.nodes[i].ip_address] )
1918 threads.append( t )
1919 t.start()
1920
1921 for t in threads:
1922 t.join()
1923 cliResults = cliResults and t.result
1924 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1925 onpass="ONOS cli started",
1926 onfail="ONOS clis did not start" )
1927
1928 main.step( "Checking ONOS nodes" )
1929 nodeResults = utilities.retry( main.HA.nodesCheck,
1930 False,
1931 args=[main.activeNodes],
1932 attempts=5 )
1933 utilities.assert_equals( expect=True, actual=nodeResults,
1934 onpass="Nodes check successful",
1935 onfail="Nodes check NOT successful" )
1936
1937 for i in range( 10 ):
1938 ready = True
1939 for i in main.activeNodes:
1940 cli = main.CLIs[i]
1941 output = cli.summary()
1942 if not output:
1943 ready = False
1944 if ready:
1945 break
1946 time.sleep( 30 )
1947 utilities.assert_equals( expect=True, actual=ready,
1948 onpass="ONOS summary command succeded",
1949 onfail="ONOS summary command failed" )
1950 if not ready:
1951 main.cleanup()
1952 main.exit()
1953
1954 # Rerun for election on new nodes
1955 runResults = main.TRUE
1956 for i in main.activeNodes:
1957 cli = main.CLIs[i]
1958 run = cli.electionTestRun()
1959 if run != main.TRUE:
1960 main.log.error( "Error running for election on " + cli.name )
1961 runResults = runResults and run
1962 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1963 onpass="Reran for election",
1964 onfail="Failed to rerun for election" )
1965
1966 # TODO: Make this configurable
1967 time.sleep( 60 )
1968 for node in main.activeNodes:
1969 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1970 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1971 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1972 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1973 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1974
    def CASE7( self, main ):
        """
        Check state after ONOS scaling

        Verifies that device mastership, intents, switch flow tables and the
        leadership election survived the cluster scaling done in CASE6.
        Compares current intents and flows against state captured before
        scaling ( the globals `intentState` and `flows` — the NOTE below ties
        `intentState` to case 5; `flows` is presumably saved there too,
        verify against CASE5 ).
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel; results are ANDed together
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect each node's mastership view ( raw CLI output, in
        # main.activeNodes order )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Every node's raw output must match node 0's exactly
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's roles for debugging
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        # Collect each node's intents as JSON ( in main.activeNodes order )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare sorted raw outputs so ordering differences don't matter
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # `intentState` is a global saved before scaling; a NameError here
        # means the pre-scaling snapshot was never taken
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same size but not byte-identical: compare intent by intent
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before scaling",
            onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        FlowTables = main.TRUE
        # NOTE(review): `flows` is a global holding per-switch flow tables
        # captured before scaling — presumably saved in CASE5; verify
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        # The triple-quoted block below is intentionally disabled code
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        # Every node must report the same, non-None, non-FALSE leader
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2283
2284 def CASE8( self, main ):
2285 """
2286 Compare topo
2287 """
2288 import json
2289 import time
2290 assert main.numCtrls, "main.numCtrls not defined"
2291 assert main, "main not defined"
2292 assert utilities.assert_equals, "utilities.assert_equals not defined"
2293 assert main.CLIs, "main.CLIs not defined"
2294 assert main.nodes, "main.nodes not defined"
2295
2296 main.case( "Compare ONOS Topology view to Mininet topology" )
2297 main.caseExplanation = "Compare topology objects between Mininet" +\
2298 " and ONOS"
2299 topoResult = main.FALSE
2300 topoFailMsg = "ONOS topology don't match Mininet"
2301 elapsed = 0
2302 count = 0
2303 main.step( "Comparing ONOS topology to MN topology" )
2304 startTime = time.time()
2305 # Give time for Gossip to work
2306 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2307 devicesResults = main.TRUE
2308 linksResults = main.TRUE
2309 hostsResults = main.TRUE
2310 hostAttachmentResults = True
2311 count += 1
2312 cliStart = time.time()
2313 devices = []
2314 threads = []
2315 for i in main.activeNodes:
2316 t = main.Thread( target=utilities.retry,
2317 name="devices-" + str( i ),
2318 args=[ main.CLIs[i].devices, [ None ] ],
2319 kwargs= { 'sleep': 5, 'attempts': 5,
2320 'randomTime': True } )
2321 threads.append( t )
2322 t.start()
2323
2324 for t in threads:
2325 t.join()
2326 devices.append( t.result )
2327 hosts = []
2328 ipResult = main.TRUE
2329 threads = []
2330 for i in main.activeNodes:
2331 t = main.Thread( target=utilities.retry,
2332 name="hosts-" + str( i ),
2333 args=[ main.CLIs[i].hosts, [ None ] ],
2334 kwargs= { 'sleep': 5, 'attempts': 5,
2335 'randomTime': True } )
2336 threads.append( t )
2337 t.start()
2338
2339 for t in threads:
2340 t.join()
2341 try:
2342 hosts.append( json.loads( t.result ) )
2343 except ( ValueError, TypeError ):
2344 main.log.exception( "Error parsing hosts results" )
2345 main.log.error( repr( t.result ) )
2346 hosts.append( None )
2347 for controller in range( 0, len( hosts ) ):
2348 controllerStr = str( main.activeNodes[controller] + 1 )
2349 if hosts[ controller ]:
2350 for host in hosts[ controller ]:
2351 if host is None or host.get( 'ipAddresses', [] ) == []:
2352 main.log.error(
2353 "Error with host ipAddresses on controller" +
2354 controllerStr + ": " + str( host ) )
2355 ipResult = main.FALSE
2356 ports = []
2357 threads = []
2358 for i in main.activeNodes:
2359 t = main.Thread( target=utilities.retry,
2360 name="ports-" + str( i ),
2361 args=[ main.CLIs[i].ports, [ None ] ],
2362 kwargs= { 'sleep': 5, 'attempts': 5,
2363 'randomTime': True } )
2364 threads.append( t )
2365 t.start()
2366
2367 for t in threads:
2368 t.join()
2369 ports.append( t.result )
2370 links = []
2371 threads = []
2372 for i in main.activeNodes:
2373 t = main.Thread( target=utilities.retry,
2374 name="links-" + str( i ),
2375 args=[ main.CLIs[i].links, [ None ] ],
2376 kwargs= { 'sleep': 5, 'attempts': 5,
2377 'randomTime': True } )
2378 threads.append( t )
2379 t.start()
2380
2381 for t in threads:
2382 t.join()
2383 links.append( t.result )
2384 clusters = []
2385 threads = []
2386 for i in main.activeNodes:
2387 t = main.Thread( target=utilities.retry,
2388 name="clusters-" + str( i ),
2389 args=[ main.CLIs[i].clusters, [ None ] ],
2390 kwargs= { 'sleep': 5, 'attempts': 5,
2391 'randomTime': True } )
2392 threads.append( t )
2393 t.start()
2394
2395 for t in threads:
2396 t.join()
2397 clusters.append( t.result )
2398
2399 elapsed = time.time() - startTime
2400 cliTime = time.time() - cliStart
2401 print "Elapsed time: " + str( elapsed )
2402 print "CLI time: " + str( cliTime )
2403
2404 if all( e is None for e in devices ) and\
2405 all( e is None for e in hosts ) and\
2406 all( e is None for e in ports ) and\
2407 all( e is None for e in links ) and\
2408 all( e is None for e in clusters ):
2409 topoFailMsg = "Could not get topology from ONOS"
2410 main.log.error( topoFailMsg )
2411 continue # Try again, No use trying to compare
2412
2413 mnSwitches = main.Mininet1.getSwitches()
2414 mnLinks = main.Mininet1.getLinks()
2415 mnHosts = main.Mininet1.getHosts()
2416 for controller in range( len( main.activeNodes ) ):
2417 controllerStr = str( main.activeNodes[controller] + 1 )
2418 if devices[ controller ] and ports[ controller ] and\
2419 "Error" not in devices[ controller ] and\
2420 "Error" not in ports[ controller ]:
2421
2422 try:
2423 currentDevicesResult = main.Mininet1.compareSwitches(
2424 mnSwitches,
2425 json.loads( devices[ controller ] ),
2426 json.loads( ports[ controller ] ) )
2427 except ( TypeError, ValueError ):
2428 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2429 devices[ controller ], ports[ controller ] ) )
2430 else:
2431 currentDevicesResult = main.FALSE
2432 utilities.assert_equals( expect=main.TRUE,
2433 actual=currentDevicesResult,
2434 onpass="ONOS" + controllerStr +
2435 " Switches view is correct",
2436 onfail="ONOS" + controllerStr +
2437 " Switches view is incorrect" )
2438
2439 if links[ controller ] and "Error" not in links[ controller ]:
2440 currentLinksResult = main.Mininet1.compareLinks(
2441 mnSwitches, mnLinks,
2442 json.loads( links[ controller ] ) )
2443 else:
2444 currentLinksResult = main.FALSE
2445 utilities.assert_equals( expect=main.TRUE,
2446 actual=currentLinksResult,
2447 onpass="ONOS" + controllerStr +
2448 " links view is correct",
2449 onfail="ONOS" + controllerStr +
2450 " links view is incorrect" )
2451 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2452 currentHostsResult = main.Mininet1.compareHosts(
2453 mnHosts,
2454 hosts[ controller ] )
2455 elif hosts[ controller ] == []:
2456 currentHostsResult = main.TRUE
2457 else:
2458 currentHostsResult = main.FALSE
2459 utilities.assert_equals( expect=main.TRUE,
2460 actual=currentHostsResult,
2461 onpass="ONOS" + controllerStr +
2462 " hosts exist in Mininet",
2463 onfail="ONOS" + controllerStr +
2464 " hosts don't match Mininet" )
2465 # CHECKING HOST ATTACHMENT POINTS
2466 hostAttachment = True
2467 zeroHosts = False
2468 # FIXME: topo-HA/obelisk specific mappings:
2469 # key is mac and value is dpid
2470 mappings = {}
2471 for i in range( 1, 29 ): # hosts 1 through 28
2472 # set up correct variables:
2473 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2474 if i == 1:
2475 deviceId = "1000".zfill(16)
2476 elif i == 2:
2477 deviceId = "2000".zfill(16)
2478 elif i == 3:
2479 deviceId = "3000".zfill(16)
2480 elif i == 4:
2481 deviceId = "3004".zfill(16)
2482 elif i == 5:
2483 deviceId = "5000".zfill(16)
2484 elif i == 6:
2485 deviceId = "6000".zfill(16)
2486 elif i == 7:
2487 deviceId = "6007".zfill(16)
2488 elif i >= 8 and i <= 17:
2489 dpid = '3' + str( i ).zfill( 3 )
2490 deviceId = dpid.zfill(16)
2491 elif i >= 18 and i <= 27:
2492 dpid = '6' + str( i ).zfill( 3 )
2493 deviceId = dpid.zfill(16)
2494 elif i == 28:
2495 deviceId = "2800".zfill(16)
2496 mappings[ macId ] = deviceId
2497 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2498 if hosts[ controller ] == []:
2499 main.log.warn( "There are no hosts discovered" )
2500 zeroHosts = True
2501 else:
2502 for host in hosts[ controller ]:
2503 mac = None
2504 location = None
2505 device = None
2506 port = None
2507 try:
2508 mac = host.get( 'mac' )
2509 assert mac, "mac field could not be found for this host object"
2510
2511 location = host.get( 'location' )
2512 assert location, "location field could not be found for this host object"
2513
2514 # Trim the protocol identifier off deviceId
2515 device = str( location.get( 'elementId' ) ).split(':')[1]
2516 assert device, "elementId field could not be found for this host location object"
2517
2518 port = location.get( 'port' )
2519 assert port, "port field could not be found for this host location object"
2520
2521 # Now check if this matches where they should be
2522 if mac and device and port:
2523 if str( port ) != "1":
2524 main.log.error( "The attachment port is incorrect for " +
2525 "host " + str( mac ) +
2526 ". Expected: 1 Actual: " + str( port) )
2527 hostAttachment = False
2528 if device != mappings[ str( mac ) ]:
2529 main.log.error( "The attachment device is incorrect for " +
2530 "host " + str( mac ) +
2531 ". Expected: " + mappings[ str( mac ) ] +
2532 " Actual: " + device )
2533 hostAttachment = False
2534 else:
2535 hostAttachment = False
2536 except AssertionError:
2537 main.log.exception( "Json object not as expected" )
2538 main.log.error( repr( host ) )
2539 hostAttachment = False
2540 else:
2541 main.log.error( "No hosts json output or \"Error\"" +
2542 " in output. hosts = " +
2543 repr( hosts[ controller ] ) )
2544 if zeroHosts is False:
2545 # TODO: Find a way to know if there should be hosts in a
2546 # given point of the test
2547 hostAttachment = True
2548
2549 # END CHECKING HOST ATTACHMENT POINTS
2550 devicesResults = devicesResults and currentDevicesResult
2551 linksResults = linksResults and currentLinksResult
2552 hostsResults = hostsResults and currentHostsResult
2553 hostAttachmentResults = hostAttachmentResults and\
2554 hostAttachment
2555 topoResult = ( devicesResults and linksResults
2556 and hostsResults and ipResult and
2557 hostAttachmentResults )
2558 utilities.assert_equals( expect=True,
2559 actual=topoResult,
2560 onpass="ONOS topology matches Mininet",
2561 onfail=topoFailMsg )
2562 # End of While loop to pull ONOS state
2563
2564 # Compare json objects for hosts and dataplane clusters
2565
2566 # hosts
2567 main.step( "Hosts view is consistent across all ONOS nodes" )
2568 consistentHostsResult = main.TRUE
2569 for controller in range( len( hosts ) ):
2570 controllerStr = str( main.activeNodes[controller] + 1 )
2571 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2572 if hosts[ controller ] == hosts[ 0 ]:
2573 continue
2574 else: # hosts not consistent
2575 main.log.error( "hosts from ONOS" + controllerStr +
2576 " is inconsistent with ONOS1" )
2577 main.log.warn( repr( hosts[ controller ] ) )
2578 consistentHostsResult = main.FALSE
2579
2580 else:
2581 main.log.error( "Error in getting ONOS hosts from ONOS" +
2582 controllerStr )
2583 consistentHostsResult = main.FALSE
2584 main.log.warn( "ONOS" + controllerStr +
2585 " hosts response: " +
2586 repr( hosts[ controller ] ) )
2587 utilities.assert_equals(
2588 expect=main.TRUE,
2589 actual=consistentHostsResult,
2590 onpass="Hosts view is consistent across all ONOS nodes",
2591 onfail="ONOS nodes have different views of hosts" )
2592
2593 main.step( "Hosts information is correct" )
2594 hostsResults = hostsResults and ipResult
2595 utilities.assert_equals(
2596 expect=main.TRUE,
2597 actual=hostsResults,
2598 onpass="Host information is correct",
2599 onfail="Host information is incorrect" )
2600
2601 main.step( "Host attachment points to the network" )
2602 utilities.assert_equals(
2603 expect=True,
2604 actual=hostAttachmentResults,
2605 onpass="Hosts are correctly attached to the network",
2606 onfail="ONOS did not correctly attach hosts to the network" )
2607
2608 # Strongly connected clusters of devices
2609 main.step( "Clusters view is consistent across all ONOS nodes" )
2610 consistentClustersResult = main.TRUE
2611 for controller in range( len( clusters ) ):
2612 controllerStr = str( main.activeNodes[controller] + 1 )
2613 if "Error" not in clusters[ controller ]:
2614 if clusters[ controller ] == clusters[ 0 ]:
2615 continue
2616 else: # clusters not consistent
2617 main.log.error( "clusters from ONOS" +
2618 controllerStr +
2619 " is inconsistent with ONOS1" )
2620 consistentClustersResult = main.FALSE
2621 else:
2622 main.log.error( "Error in getting dataplane clusters " +
2623 "from ONOS" + controllerStr )
2624 consistentClustersResult = main.FALSE
2625 main.log.warn( "ONOS" + controllerStr +
2626 " clusters response: " +
2627 repr( clusters[ controller ] ) )
2628 utilities.assert_equals(
2629 expect=main.TRUE,
2630 actual=consistentClustersResult,
2631 onpass="Clusters view is consistent across all ONOS nodes",
2632 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07002633 if not consistentClustersResult:
2634 main.log.debug( clusters )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002635
2636 main.step( "There is only one SCC" )
2637 # there should always only be one cluster
2638 try:
2639 numClusters = len( json.loads( clusters[ 0 ] ) )
2640 except ( ValueError, TypeError ):
2641 main.log.exception( "Error parsing clusters[0]: " +
2642 repr( clusters[0] ) )
2643 numClusters = "ERROR"
2644 clusterResults = main.FALSE
2645 if numClusters == 1:
2646 clusterResults = main.TRUE
2647 utilities.assert_equals(
2648 expect=1,
2649 actual=numClusters,
2650 onpass="ONOS shows 1 SCC",
2651 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2652
2653 topoResult = ( devicesResults and linksResults
2654 and hostsResults and consistentHostsResult
2655 and consistentClustersResult and clusterResults
2656 and ipResult and hostAttachmentResults )
2657
2658 topoResult = topoResult and int( count <= 2 )
2659 note = "note it takes about " + str( int( cliTime ) ) + \
2660 " seconds for the test to make all the cli calls to fetch " +\
2661 "the topology from each ONOS instance"
2662 main.log.info(
2663 "Very crass estimate for topology discovery/convergence( " +
2664 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2665 str( count ) + " tries" )
2666
2667 main.step( "Device information is correct" )
2668 utilities.assert_equals(
2669 expect=main.TRUE,
2670 actual=devicesResults,
2671 onpass="Device information is correct",
2672 onfail="Device information is incorrect" )
2673
2674 main.step( "Links are correct" )
2675 utilities.assert_equals(
2676 expect=main.TRUE,
2677 actual=linksResults,
2678 onpass="Link are correct",
2679 onfail="Links are incorrect" )
2680
2681 main.step( "Hosts are correct" )
2682 utilities.assert_equals(
2683 expect=main.TRUE,
2684 actual=hostsResults,
2685 onpass="Hosts are correct",
2686 onfail="Hosts are incorrect" )
2687
2688 # FIXME: move this to an ONOS state case
2689 main.step( "Checking ONOS nodes" )
2690 nodeResults = utilities.retry( main.HA.nodesCheck,
2691 False,
2692 args=[main.activeNodes],
2693 attempts=5 )
2694 utilities.assert_equals( expect=True, actual=nodeResults,
2695 onpass="Nodes check successful",
2696 onfail="Nodes check NOT successful" )
2697 if not nodeResults:
2698 for i in main.activeNodes:
2699 main.log.debug( "{} components not ACTIVE: \n{}".format(
2700 main.CLIs[i].name,
2701 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
2702
Jon Halld2871c22016-07-26 11:01:14 -07002703 if not topoResult:
2704 main.cleanup()
2705 main.exit()
2706
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002707 def CASE9( self, main ):
2708 """
2709 Link s3-s28 down
2710 """
2711 import time
2712 assert main.numCtrls, "main.numCtrls not defined"
2713 assert main, "main not defined"
2714 assert utilities.assert_equals, "utilities.assert_equals not defined"
2715 assert main.CLIs, "main.CLIs not defined"
2716 assert main.nodes, "main.nodes not defined"
2717 # NOTE: You should probably run a topology check after this
2718
2719 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2720
2721 description = "Turn off a link to ensure that Link Discovery " +\
2722 "is working properly"
2723 main.case( description )
2724
2725 main.step( "Kill Link between s3 and s28" )
2726 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2727 main.log.info( "Waiting " + str( linkSleep ) +
2728 " seconds for link down to be discovered" )
2729 time.sleep( linkSleep )
2730 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2731 onpass="Link down successful",
2732 onfail="Failed to bring link down" )
2733 # TODO do some sort of check here
2734
2735 def CASE10( self, main ):
2736 """
2737 Link s3-s28 up
2738 """
2739 import time
2740 assert main.numCtrls, "main.numCtrls not defined"
2741 assert main, "main not defined"
2742 assert utilities.assert_equals, "utilities.assert_equals not defined"
2743 assert main.CLIs, "main.CLIs not defined"
2744 assert main.nodes, "main.nodes not defined"
2745 # NOTE: You should probably run a topology check after this
2746
2747 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2748
2749 description = "Restore a link to ensure that Link Discovery is " + \
2750 "working properly"
2751 main.case( description )
2752
2753 main.step( "Bring link between s3 and s28 back up" )
2754 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2755 main.log.info( "Waiting " + str( linkSleep ) +
2756 " seconds for link up to be discovered" )
2757 time.sleep( linkSleep )
2758 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2759 onpass="Link up successful",
2760 onfail="Failed to bring link up" )
2761 # TODO do some sort of check here
2762
2763 def CASE11( self, main ):
2764 """
2765 Switch Down
2766 """
2767 # NOTE: You should probably run a topology check after this
2768 import time
2769 assert main.numCtrls, "main.numCtrls not defined"
2770 assert main, "main not defined"
2771 assert utilities.assert_equals, "utilities.assert_equals not defined"
2772 assert main.CLIs, "main.CLIs not defined"
2773 assert main.nodes, "main.nodes not defined"
2774
2775 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2776
2777 description = "Killing a switch to ensure it is discovered correctly"
2778 onosCli = main.CLIs[ main.activeNodes[0] ]
2779 main.case( description )
2780 switch = main.params[ 'kill' ][ 'switch' ]
2781 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2782
2783 # TODO: Make this switch parameterizable
2784 main.step( "Kill " + switch )
2785 main.log.info( "Deleting " + switch )
2786 main.Mininet1.delSwitch( switch )
2787 main.log.info( "Waiting " + str( switchSleep ) +
2788 " seconds for switch down to be discovered" )
2789 time.sleep( switchSleep )
2790 device = onosCli.getDevice( dpid=switchDPID )
2791 # Peek at the deleted switch
2792 main.log.warn( str( device ) )
2793 result = main.FALSE
2794 if device and device[ 'available' ] is False:
2795 result = main.TRUE
2796 utilities.assert_equals( expect=main.TRUE, actual=result,
2797 onpass="Kill switch successful",
2798 onfail="Failed to kill switch?" )
2799
2800 def CASE12( self, main ):
2801 """
2802 Switch Up
2803 """
2804 # NOTE: You should probably run a topology check after this
2805 import time
2806 assert main.numCtrls, "main.numCtrls not defined"
2807 assert main, "main not defined"
2808 assert utilities.assert_equals, "utilities.assert_equals not defined"
2809 assert main.CLIs, "main.CLIs not defined"
2810 assert main.nodes, "main.nodes not defined"
2811
2812 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2813 switch = main.params[ 'kill' ][ 'switch' ]
2814 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2815 links = main.params[ 'kill' ][ 'links' ].split()
2816 onosCli = main.CLIs[ main.activeNodes[0] ]
2817 description = "Adding a switch to ensure it is discovered correctly"
2818 main.case( description )
2819
2820 main.step( "Add back " + switch )
2821 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2822 for peer in links:
2823 main.Mininet1.addLink( switch, peer )
2824 ipList = [ node.ip_address for node in main.nodes ]
2825 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2826 main.log.info( "Waiting " + str( switchSleep ) +
2827 " seconds for switch up to be discovered" )
2828 time.sleep( switchSleep )
2829 device = onosCli.getDevice( dpid=switchDPID )
2830 # Peek at the deleted switch
2831 main.log.warn( str( device ) )
2832 result = main.FALSE
2833 if device and device[ 'available' ]:
2834 result = main.TRUE
2835 utilities.assert_equals( expect=main.TRUE, actual=result,
2836 onpass="add switch successful",
2837 onfail="Failed to add switch?" )
2838
2839 def CASE13( self, main ):
2840 """
2841 Clean up
2842 """
2843 assert main.numCtrls, "main.numCtrls not defined"
2844 assert main, "main not defined"
2845 assert utilities.assert_equals, "utilities.assert_equals not defined"
2846 assert main.CLIs, "main.CLIs not defined"
2847 assert main.nodes, "main.nodes not defined"
2848
2849 main.case( "Test Cleanup" )
2850 main.step( "Killing tcpdumps" )
2851 main.Mininet2.stopTcpdump()
2852
2853 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2854 main.step( "Copying MN pcap and ONOS log files to test station" )
2855 # NOTE: MN Pcap file is being saved to logdir.
2856 # We scp this file as MN and TestON aren't necessarily the same vm
2857
2858 # FIXME: To be replaced with a Jenkin's post script
2859 # TODO: Load these from params
2860 # NOTE: must end in /
2861 logFolder = "/opt/onos/log/"
2862 logFiles = [ "karaf.log", "karaf.log.1" ]
2863 # NOTE: must end in /
2864 for f in logFiles:
2865 for node in main.nodes:
2866 dstName = main.logdir + "/" + node.name + "-" + f
2867 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2868 logFolder + f, dstName )
2869 # std*.log's
2870 # NOTE: must end in /
2871 logFolder = "/opt/onos/var/"
2872 logFiles = [ "stderr.log", "stdout.log" ]
2873 # NOTE: must end in /
2874 for f in logFiles:
2875 for node in main.nodes:
2876 dstName = main.logdir + "/" + node.name + "-" + f
2877 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2878 logFolder + f, dstName )
2879 else:
2880 main.log.debug( "skipping saving log files" )
2881
2882 main.step( "Stopping Mininet" )
2883 mnResult = main.Mininet1.stopNet()
2884 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2885 onpass="Mininet stopped",
2886 onfail="MN cleanup NOT successful" )
2887
2888 main.step( "Checking ONOS Logs for errors" )
2889 for node in main.nodes:
2890 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2891 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2892
2893 try:
2894 timerLog = open( main.logdir + "/Timers.csv", 'w')
2895 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2896 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2897 timerLog.close()
2898 except NameError, e:
2899 main.log.exception(e)
2900
2901 main.step( "Stopping webserver" )
2902 status = main.Server.stop( )
2903 utilities.assert_equals( expect=main.TRUE, actual=status,
2904 onpass="Stop Server",
2905 onfail="Failled to stop SimpleHTTPServer" )
2906 del main.Server
2907
2908 def CASE14( self, main ):
2909 """
2910 start election app on all onos nodes
2911 """
2912 import time
2913 assert main.numCtrls, "main.numCtrls not defined"
2914 assert main, "main not defined"
2915 assert utilities.assert_equals, "utilities.assert_equals not defined"
2916 assert main.CLIs, "main.CLIs not defined"
2917 assert main.nodes, "main.nodes not defined"
2918
2919 main.case("Start Leadership Election app")
2920 main.step( "Install leadership election app" )
2921 onosCli = main.CLIs[ main.activeNodes[0] ]
2922 appResult = onosCli.activateApp( "org.onosproject.election" )
2923 utilities.assert_equals(
2924 expect=main.TRUE,
2925 actual=appResult,
2926 onpass="Election app installed",
2927 onfail="Something went wrong with installing Leadership election" )
2928
2929 main.step( "Run for election on each node" )
2930 for i in main.activeNodes:
2931 main.CLIs[i].electionTestRun()
2932 time.sleep(5)
2933 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2934 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2935 utilities.assert_equals(
2936 expect=True,
2937 actual=sameResult,
2938 onpass="All nodes see the same leaderboards",
2939 onfail="Inconsistent leaderboards" )
2940
2941 if sameResult:
2942 leader = leaders[ 0 ][ 0 ]
2943 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2944 correctLeader = True
2945 else:
2946 correctLeader = False
2947 main.step( "First node was elected leader" )
2948 utilities.assert_equals(
2949 expect=True,
2950 actual=correctLeader,
2951 onpass="Correct leader was elected",
2952 onfail="Incorrect leader" )
2953
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
            15.1 Run election on each node
            15.2 Check that each node has the same leaders and candidates
            15.3 Find current leader and withdraw
            15.4 Check that a new node was elected leader
            15.5 Check that that new leader was the candidate of old leader
            15.6 Run for election on old leader
            15.7 Check that oldLeader is a candidate, and leader if only 1 node
            15.8 Make sure that the old leader was added to the candidate list

            old and new variable prefixes refer to data from before vs after
                withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app loaded the rest of the case is meaningless
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All boards agree; the first entry of the first board is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: runs only if no break, i.e. leader not found
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means a node reported no leader at all; that is only
            # acceptable in the single-controller case
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Not enough candidates on the old board to predict a successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Parameterize
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        # NOTE(review): reRunLeaders[0] raises IndexError if the boards list
        # comes back empty — confirm consistentLeaderboards never returns []
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3127
3128 def CASE16( self, main ):
3129 """
3130 Install Distributed Primitives app
3131 """
3132 import time
3133 assert main.numCtrls, "main.numCtrls not defined"
3134 assert main, "main not defined"
3135 assert utilities.assert_equals, "utilities.assert_equals not defined"
3136 assert main.CLIs, "main.CLIs not defined"
3137 assert main.nodes, "main.nodes not defined"
3138
3139 # Variables for the distributed primitives tests
3140 global pCounterName
3141 global pCounterValue
3142 global onosSet
3143 global onosSetName
3144 pCounterName = "TestON-Partitions"
3145 pCounterValue = 0
3146 onosSet = set([])
3147 onosSetName = "TestON-set"
3148
3149 description = "Install Primitives app"
3150 main.case( description )
3151 main.step( "Install Primitives app" )
3152 appName = "org.onosproject.distributedprimitives"
3153 node = main.activeNodes[0]
3154 appResults = main.CLIs[node].activateApp( appName )
3155 utilities.assert_equals( expect=main.TRUE,
3156 actual=appResults,
3157 onpass="Primitives app activated",
3158 onfail="Primitives app not activated" )
3159 time.sleep( 5 ) # To allow all nodes to activate
3160
3161 def CASE17( self, main ):
3162 """
3163 Check for basic functionality with distributed primitives
3164 """
3165 # Make sure variables are defined/set
3166 assert main.numCtrls, "main.numCtrls not defined"
3167 assert main, "main not defined"
3168 assert utilities.assert_equals, "utilities.assert_equals not defined"
3169 assert main.CLIs, "main.CLIs not defined"
3170 assert main.nodes, "main.nodes not defined"
3171 assert pCounterName, "pCounterName not defined"
3172 assert onosSetName, "onosSetName not defined"
3173 # NOTE: assert fails if value is 0/None/Empty/False
3174 try:
3175 pCounterValue
3176 except NameError:
3177 main.log.error( "pCounterValue not defined, setting to 0" )
3178 pCounterValue = 0
3179 try:
3180 onosSet
3181 except NameError:
3182 main.log.error( "onosSet not defined, setting to empty Set" )
3183 onosSet = set([])
3184 # Variables for the distributed primitives tests. These are local only
3185 addValue = "a"
3186 addAllValue = "a b c d e f"
3187 retainValue = "c d e f"
3188
3189 description = "Check for basic functionality with distributed " +\
3190 "primitives"
3191 main.case( description )
3192 main.caseExplanation = "Test the methods of the distributed " +\
3193 "primitives (counters and sets) throught the cli"
3194 # DISTRIBUTED ATOMIC COUNTERS
3195 # Partitioned counters
3196 main.step( "Increment then get a default counter on each node" )
3197 pCounters = []
3198 threads = []
3199 addedPValues = []
3200 for i in main.activeNodes:
3201 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3202 name="counterAddAndGet-" + str( i ),
3203 args=[ pCounterName ] )
3204 pCounterValue += 1
3205 addedPValues.append( pCounterValue )
3206 threads.append( t )
3207 t.start()
3208
3209 for t in threads:
3210 t.join()
3211 pCounters.append( t.result )
3212 # Check that counter incremented numController times
3213 pCounterResults = True
3214 for i in addedPValues:
3215 tmpResult = i in pCounters
3216 pCounterResults = pCounterResults and tmpResult
3217 if not tmpResult:
3218 main.log.error( str( i ) + " is not in partitioned "
3219 "counter incremented results" )
3220 utilities.assert_equals( expect=True,
3221 actual=pCounterResults,
3222 onpass="Default counter incremented",
3223 onfail="Error incrementing default" +
3224 " counter" )
3225
3226 main.step( "Get then Increment a default counter on each node" )
3227 pCounters = []
3228 threads = []
3229 addedPValues = []
3230 for i in main.activeNodes:
3231 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3232 name="counterGetAndAdd-" + str( i ),
3233 args=[ pCounterName ] )
3234 addedPValues.append( pCounterValue )
3235 pCounterValue += 1
3236 threads.append( t )
3237 t.start()
3238
3239 for t in threads:
3240 t.join()
3241 pCounters.append( t.result )
3242 # Check that counter incremented numController times
3243 pCounterResults = True
3244 for i in addedPValues:
3245 tmpResult = i in pCounters
3246 pCounterResults = pCounterResults and tmpResult
3247 if not tmpResult:
3248 main.log.error( str( i ) + " is not in partitioned "
3249 "counter incremented results" )
3250 utilities.assert_equals( expect=True,
3251 actual=pCounterResults,
3252 onpass="Default counter incremented",
3253 onfail="Error incrementing default" +
3254 " counter" )
3255
3256 main.step( "Counters we added have the correct values" )
3257 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3258 utilities.assert_equals( expect=main.TRUE,
3259 actual=incrementCheck,
3260 onpass="Added counters are correct",
3261 onfail="Added counters are incorrect" )
3262
3263 main.step( "Add -8 to then get a default counter on each node" )
3264 pCounters = []
3265 threads = []
3266 addedPValues = []
3267 for i in main.activeNodes:
3268 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3269 name="counterIncrement-" + str( i ),
3270 args=[ pCounterName ],
3271 kwargs={ "delta": -8 } )
3272 pCounterValue += -8
3273 addedPValues.append( pCounterValue )
3274 threads.append( t )
3275 t.start()
3276
3277 for t in threads:
3278 t.join()
3279 pCounters.append( t.result )
3280 # Check that counter incremented numController times
3281 pCounterResults = True
3282 for i in addedPValues:
3283 tmpResult = i in pCounters
3284 pCounterResults = pCounterResults and tmpResult
3285 if not tmpResult:
3286 main.log.error( str( i ) + " is not in partitioned "
3287 "counter incremented results" )
3288 utilities.assert_equals( expect=True,
3289 actual=pCounterResults,
3290 onpass="Default counter incremented",
3291 onfail="Error incrementing default" +
3292 " counter" )
3293
3294 main.step( "Add 5 to then get a default counter on each node" )
3295 pCounters = []
3296 threads = []
3297 addedPValues = []
3298 for i in main.activeNodes:
3299 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3300 name="counterIncrement-" + str( i ),
3301 args=[ pCounterName ],
3302 kwargs={ "delta": 5 } )
3303 pCounterValue += 5
3304 addedPValues.append( pCounterValue )
3305 threads.append( t )
3306 t.start()
3307
3308 for t in threads:
3309 t.join()
3310 pCounters.append( t.result )
3311 # Check that counter incremented numController times
3312 pCounterResults = True
3313 for i in addedPValues:
3314 tmpResult = i in pCounters
3315 pCounterResults = pCounterResults and tmpResult
3316 if not tmpResult:
3317 main.log.error( str( i ) + " is not in partitioned "
3318 "counter incremented results" )
3319 utilities.assert_equals( expect=True,
3320 actual=pCounterResults,
3321 onpass="Default counter incremented",
3322 onfail="Error incrementing default" +
3323 " counter" )
3324
3325 main.step( "Get then add 5 to a default counter on each node" )
3326 pCounters = []
3327 threads = []
3328 addedPValues = []
3329 for i in main.activeNodes:
3330 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3331 name="counterIncrement-" + str( i ),
3332 args=[ pCounterName ],
3333 kwargs={ "delta": 5 } )
3334 addedPValues.append( pCounterValue )
3335 pCounterValue += 5
3336 threads.append( t )
3337 t.start()
3338
3339 for t in threads:
3340 t.join()
3341 pCounters.append( t.result )
3342 # Check that counter incremented numController times
3343 pCounterResults = True
3344 for i in addedPValues:
3345 tmpResult = i in pCounters
3346 pCounterResults = pCounterResults and tmpResult
3347 if not tmpResult:
3348 main.log.error( str( i ) + " is not in partitioned "
3349 "counter incremented results" )
3350 utilities.assert_equals( expect=True,
3351 actual=pCounterResults,
3352 onpass="Default counter incremented",
3353 onfail="Error incrementing default" +
3354 " counter" )
3355
3356 main.step( "Counters we added have the correct values" )
3357 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3358 utilities.assert_equals( expect=main.TRUE,
3359 actual=incrementCheck,
3360 onpass="Added counters are correct",
3361 onfail="Added counters are incorrect" )
3362
3363 # DISTRIBUTED SETS
3364 main.step( "Distributed Set get" )
3365 size = len( onosSet )
3366 getResponses = []
3367 threads = []
3368 for i in main.activeNodes:
3369 t = main.Thread( target=main.CLIs[i].setTestGet,
3370 name="setTestGet-" + str( i ),
3371 args=[ onosSetName ] )
3372 threads.append( t )
3373 t.start()
3374 for t in threads:
3375 t.join()
3376 getResponses.append( t.result )
3377
3378 getResults = main.TRUE
3379 for i in range( len( main.activeNodes ) ):
3380 node = str( main.activeNodes[i] + 1 )
3381 if isinstance( getResponses[ i ], list):
3382 current = set( getResponses[ i ] )
3383 if len( current ) == len( getResponses[ i ] ):
3384 # no repeats
3385 if onosSet != current:
3386 main.log.error( "ONOS" + node +
3387 " has incorrect view" +
3388 " of set " + onosSetName + ":\n" +
3389 str( getResponses[ i ] ) )
3390 main.log.debug( "Expected: " + str( onosSet ) )
3391 main.log.debug( "Actual: " + str( current ) )
3392 getResults = main.FALSE
3393 else:
3394 # error, set is not a set
3395 main.log.error( "ONOS" + node +
3396 " has repeat elements in" +
3397 " set " + onosSetName + ":\n" +
3398 str( getResponses[ i ] ) )
3399 getResults = main.FALSE
3400 elif getResponses[ i ] == main.ERROR:
3401 getResults = main.FALSE
3402 utilities.assert_equals( expect=main.TRUE,
3403 actual=getResults,
3404 onpass="Set elements are correct",
3405 onfail="Set elements are incorrect" )
3406
3407 main.step( "Distributed Set size" )
3408 sizeResponses = []
3409 threads = []
3410 for i in main.activeNodes:
3411 t = main.Thread( target=main.CLIs[i].setTestSize,
3412 name="setTestSize-" + str( i ),
3413 args=[ onosSetName ] )
3414 threads.append( t )
3415 t.start()
3416 for t in threads:
3417 t.join()
3418 sizeResponses.append( t.result )
3419
3420 sizeResults = main.TRUE
3421 for i in range( len( main.activeNodes ) ):
3422 node = str( main.activeNodes[i] + 1 )
3423 if size != sizeResponses[ i ]:
3424 sizeResults = main.FALSE
3425 main.log.error( "ONOS" + node +
3426 " expected a size of " + str( size ) +
3427 " for set " + onosSetName +
3428 " but got " + str( sizeResponses[ i ] ) )
3429 utilities.assert_equals( expect=main.TRUE,
3430 actual=sizeResults,
3431 onpass="Set sizes are correct",
3432 onfail="Set sizes are incorrect" )
3433
3434 main.step( "Distributed Set add()" )
3435 onosSet.add( addValue )
3436 addResponses = []
3437 threads = []
3438 for i in main.activeNodes:
3439 t = main.Thread( target=main.CLIs[i].setTestAdd,
3440 name="setTestAdd-" + str( i ),
3441 args=[ onosSetName, addValue ] )
3442 threads.append( t )
3443 t.start()
3444 for t in threads:
3445 t.join()
3446 addResponses.append( t.result )
3447
3448 # main.TRUE = successfully changed the set
3449 # main.FALSE = action resulted in no change in set
3450 # main.ERROR - Some error in executing the function
3451 addResults = main.TRUE
3452 for i in range( len( main.activeNodes ) ):
3453 if addResponses[ i ] == main.TRUE:
3454 # All is well
3455 pass
3456 elif addResponses[ i ] == main.FALSE:
3457 # Already in set, probably fine
3458 pass
3459 elif addResponses[ i ] == main.ERROR:
3460 # Error in execution
3461 addResults = main.FALSE
3462 else:
3463 # unexpected result
3464 addResults = main.FALSE
3465 if addResults != main.TRUE:
3466 main.log.error( "Error executing set add" )
3467
3468 # Check if set is still correct
3469 size = len( onosSet )
3470 getResponses = []
3471 threads = []
3472 for i in main.activeNodes:
3473 t = main.Thread( target=main.CLIs[i].setTestGet,
3474 name="setTestGet-" + str( i ),
3475 args=[ onosSetName ] )
3476 threads.append( t )
3477 t.start()
3478 for t in threads:
3479 t.join()
3480 getResponses.append( t.result )
3481 getResults = main.TRUE
3482 for i in range( len( main.activeNodes ) ):
3483 node = str( main.activeNodes[i] + 1 )
3484 if isinstance( getResponses[ i ], list):
3485 current = set( getResponses[ i ] )
3486 if len( current ) == len( getResponses[ i ] ):
3487 # no repeats
3488 if onosSet != current:
3489 main.log.error( "ONOS" + node + " has incorrect view" +
3490 " of set " + onosSetName + ":\n" +
3491 str( getResponses[ i ] ) )
3492 main.log.debug( "Expected: " + str( onosSet ) )
3493 main.log.debug( "Actual: " + str( current ) )
3494 getResults = main.FALSE
3495 else:
3496 # error, set is not a set
3497 main.log.error( "ONOS" + node + " has repeat elements in" +
3498 " set " + onosSetName + ":\n" +
3499 str( getResponses[ i ] ) )
3500 getResults = main.FALSE
3501 elif getResponses[ i ] == main.ERROR:
3502 getResults = main.FALSE
3503 sizeResponses = []
3504 threads = []
3505 for i in main.activeNodes:
3506 t = main.Thread( target=main.CLIs[i].setTestSize,
3507 name="setTestSize-" + str( i ),
3508 args=[ onosSetName ] )
3509 threads.append( t )
3510 t.start()
3511 for t in threads:
3512 t.join()
3513 sizeResponses.append( t.result )
3514 sizeResults = main.TRUE
3515 for i in range( len( main.activeNodes ) ):
3516 node = str( main.activeNodes[i] + 1 )
3517 if size != sizeResponses[ i ]:
3518 sizeResults = main.FALSE
3519 main.log.error( "ONOS" + node +
3520 " expected a size of " + str( size ) +
3521 " for set " + onosSetName +
3522 " but got " + str( sizeResponses[ i ] ) )
3523 addResults = addResults and getResults and sizeResults
3524 utilities.assert_equals( expect=main.TRUE,
3525 actual=addResults,
3526 onpass="Set add correct",
3527 onfail="Set add was incorrect" )
3528
3529 main.step( "Distributed Set addAll()" )
3530 onosSet.update( addAllValue.split() )
3531 addResponses = []
3532 threads = []
3533 for i in main.activeNodes:
3534 t = main.Thread( target=main.CLIs[i].setTestAdd,
3535 name="setTestAddAll-" + str( i ),
3536 args=[ onosSetName, addAllValue ] )
3537 threads.append( t )
3538 t.start()
3539 for t in threads:
3540 t.join()
3541 addResponses.append( t.result )
3542
3543 # main.TRUE = successfully changed the set
3544 # main.FALSE = action resulted in no change in set
3545 # main.ERROR - Some error in executing the function
3546 addAllResults = main.TRUE
3547 for i in range( len( main.activeNodes ) ):
3548 if addResponses[ i ] == main.TRUE:
3549 # All is well
3550 pass
3551 elif addResponses[ i ] == main.FALSE:
3552 # Already in set, probably fine
3553 pass
3554 elif addResponses[ i ] == main.ERROR:
3555 # Error in execution
3556 addAllResults = main.FALSE
3557 else:
3558 # unexpected result
3559 addAllResults = main.FALSE
3560 if addAllResults != main.TRUE:
3561 main.log.error( "Error executing set addAll" )
3562
3563 # Check if set is still correct
3564 size = len( onosSet )
3565 getResponses = []
3566 threads = []
3567 for i in main.activeNodes:
3568 t = main.Thread( target=main.CLIs[i].setTestGet,
3569 name="setTestGet-" + str( i ),
3570 args=[ onosSetName ] )
3571 threads.append( t )
3572 t.start()
3573 for t in threads:
3574 t.join()
3575 getResponses.append( t.result )
3576 getResults = main.TRUE
3577 for i in range( len( main.activeNodes ) ):
3578 node = str( main.activeNodes[i] + 1 )
3579 if isinstance( getResponses[ i ], list):
3580 current = set( getResponses[ i ] )
3581 if len( current ) == len( getResponses[ i ] ):
3582 # no repeats
3583 if onosSet != current:
3584 main.log.error( "ONOS" + node +
3585 " has incorrect view" +
3586 " of set " + onosSetName + ":\n" +
3587 str( getResponses[ i ] ) )
3588 main.log.debug( "Expected: " + str( onosSet ) )
3589 main.log.debug( "Actual: " + str( current ) )
3590 getResults = main.FALSE
3591 else:
3592 # error, set is not a set
3593 main.log.error( "ONOS" + node +
3594 " has repeat elements in" +
3595 " set " + onosSetName + ":\n" +
3596 str( getResponses[ i ] ) )
3597 getResults = main.FALSE
3598 elif getResponses[ i ] == main.ERROR:
3599 getResults = main.FALSE
3600 sizeResponses = []
3601 threads = []
3602 for i in main.activeNodes:
3603 t = main.Thread( target=main.CLIs[i].setTestSize,
3604 name="setTestSize-" + str( i ),
3605 args=[ onosSetName ] )
3606 threads.append( t )
3607 t.start()
3608 for t in threads:
3609 t.join()
3610 sizeResponses.append( t.result )
3611 sizeResults = main.TRUE
3612 for i in range( len( main.activeNodes ) ):
3613 node = str( main.activeNodes[i] + 1 )
3614 if size != sizeResponses[ i ]:
3615 sizeResults = main.FALSE
3616 main.log.error( "ONOS" + node +
3617 " expected a size of " + str( size ) +
3618 " for set " + onosSetName +
3619 " but got " + str( sizeResponses[ i ] ) )
3620 addAllResults = addAllResults and getResults and sizeResults
3621 utilities.assert_equals( expect=main.TRUE,
3622 actual=addAllResults,
3623 onpass="Set addAll correct",
3624 onfail="Set addAll was incorrect" )
3625
3626 main.step( "Distributed Set contains()" )
3627 containsResponses = []
3628 threads = []
3629 for i in main.activeNodes:
3630 t = main.Thread( target=main.CLIs[i].setTestGet,
3631 name="setContains-" + str( i ),
3632 args=[ onosSetName ],
3633 kwargs={ "values": addValue } )
3634 threads.append( t )
3635 t.start()
3636 for t in threads:
3637 t.join()
3638 # NOTE: This is the tuple
3639 containsResponses.append( t.result )
3640
3641 containsResults = main.TRUE
3642 for i in range( len( main.activeNodes ) ):
3643 if containsResponses[ i ] == main.ERROR:
3644 containsResults = main.FALSE
3645 else:
3646 containsResults = containsResults and\
3647 containsResponses[ i ][ 1 ]
3648 utilities.assert_equals( expect=main.TRUE,
3649 actual=containsResults,
3650 onpass="Set contains is functional",
3651 onfail="Set contains failed" )
3652
3653 main.step( "Distributed Set containsAll()" )
3654 containsAllResponses = []
3655 threads = []
3656 for i in main.activeNodes:
3657 t = main.Thread( target=main.CLIs[i].setTestGet,
3658 name="setContainsAll-" + str( i ),
3659 args=[ onosSetName ],
3660 kwargs={ "values": addAllValue } )
3661 threads.append( t )
3662 t.start()
3663 for t in threads:
3664 t.join()
3665 # NOTE: This is the tuple
3666 containsAllResponses.append( t.result )
3667
3668 containsAllResults = main.TRUE
3669 for i in range( len( main.activeNodes ) ):
3670 if containsResponses[ i ] == main.ERROR:
3671 containsResults = main.FALSE
3672 else:
3673 containsResults = containsResults and\
3674 containsResponses[ i ][ 1 ]
3675 utilities.assert_equals( expect=main.TRUE,
3676 actual=containsAllResults,
3677 onpass="Set containsAll is functional",
3678 onfail="Set containsAll failed" )
3679
3680 main.step( "Distributed Set remove()" )
3681 onosSet.remove( addValue )
3682 removeResponses = []
3683 threads = []
3684 for i in main.activeNodes:
3685 t = main.Thread( target=main.CLIs[i].setTestRemove,
3686 name="setTestRemove-" + str( i ),
3687 args=[ onosSetName, addValue ] )
3688 threads.append( t )
3689 t.start()
3690 for t in threads:
3691 t.join()
3692 removeResponses.append( t.result )
3693
3694 # main.TRUE = successfully changed the set
3695 # main.FALSE = action resulted in no change in set
3696 # main.ERROR - Some error in executing the function
3697 removeResults = main.TRUE
3698 for i in range( len( main.activeNodes ) ):
3699 if removeResponses[ i ] == main.TRUE:
3700 # All is well
3701 pass
3702 elif removeResponses[ i ] == main.FALSE:
3703 # not in set, probably fine
3704 pass
3705 elif removeResponses[ i ] == main.ERROR:
3706 # Error in execution
3707 removeResults = main.FALSE
3708 else:
3709 # unexpected result
3710 removeResults = main.FALSE
3711 if removeResults != main.TRUE:
3712 main.log.error( "Error executing set remove" )
3713
3714 # Check if set is still correct
3715 size = len( onosSet )
3716 getResponses = []
3717 threads = []
3718 for i in main.activeNodes:
3719 t = main.Thread( target=main.CLIs[i].setTestGet,
3720 name="setTestGet-" + str( i ),
3721 args=[ onosSetName ] )
3722 threads.append( t )
3723 t.start()
3724 for t in threads:
3725 t.join()
3726 getResponses.append( t.result )
3727 getResults = main.TRUE
3728 for i in range( len( main.activeNodes ) ):
3729 node = str( main.activeNodes[i] + 1 )
3730 if isinstance( getResponses[ i ], list):
3731 current = set( getResponses[ i ] )
3732 if len( current ) == len( getResponses[ i ] ):
3733 # no repeats
3734 if onosSet != current:
3735 main.log.error( "ONOS" + node +
3736 " has incorrect view" +
3737 " of set " + onosSetName + ":\n" +
3738 str( getResponses[ i ] ) )
3739 main.log.debug( "Expected: " + str( onosSet ) )
3740 main.log.debug( "Actual: " + str( current ) )
3741 getResults = main.FALSE
3742 else:
3743 # error, set is not a set
3744 main.log.error( "ONOS" + node +
3745 " has repeat elements in" +
3746 " set " + onosSetName + ":\n" +
3747 str( getResponses[ i ] ) )
3748 getResults = main.FALSE
3749 elif getResponses[ i ] == main.ERROR:
3750 getResults = main.FALSE
3751 sizeResponses = []
3752 threads = []
3753 for i in main.activeNodes:
3754 t = main.Thread( target=main.CLIs[i].setTestSize,
3755 name="setTestSize-" + str( i ),
3756 args=[ onosSetName ] )
3757 threads.append( t )
3758 t.start()
3759 for t in threads:
3760 t.join()
3761 sizeResponses.append( t.result )
3762 sizeResults = main.TRUE
3763 for i in range( len( main.activeNodes ) ):
3764 node = str( main.activeNodes[i] + 1 )
3765 if size != sizeResponses[ i ]:
3766 sizeResults = main.FALSE
3767 main.log.error( "ONOS" + node +
3768 " expected a size of " + str( size ) +
3769 " for set " + onosSetName +
3770 " but got " + str( sizeResponses[ i ] ) )
3771 removeResults = removeResults and getResults and sizeResults
3772 utilities.assert_equals( expect=main.TRUE,
3773 actual=removeResults,
3774 onpass="Set remove correct",
3775 onfail="Set remove was incorrect" )
3776
3777 main.step( "Distributed Set removeAll()" )
3778 onosSet.difference_update( addAllValue.split() )
3779 removeAllResponses = []
3780 threads = []
3781 try:
3782 for i in main.activeNodes:
3783 t = main.Thread( target=main.CLIs[i].setTestRemove,
3784 name="setTestRemoveAll-" + str( i ),
3785 args=[ onosSetName, addAllValue ] )
3786 threads.append( t )
3787 t.start()
3788 for t in threads:
3789 t.join()
3790 removeAllResponses.append( t.result )
3791 except Exception, e:
3792 main.log.exception(e)
3793
3794 # main.TRUE = successfully changed the set
3795 # main.FALSE = action resulted in no change in set
3796 # main.ERROR - Some error in executing the function
3797 removeAllResults = main.TRUE
3798 for i in range( len( main.activeNodes ) ):
3799 if removeAllResponses[ i ] == main.TRUE:
3800 # All is well
3801 pass
3802 elif removeAllResponses[ i ] == main.FALSE:
3803 # not in set, probably fine
3804 pass
3805 elif removeAllResponses[ i ] == main.ERROR:
3806 # Error in execution
3807 removeAllResults = main.FALSE
3808 else:
3809 # unexpected result
3810 removeAllResults = main.FALSE
3811 if removeAllResults != main.TRUE:
3812 main.log.error( "Error executing set removeAll" )
3813
3814 # Check if set is still correct
3815 size = len( onosSet )
3816 getResponses = []
3817 threads = []
3818 for i in main.activeNodes:
3819 t = main.Thread( target=main.CLIs[i].setTestGet,
3820 name="setTestGet-" + str( i ),
3821 args=[ onosSetName ] )
3822 threads.append( t )
3823 t.start()
3824 for t in threads:
3825 t.join()
3826 getResponses.append( t.result )
3827 getResults = main.TRUE
3828 for i in range( len( main.activeNodes ) ):
3829 node = str( main.activeNodes[i] + 1 )
3830 if isinstance( getResponses[ i ], list):
3831 current = set( getResponses[ i ] )
3832 if len( current ) == len( getResponses[ i ] ):
3833 # no repeats
3834 if onosSet != current:
3835 main.log.error( "ONOS" + node +
3836 " has incorrect view" +
3837 " of set " + onosSetName + ":\n" +
3838 str( getResponses[ i ] ) )
3839 main.log.debug( "Expected: " + str( onosSet ) )
3840 main.log.debug( "Actual: " + str( current ) )
3841 getResults = main.FALSE
3842 else:
3843 # error, set is not a set
3844 main.log.error( "ONOS" + node +
3845 " has repeat elements in" +
3846 " set " + onosSetName + ":\n" +
3847 str( getResponses[ i ] ) )
3848 getResults = main.FALSE
3849 elif getResponses[ i ] == main.ERROR:
3850 getResults = main.FALSE
3851 sizeResponses = []
3852 threads = []
3853 for i in main.activeNodes:
3854 t = main.Thread( target=main.CLIs[i].setTestSize,
3855 name="setTestSize-" + str( i ),
3856 args=[ onosSetName ] )
3857 threads.append( t )
3858 t.start()
3859 for t in threads:
3860 t.join()
3861 sizeResponses.append( t.result )
3862 sizeResults = main.TRUE
3863 for i in range( len( main.activeNodes ) ):
3864 node = str( main.activeNodes[i] + 1 )
3865 if size != sizeResponses[ i ]:
3866 sizeResults = main.FALSE
3867 main.log.error( "ONOS" + node +
3868 " expected a size of " + str( size ) +
3869 " for set " + onosSetName +
3870 " but got " + str( sizeResponses[ i ] ) )
3871 removeAllResults = removeAllResults and getResults and sizeResults
3872 utilities.assert_equals( expect=main.TRUE,
3873 actual=removeAllResults,
3874 onpass="Set removeAll correct",
3875 onfail="Set removeAll was incorrect" )
3876
3877 main.step( "Distributed Set addAll()" )
3878 onosSet.update( addAllValue.split() )
3879 addResponses = []
3880 threads = []
3881 for i in main.activeNodes:
3882 t = main.Thread( target=main.CLIs[i].setTestAdd,
3883 name="setTestAddAll-" + str( i ),
3884 args=[ onosSetName, addAllValue ] )
3885 threads.append( t )
3886 t.start()
3887 for t in threads:
3888 t.join()
3889 addResponses.append( t.result )
3890
3891 # main.TRUE = successfully changed the set
3892 # main.FALSE = action resulted in no change in set
3893 # main.ERROR - Some error in executing the function
3894 addAllResults = main.TRUE
3895 for i in range( len( main.activeNodes ) ):
3896 if addResponses[ i ] == main.TRUE:
3897 # All is well
3898 pass
3899 elif addResponses[ i ] == main.FALSE:
3900 # Already in set, probably fine
3901 pass
3902 elif addResponses[ i ] == main.ERROR:
3903 # Error in execution
3904 addAllResults = main.FALSE
3905 else:
3906 # unexpected result
3907 addAllResults = main.FALSE
3908 if addAllResults != main.TRUE:
3909 main.log.error( "Error executing set addAll" )
3910
3911 # Check if set is still correct
3912 size = len( onosSet )
3913 getResponses = []
3914 threads = []
3915 for i in main.activeNodes:
3916 t = main.Thread( target=main.CLIs[i].setTestGet,
3917 name="setTestGet-" + str( i ),
3918 args=[ onosSetName ] )
3919 threads.append( t )
3920 t.start()
3921 for t in threads:
3922 t.join()
3923 getResponses.append( t.result )
3924 getResults = main.TRUE
3925 for i in range( len( main.activeNodes ) ):
3926 node = str( main.activeNodes[i] + 1 )
3927 if isinstance( getResponses[ i ], list):
3928 current = set( getResponses[ i ] )
3929 if len( current ) == len( getResponses[ i ] ):
3930 # no repeats
3931 if onosSet != current:
3932 main.log.error( "ONOS" + node +
3933 " has incorrect view" +
3934 " of set " + onosSetName + ":\n" +
3935 str( getResponses[ i ] ) )
3936 main.log.debug( "Expected: " + str( onosSet ) )
3937 main.log.debug( "Actual: " + str( current ) )
3938 getResults = main.FALSE
3939 else:
3940 # error, set is not a set
3941 main.log.error( "ONOS" + node +
3942 " has repeat elements in" +
3943 " set " + onosSetName + ":\n" +
3944 str( getResponses[ i ] ) )
3945 getResults = main.FALSE
3946 elif getResponses[ i ] == main.ERROR:
3947 getResults = main.FALSE
3948 sizeResponses = []
3949 threads = []
3950 for i in main.activeNodes:
3951 t = main.Thread( target=main.CLIs[i].setTestSize,
3952 name="setTestSize-" + str( i ),
3953 args=[ onosSetName ] )
3954 threads.append( t )
3955 t.start()
3956 for t in threads:
3957 t.join()
3958 sizeResponses.append( t.result )
3959 sizeResults = main.TRUE
3960 for i in range( len( main.activeNodes ) ):
3961 node = str( main.activeNodes[i] + 1 )
3962 if size != sizeResponses[ i ]:
3963 sizeResults = main.FALSE
3964 main.log.error( "ONOS" + node +
3965 " expected a size of " + str( size ) +
3966 " for set " + onosSetName +
3967 " but got " + str( sizeResponses[ i ] ) )
3968 addAllResults = addAllResults and getResults and sizeResults
3969 utilities.assert_equals( expect=main.TRUE,
3970 actual=addAllResults,
3971 onpass="Set addAll correct",
3972 onfail="Set addAll was incorrect" )
3973
3974 main.step( "Distributed Set clear()" )
3975 onosSet.clear()
3976 clearResponses = []
3977 threads = []
3978 for i in main.activeNodes:
3979 t = main.Thread( target=main.CLIs[i].setTestRemove,
3980 name="setTestClear-" + str( i ),
3981 args=[ onosSetName, " "], # Values doesn't matter
3982 kwargs={ "clear": True } )
3983 threads.append( t )
3984 t.start()
3985 for t in threads:
3986 t.join()
3987 clearResponses.append( t.result )
3988
3989 # main.TRUE = successfully changed the set
3990 # main.FALSE = action resulted in no change in set
3991 # main.ERROR - Some error in executing the function
3992 clearResults = main.TRUE
3993 for i in range( len( main.activeNodes ) ):
3994 if clearResponses[ i ] == main.TRUE:
3995 # All is well
3996 pass
3997 elif clearResponses[ i ] == main.FALSE:
3998 # Nothing set, probably fine
3999 pass
4000 elif clearResponses[ i ] == main.ERROR:
4001 # Error in execution
4002 clearResults = main.FALSE
4003 else:
4004 # unexpected result
4005 clearResults = main.FALSE
4006 if clearResults != main.TRUE:
4007 main.log.error( "Error executing set clear" )
4008
4009 # Check if set is still correct
4010 size = len( onosSet )
4011 getResponses = []
4012 threads = []
4013 for i in main.activeNodes:
4014 t = main.Thread( target=main.CLIs[i].setTestGet,
4015 name="setTestGet-" + str( i ),
4016 args=[ onosSetName ] )
4017 threads.append( t )
4018 t.start()
4019 for t in threads:
4020 t.join()
4021 getResponses.append( t.result )
4022 getResults = main.TRUE
4023 for i in range( len( main.activeNodes ) ):
4024 node = str( main.activeNodes[i] + 1 )
4025 if isinstance( getResponses[ i ], list):
4026 current = set( getResponses[ i ] )
4027 if len( current ) == len( getResponses[ i ] ):
4028 # no repeats
4029 if onosSet != current:
4030 main.log.error( "ONOS" + node +
4031 " has incorrect view" +
4032 " of set " + onosSetName + ":\n" +
4033 str( getResponses[ i ] ) )
4034 main.log.debug( "Expected: " + str( onosSet ) )
4035 main.log.debug( "Actual: " + str( current ) )
4036 getResults = main.FALSE
4037 else:
4038 # error, set is not a set
4039 main.log.error( "ONOS" + node +
4040 " has repeat elements in" +
4041 " set " + onosSetName + ":\n" +
4042 str( getResponses[ i ] ) )
4043 getResults = main.FALSE
4044 elif getResponses[ i ] == main.ERROR:
4045 getResults = main.FALSE
4046 sizeResponses = []
4047 threads = []
4048 for i in main.activeNodes:
4049 t = main.Thread( target=main.CLIs[i].setTestSize,
4050 name="setTestSize-" + str( i ),
4051 args=[ onosSetName ] )
4052 threads.append( t )
4053 t.start()
4054 for t in threads:
4055 t.join()
4056 sizeResponses.append( t.result )
4057 sizeResults = main.TRUE
4058 for i in range( len( main.activeNodes ) ):
4059 node = str( main.activeNodes[i] + 1 )
4060 if size != sizeResponses[ i ]:
4061 sizeResults = main.FALSE
4062 main.log.error( "ONOS" + node +
4063 " expected a size of " + str( size ) +
4064 " for set " + onosSetName +
4065 " but got " + str( sizeResponses[ i ] ) )
4066 clearResults = clearResults and getResults and sizeResults
4067 utilities.assert_equals( expect=main.TRUE,
4068 actual=clearResults,
4069 onpass="Set clear correct",
4070 onfail="Set clear was incorrect" )
4071
4072 main.step( "Distributed Set addAll()" )
4073 onosSet.update( addAllValue.split() )
4074 addResponses = []
4075 threads = []
4076 for i in main.activeNodes:
4077 t = main.Thread( target=main.CLIs[i].setTestAdd,
4078 name="setTestAddAll-" + str( i ),
4079 args=[ onosSetName, addAllValue ] )
4080 threads.append( t )
4081 t.start()
4082 for t in threads:
4083 t.join()
4084 addResponses.append( t.result )
4085
4086 # main.TRUE = successfully changed the set
4087 # main.FALSE = action resulted in no change in set
4088 # main.ERROR - Some error in executing the function
4089 addAllResults = main.TRUE
4090 for i in range( len( main.activeNodes ) ):
4091 if addResponses[ i ] == main.TRUE:
4092 # All is well
4093 pass
4094 elif addResponses[ i ] == main.FALSE:
4095 # Already in set, probably fine
4096 pass
4097 elif addResponses[ i ] == main.ERROR:
4098 # Error in execution
4099 addAllResults = main.FALSE
4100 else:
4101 # unexpected result
4102 addAllResults = main.FALSE
4103 if addAllResults != main.TRUE:
4104 main.log.error( "Error executing set addAll" )
4105
4106 # Check if set is still correct
4107 size = len( onosSet )
4108 getResponses = []
4109 threads = []
4110 for i in main.activeNodes:
4111 t = main.Thread( target=main.CLIs[i].setTestGet,
4112 name="setTestGet-" + str( i ),
4113 args=[ onosSetName ] )
4114 threads.append( t )
4115 t.start()
4116 for t in threads:
4117 t.join()
4118 getResponses.append( t.result )
4119 getResults = main.TRUE
4120 for i in range( len( main.activeNodes ) ):
4121 node = str( main.activeNodes[i] + 1 )
4122 if isinstance( getResponses[ i ], list):
4123 current = set( getResponses[ i ] )
4124 if len( current ) == len( getResponses[ i ] ):
4125 # no repeats
4126 if onosSet != current:
4127 main.log.error( "ONOS" + node +
4128 " has incorrect view" +
4129 " of set " + onosSetName + ":\n" +
4130 str( getResponses[ i ] ) )
4131 main.log.debug( "Expected: " + str( onosSet ) )
4132 main.log.debug( "Actual: " + str( current ) )
4133 getResults = main.FALSE
4134 else:
4135 # error, set is not a set
4136 main.log.error( "ONOS" + node +
4137 " has repeat elements in" +
4138 " set " + onosSetName + ":\n" +
4139 str( getResponses[ i ] ) )
4140 getResults = main.FALSE
4141 elif getResponses[ i ] == main.ERROR:
4142 getResults = main.FALSE
4143 sizeResponses = []
4144 threads = []
4145 for i in main.activeNodes:
4146 t = main.Thread( target=main.CLIs[i].setTestSize,
4147 name="setTestSize-" + str( i ),
4148 args=[ onosSetName ] )
4149 threads.append( t )
4150 t.start()
4151 for t in threads:
4152 t.join()
4153 sizeResponses.append( t.result )
4154 sizeResults = main.TRUE
4155 for i in range( len( main.activeNodes ) ):
4156 node = str( main.activeNodes[i] + 1 )
4157 if size != sizeResponses[ i ]:
4158 sizeResults = main.FALSE
4159 main.log.error( "ONOS" + node +
4160 " expected a size of " + str( size ) +
4161 " for set " + onosSetName +
4162 " but got " + str( sizeResponses[ i ] ) )
4163 addAllResults = addAllResults and getResults and sizeResults
4164 utilities.assert_equals( expect=main.TRUE,
4165 actual=addAllResults,
4166 onpass="Set addAll correct",
4167 onfail="Set addAll was incorrect" )
4168
4169 main.step( "Distributed Set retain()" )
4170 onosSet.intersection_update( retainValue.split() )
4171 retainResponses = []
4172 threads = []
4173 for i in main.activeNodes:
4174 t = main.Thread( target=main.CLIs[i].setTestRemove,
4175 name="setTestRetain-" + str( i ),
4176 args=[ onosSetName, retainValue ],
4177 kwargs={ "retain": True } )
4178 threads.append( t )
4179 t.start()
4180 for t in threads:
4181 t.join()
4182 retainResponses.append( t.result )
4183
4184 # main.TRUE = successfully changed the set
4185 # main.FALSE = action resulted in no change in set
4186 # main.ERROR - Some error in executing the function
4187 retainResults = main.TRUE
4188 for i in range( len( main.activeNodes ) ):
4189 if retainResponses[ i ] == main.TRUE:
4190 # All is well
4191 pass
4192 elif retainResponses[ i ] == main.FALSE:
4193 # Already in set, probably fine
4194 pass
4195 elif retainResponses[ i ] == main.ERROR:
4196 # Error in execution
4197 retainResults = main.FALSE
4198 else:
4199 # unexpected result
4200 retainResults = main.FALSE
4201 if retainResults != main.TRUE:
4202 main.log.error( "Error executing set retain" )
4203
4204 # Check if set is still correct
4205 size = len( onosSet )
4206 getResponses = []
4207 threads = []
4208 for i in main.activeNodes:
4209 t = main.Thread( target=main.CLIs[i].setTestGet,
4210 name="setTestGet-" + str( i ),
4211 args=[ onosSetName ] )
4212 threads.append( t )
4213 t.start()
4214 for t in threads:
4215 t.join()
4216 getResponses.append( t.result )
4217 getResults = main.TRUE
4218 for i in range( len( main.activeNodes ) ):
4219 node = str( main.activeNodes[i] + 1 )
4220 if isinstance( getResponses[ i ], list):
4221 current = set( getResponses[ i ] )
4222 if len( current ) == len( getResponses[ i ] ):
4223 # no repeats
4224 if onosSet != current:
4225 main.log.error( "ONOS" + node +
4226 " has incorrect view" +
4227 " of set " + onosSetName + ":\n" +
4228 str( getResponses[ i ] ) )
4229 main.log.debug( "Expected: " + str( onosSet ) )
4230 main.log.debug( "Actual: " + str( current ) )
4231 getResults = main.FALSE
4232 else:
4233 # error, set is not a set
4234 main.log.error( "ONOS" + node +
4235 " has repeat elements in" +
4236 " set " + onosSetName + ":\n" +
4237 str( getResponses[ i ] ) )
4238 getResults = main.FALSE
4239 elif getResponses[ i ] == main.ERROR:
4240 getResults = main.FALSE
4241 sizeResponses = []
4242 threads = []
4243 for i in main.activeNodes:
4244 t = main.Thread( target=main.CLIs[i].setTestSize,
4245 name="setTestSize-" + str( i ),
4246 args=[ onosSetName ] )
4247 threads.append( t )
4248 t.start()
4249 for t in threads:
4250 t.join()
4251 sizeResponses.append( t.result )
4252 sizeResults = main.TRUE
4253 for i in range( len( main.activeNodes ) ):
4254 node = str( main.activeNodes[i] + 1 )
4255 if size != sizeResponses[ i ]:
4256 sizeResults = main.FALSE
4257 main.log.error( "ONOS" + node + " expected a size of " +
4258 str( size ) + " for set " + onosSetName +
4259 " but got " + str( sizeResponses[ i ] ) )
4260 retainResults = retainResults and getResults and sizeResults
4261 utilities.assert_equals( expect=main.TRUE,
4262 actual=retainResults,
4263 onpass="Set retain correct",
4264 onfail="Set retain was incorrect" )
4265
4266 # Transactional maps
4267 main.step( "Partitioned Transactional maps put" )
4268 tMapValue = "Testing"
4269 numKeys = 100
4270 putResult = True
4271 node = main.activeNodes[0]
4272 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4273 if putResponses and len( putResponses ) == 100:
4274 for i in putResponses:
4275 if putResponses[ i ][ 'value' ] != tMapValue:
4276 putResult = False
4277 else:
4278 putResult = False
4279 if not putResult:
4280 main.log.debug( "Put response values: " + str( putResponses ) )
4281 utilities.assert_equals( expect=True,
4282 actual=putResult,
4283 onpass="Partitioned Transactional Map put successful",
4284 onfail="Partitioned Transactional Map put values are incorrect" )
4285
4286 main.step( "Partitioned Transactional maps get" )
Jon Hall9bfadd22016-05-11 14:48:07 -07004287 # FIXME: is this sleep needed?
4288 time.sleep( 5 )
4289
Jon Hall9ebd1bd2016-04-19 01:37:17 -07004290 getCheck = True
4291 for n in range( 1, numKeys + 1 ):
4292 getResponses = []
4293 threads = []
4294 valueCheck = True
4295 for i in main.activeNodes:
4296 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4297 name="TMap-get-" + str( i ),
4298 args=[ "Key" + str( n ) ] )
4299 threads.append( t )
4300 t.start()
4301 for t in threads:
4302 t.join()
4303 getResponses.append( t.result )
4304 for node in getResponses:
4305 if node != tMapValue:
4306 valueCheck = False
4307 if not valueCheck:
4308 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4309 main.log.warn( getResponses )
4310 getCheck = getCheck and valueCheck
4311 utilities.assert_equals( expect=True,
4312 actual=getCheck,
4313 onpass="Partitioned Transactional Map get values were correct",
4314 onfail="Partitioned Transactional Map values incorrect" )