blob: 9ab1fce791b903f2b9c05703d1ae4bcbf4e47405 [file] [log] [blame]
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001"""
2Description: This test is to determine if ONOS can handle
3 dynamic scaling of the cluster size.
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE6: The scaling case.
13CASE7: Check state after control plane failure
14CASE8: Compare topo
15CASE9: Link s3-s28 down
16CASE10: Link s3-s28 up
17CASE11: Switch down
18CASE12: Switch up
19CASE13: Clean up
20CASE14: start election app on all onos nodes
21CASE15: Check that Leadership Election is still functional
22CASE16: Install Distributed Primitives app
23CASE17: Check for basic functionality with distributed primitives
24"""
25
26
27class HAscaling:
28
    def __init__( self ):
        # Default attribute expected on TestON test classes; presumably read
        # by the TestON harness — it is not used elsewhere in this file.
        self.default = ''
31
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump

        Also starts a local SimpleHTTPServer that serves a generated
        cluster-metadata file, and patches the onos-service script so the
        installed nodes fetch their cluster metadata from that server
        (this is what lets later cases re-scale the cluster).
        """
        import time
        import os
        import re
        main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        # Cap the requested controller count at what the bench can host
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
            from tests.HA.HAscaling.dependencies.Server import Server
            main.Server = Server()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        # Collect the CLI/node driver components TestON created for us;
        # stop at the first missing one (fewer components than numCtrls).
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )

        # NOTE(review): "successfull"/"Failled" below are typos, but they are
        # runtime report strings so they are intentionally left unchanged here.
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        utilities.assert_equals( expect=main.TRUE, actual=cellResult,
                                 onpass="Set cell successfull",
                                 onfail="Failled to set cell" )

        main.step( "Verify connectivity to cell" )
        verifyResult = main.ONOSbench.verifyCell()
        utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
                                 onpass="Verify cell passed",
                                 onfail="Failled to verify cell" )

        # FIXME:this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        # HTTP server that will serve the cluster metadata file to the nodes
        main.step( "Setup server for cluster metadata file" )
        port = main.params['server']['port']
        rootDir = os.path.dirname( main.testFile ) + "/dependencies"
        main.log.debug( "Root dir: {}".format( rootDir ) )
        status = main.Server.start( main.ONOSbench,
                                    rootDir,
                                    port=port,
                                    logDir=main.logdir + "/server.log" )
        utilities.assert_equals( expect=main.TRUE, actual=status,
                                 onpass="Server started",
                                 onfail="Failled to start SimpleHTTPServer" )

        # 'scaling' is a comma-separated list of cluster sizes; a trailing
        # "e" on an entry apparently means "equal partitions" — the digits
        # are the node count for this iteration.
        main.step( "Generate initial metadata file" )
        main.scaling = main.params['scaling'].split( "," )
        main.log.debug( main.scaling )
        scale = main.scaling.pop(0)
        main.log.debug( scale)
        if "e" in scale:
            equal = True
        else:
            equal = False
        main.log.debug( equal)
        main.numCtrls = int( re.search( "\d+", scale ).group(0) )
        genResult = main.Server.generateFile( main.numCtrls, equal=equal )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failled to generate new metadata file" )

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet( )
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAscaling"
        plotName = "Plot-HA"
        index = "0"
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki(graphs)

        # Keep a pristine copy of onos-service so the sed edit below can be
        # reverted after install.
        main.step( "Copying backup config files" )
        path = "~/onos/tools/package/bin/onos-service"
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 path,
                                 path + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params['server'].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # Slashes are escaped because this string is substituted into a sed
        # expression that uses '/' as its delimiter.
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # Inject an "export JAVA_OPTS=..." line right after the shebang's
        # "bash" so every node boots pointing at the metadata server.
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        # First expect consumes the echoed command (it contains metaFile),
        # second waits for the shell prompt.
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )

        main.step( "Creating ONOS package" )
        packageResult = main.ONOSbench.buckBuild()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )
        if not packageResult:
            main.cleanup()
            main.exit()

        # Install on ALL bench nodes; nodes beyond the current scale get
        # "-nf" so they are installed but not started yet.
        # NOTE(review): this indexes main.nodes up to maxNodes, which assumes
        # num_controllers == maxNodes — verify against the .params file.
        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for i in range( main.ONOSbench.maxNodes ):
            node = main.nodes[i]
            options = "-f"
            if i >= main.numCtrls:
                options = "-nf"  # Don't start more than the current scale
            tmpResult = main.ONOSbench.onosInstall( options=options,
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )

        # Cleanup custom onos-service file
        main.ONOSbench.scp( main.ONOSbench,
                            path + ".backup",
                            path,
                            direction="to" )

        # Up to two full passes over the running nodes before giving up
        main.step( "Checking if ONOS is up yet" )
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for i in range( main.numCtrls ):
                node = main.nodes[i]
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        # Start one CLI session per running node, in parallel threads
        main.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        # Startup failure is fatal: dump non-ACTIVE components and abort
        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split(',')
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[component]:
                    value = config[component][setting]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        # Verify app name <-> id mappings agree on every active node
        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
401
402 def CASE2( self, main ):
403 """
404 Assign devices to controllers
405 """
406 import re
407 assert main.numCtrls, "main.numCtrls not defined"
408 assert main, "main not defined"
409 assert utilities.assert_equals, "utilities.assert_equals not defined"
410 assert main.CLIs, "main.CLIs not defined"
411 assert main.nodes, "main.nodes not defined"
412
413 main.case( "Assigning devices to controllers" )
414 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
415 "and check that an ONOS node becomes the " +\
416 "master of the device."
417 main.step( "Assign switches to controllers" )
418
419 ipList = []
420 for i in range( main.ONOSbench.maxNodes ):
421 ipList.append( main.nodes[ i ].ip_address )
422 swList = []
423 for i in range( 1, 29 ):
424 swList.append( "s" + str( i ) )
425 main.Mininet1.assignSwController( sw=swList, ip=ipList )
426
427 mastershipCheck = main.TRUE
428 for i in range( 1, 29 ):
429 response = main.Mininet1.getSwController( "s" + str( i ) )
430 try:
431 main.log.info( str( response ) )
432 except Exception:
433 main.log.info( repr( response ) )
434 for node in main.nodes:
435 if re.search( "tcp:" + node.ip_address, response ):
436 mastershipCheck = mastershipCheck and main.TRUE
437 else:
438 main.log.error( "Error, node " + node.ip_address + " is " +
439 "not in the list of controllers s" +
440 str( i ) + " is connecting to." )
441 mastershipCheck = main.FALSE
442 utilities.assert_equals(
443 expect=main.TRUE,
444 actual=mastershipCheck,
445 onpass="Switch mastership assigned correctly",
446 onfail="Switches not assigned correctly to controllers" )
447
448 def CASE21( self, main ):
449 """
450 Assign mastership to controllers
451 """
452 import time
453 assert main.numCtrls, "main.numCtrls not defined"
454 assert main, "main not defined"
455 assert utilities.assert_equals, "utilities.assert_equals not defined"
456 assert main.CLIs, "main.CLIs not defined"
457 assert main.nodes, "main.nodes not defined"
458
459 main.case( "Assigning Controller roles for switches" )
460 main.caseExplanation = "Check that ONOS is connected to each " +\
461 "device. Then manually assign" +\
462 " mastership to specific ONOS nodes using" +\
463 " 'device-role'"
464 main.step( "Assign mastership of switches to specific controllers" )
465 # Manually assign mastership to the controller we want
466 roleCall = main.TRUE
467
468 ipList = [ ]
469 deviceList = []
470 onosCli = main.CLIs[ main.activeNodes[0] ]
471 try:
472 # Assign mastership to specific controllers. This assignment was
473 # determined for a 7 node cluser, but will work with any sized
474 # cluster
475 for i in range( 1, 29 ): # switches 1 through 28
476 # set up correct variables:
477 if i == 1:
478 c = 0
479 ip = main.nodes[ c ].ip_address # ONOS1
480 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
481 elif i == 2:
482 c = 1 % main.numCtrls
483 ip = main.nodes[ c ].ip_address # ONOS2
484 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
485 elif i == 3:
486 c = 1 % main.numCtrls
487 ip = main.nodes[ c ].ip_address # ONOS2
488 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
489 elif i == 4:
490 c = 3 % main.numCtrls
491 ip = main.nodes[ c ].ip_address # ONOS4
492 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
493 elif i == 5:
494 c = 2 % main.numCtrls
495 ip = main.nodes[ c ].ip_address # ONOS3
496 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
497 elif i == 6:
498 c = 2 % main.numCtrls
499 ip = main.nodes[ c ].ip_address # ONOS3
500 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
501 elif i == 7:
502 c = 5 % main.numCtrls
503 ip = main.nodes[ c ].ip_address # ONOS6
504 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
505 elif i >= 8 and i <= 17:
506 c = 4 % main.numCtrls
507 ip = main.nodes[ c ].ip_address # ONOS5
508 dpid = '3' + str( i ).zfill( 3 )
509 deviceId = onosCli.getDevice( dpid ).get( 'id' )
510 elif i >= 18 and i <= 27:
511 c = 6 % main.numCtrls
512 ip = main.nodes[ c ].ip_address # ONOS7
513 dpid = '6' + str( i ).zfill( 3 )
514 deviceId = onosCli.getDevice( dpid ).get( 'id' )
515 elif i == 28:
516 c = 0
517 ip = main.nodes[ c ].ip_address # ONOS1
518 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
519 else:
520 main.log.error( "You didn't write an else statement for " +
521 "switch s" + str( i ) )
522 roleCall = main.FALSE
523 # Assign switch
524 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
525 # TODO: make this controller dynamic
526 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
527 ipList.append( ip )
528 deviceList.append( deviceId )
529 except ( AttributeError, AssertionError ):
530 main.log.exception( "Something is wrong with ONOS device view" )
531 main.log.info( onosCli.devices() )
532 utilities.assert_equals(
533 expect=main.TRUE,
534 actual=roleCall,
535 onpass="Re-assigned switch mastership to designated controller",
536 onfail="Something wrong with deviceRole calls" )
537
538 main.step( "Check mastership was correctly assigned" )
539 roleCheck = main.TRUE
540 # NOTE: This is due to the fact that device mastership change is not
541 # atomic and is actually a multi step process
542 time.sleep( 5 )
543 for i in range( len( ipList ) ):
544 ip = ipList[i]
545 deviceId = deviceList[i]
546 # Check assignment
547 master = onosCli.getRole( deviceId ).get( 'master' )
548 if ip in master:
549 roleCheck = roleCheck and main.TRUE
550 else:
551 roleCheck = roleCheck and main.FALSE
552 main.log.error( "Error, controller " + ip + " is not" +
553 " master " + "of device " +
554 str( deviceId ) + ". Master is " +
555 repr( master ) + "." )
556 utilities.assert_equals(
557 expect=main.TRUE,
558 actual=roleCheck,
559 onpass="Switches were successfully reassigned to designated " +
560 "controller",
561 onfail="Switches were not successfully reassigned" )
562
    def CASE3( self, main ):
        """
        Assign intents

        Uses reactive forwarding + pingall to discover hosts, installs
        host-to-host intents between h8-h17 and h18-h27, then waits for the
        intents to disperse (anti-entropy) to every active node and records
        how long that took for the Jenkins plots.
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # labels/data are module globals created in CASE1; recreate them if
        # this case is run standalone.
        try:
            labels
        except NameError:
            main.log.error( "labels not defined, setting to []" )
            labels = []
        try:
            data
        except NameError:
            main.log.error( "data not defined, setting to []" )
            data = []
        # NOTE: we must reinstall intents until we have a persistant intent
        #       datastore!
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                                "assign predetermined host-to-host intents." +\
                                " After installation, check that the intent" +\
                                " is distributed to all nodes and the state" +\
                                " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn("First pingall failed. Trying again...")
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass= passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        # Add an intent between h<i> and h<i+10> for i in 8..17. MAC
        # addresses of the obelisk hosts encode the host number in hex.
        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                # Spread the intent submissions across the active nodes
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        # Start timing intent dispersion; intentStop stays None until all
        # submitted intents are visible.
        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        # Diagnostic dump: leadership of the intent work partitions
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False)
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        # Poll (up to ~100s) until every active node reports the same set of
        # intent IDs, all in INSTALLED state.
        main.step( "Intent Anti-Entropy dispersion" )
        for j in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        # gossipTime is read by later cases / plotting, hence the global
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        # Record under a unique "Gossip IntentsN" label for the csv plots
        append = False
        title = "Gossip Intents"
        count = 1
        while append is False:
            curTitle = title + str( count )
            if curTitle not in labels:
                labels.append( curTitle )
                data.append( str( gossipTime ) )
                append = True
            else:
                count += 1
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        # Last-chance retry: wait a minute and re-dump all of the
        # diagnostics gathered above.
        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
981
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Verifies the host intents added in CASE3 actually forward traffic:
        1. Poll intent state (up to 40 times, 1s apart) until every intent
           reports INSTALLED.
        2. Ping each intent's host pair: h8<->h18 through h17<->h27.
        3. Dump leadership, partition and pending-map state for debugging.
        4. If any intent was still not INSTALLED, wait 60 seconds, dump
           state again, and repeat the ping check once.

        All ONOS queries go through the CLI of the first active node.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # Use the first active node's CLI for all ONOS queries in this case
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll once per second, up to 40 tries, until all intents are INSTALLED
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Each intent connects h<i> to h<i+10>; ping every such pair
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                # NOTE(review): if intents() itself raised, tmpIntents would
                # be unbound here; presumed safe since the driver call
                # normally returns a string or None — confirm
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                # Expect one work-partition topic per intent partition (0-13)
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If intents still weren't all INSTALLED, wait and re-verify once
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1263
    def CASE5( self, main ):
        """
        Reading state of ONOS.

        Gathers state from every active ONOS node, checks that the views
        are consistent across nodes, and snapshots them into module-level
        globals for later comparison (after the scaling event):

        - mastershipState: switch roles (requires every device to have a
          master and all nodes to agree)
        - intentState: intents (all nodes must report the same set)
        - flowState / flows: flow rules from ONOS and the OF tables read
          directly from switches s1-s28
        - starts ten long-running background pings via Mininet2
        - collects devices/hosts/ports/links/clusters from each node,
          checks host/cluster view consistency, expects exactly one SCC,
          and compares devices/links/hosts against Mininet's view

        Per-node queries are issued concurrently with main.Thread workers.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents,flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output was empty or contained "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Views differ: dump each node's roles for debugging
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            # All views agree: snapshot for later comparison
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        consistentIntents = True  # Are Intents consistent across nodes?
        intentsResults = True  # Could we read Intents from ONOS?
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent        ONOS1      ONOS2    ...
            # 0x01          INSTALLED  INSTALLING
            # ...           ...        ...
            # ...           ...        ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            # Snapshot the agreed-upon intents for later comparison
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep(30)
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[i] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        # Only the flow COUNT is compared, not flow contents
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        global flows
        flows = []
        # Read the OpenFlow tables straight from switches s1-s28
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source1' ],
            target=main.params[ 'PING' ][ 'target1' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source2' ],
            target=main.params[ 'PING' ][ 'target2' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source3' ],
            target=main.params[ 'PING' ][ 'target3' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source4' ],
            target=main.params[ 'PING' ][ 'target4' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source5' ],
            target=main.params[ 'PING' ][ 'target5' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source6' ],
            target=main.params[ 'PING' ][ 'target6' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source7' ],
            target=main.params[ 'PING' ][ 'target7' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source8' ],
            target=main.params[ 'PING' ][ 'target8' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source9' ],
            target=main.params[ 'PING' ][ 'target9' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source10' ],
            target=main.params[ 'PING' ][ 'target10' ],
            pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
        devices = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].devices,
                             name="devices-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            devices.append( t.result )
        hosts = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].hosts,
                             name="hosts-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            try:
                hosts.append( json.loads( t.result ) )
            except ( ValueError, TypeError ):
                # FIXME: better handling of this, print which node
                # Maybe use thread name?
                main.log.exception( "Error parsing json output of hosts" )
                main.log.warn( repr( t.result ) )
                hosts.append( None )

        ports = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].ports,
                             name="ports-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ports.append( t.result )
        links = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].links,
                             name="links-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            links.append( t.result )
        clusters = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].clusters,
                             name="clusters-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            clusters.append( t.result )
        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Host view is consistent across ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Each host has an IP address" )
        ipResult = main.TRUE
        for controller in range( 0, len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ]:
                for host in hosts[ controller ]:
                    if not host.get( 'ipAddresses', [ ] ):
                        main.log.error( "Error with host ips on controller" +
                                        controllerStr + ": " + str( host ) )
                        ipResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=ipResult,
            onpass="The ips of the hosts aren't empty",
            onfail="The ip of at least one host is missing" )

        # Strongly connected clusters of devices
        main.step( "Cluster view is consistent across ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE

            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        # there should always only be one cluster
        main.step( "Cluster view correct across ONOS nodes" )
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        main.step( "Comparing ONOS topology to MN" )
        devicesResults = main.TRUE
        linksResults = main.TRUE
        hostsResults = main.TRUE
        mnSwitches = main.Mininet1.getSwitches()
        mnLinks = main.Mininet1.getLinks()
        mnHosts = main.Mininet1.getHosts()
        # NOTE(review): 'controller' here is already a node index taken FROM
        # main.activeNodes, yet it is used both to index main.activeNodes and
        # the devices/ports/links/hosts lists (which are ordered by position).
        # This is only correct when activeNodes == range(len(activeNodes)) —
        # unlike the position-based loops above; verify for sparse node sets.
        for controller in main.activeNodes:
            controllerStr = str( main.activeNodes[controller] + 1 )
            if devices[ controller ] and ports[ controller ] and\
                    "Error" not in devices[ controller ] and\
                    "Error" not in ports[ controller ]:
                currentDevicesResult = main.Mininet1.compareSwitches(
                    mnSwitches,
                    json.loads( devices[ controller ] ),
                    json.loads( ports[ controller ] ) )
            else:
                currentDevicesResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentDevicesResult,
                                     onpass="ONOS" + controllerStr +
                                     " Switches view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " Switches view is incorrect" )
            if links[ controller ] and "Error" not in links[ controller ]:
                currentLinksResult = main.Mininet1.compareLinks(
                    mnSwitches, mnLinks,
                    json.loads( links[ controller ] ) )
            else:
                currentLinksResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentLinksResult,
                                     onpass="ONOS" + controllerStr +
                                     " links view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " links view is incorrect" )

            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                currentHostsResult = main.Mininet1.compareHosts(
                    mnHosts,
                    hosts[ controller ] )
            else:
                currentHostsResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentHostsResult,
                                     onpass="ONOS" + controllerStr +
                                     " hosts exist in Mininet",
                                     onfail="ONOS" + controllerStr +
                                     " hosts don't match Mininet" )

            devicesResults = devicesResults and currentDevicesResult
            linksResults = linksResults and currentLinksResult
            hostsResults = hostsResults and currentHostsResult

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )
1820
1821 def CASE6( self, main ):
1822 """
1823 The Scaling case.
1824 """
1825 import time
1826 import re
1827 assert main.numCtrls, "main.numCtrls not defined"
1828 assert main, "main not defined"
1829 assert utilities.assert_equals, "utilities.assert_equals not defined"
1830 assert main.CLIs, "main.CLIs not defined"
1831 assert main.nodes, "main.nodes not defined"
1832 try:
1833 labels
1834 except NameError:
1835 main.log.error( "labels not defined, setting to []" )
1836 global labels
1837 labels = []
1838 try:
1839 data
1840 except NameError:
1841 main.log.error( "data not defined, setting to []" )
1842 global data
1843 data = []
1844
Jon Hall69b2b982016-05-11 12:04:59 -07001845 main.case( "Scale the number of nodes in the ONOS cluster" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001846
1847 main.step( "Checking ONOS Logs for errors" )
1848 for i in main.activeNodes:
1849 node = main.nodes[i]
1850 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1851 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1852
1853 """
1854 pop # of nodes from a list, might look like 1,3b,3,5b,5,7b,7,7b,5,5b,3...
1855 modify cluster.json file appropriately
1856 install/deactivate node as needed
1857 """
1858
1859 try:
1860 prevNodes = main.activeNodes
1861 scale = main.scaling.pop(0)
1862 if "e" in scale:
1863 equal = True
1864 else:
1865 equal = False
1866 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
1867 main.log.info( "Scaling to {} nodes".format( main.numCtrls ) )
1868 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
1869 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1870 onpass="New cluster metadata file generated",
1871 onfail="Failled to generate new metadata file" )
1872 time.sleep( 5 ) # Give time for nodes to read new file
1873 except IndexError:
1874 main.cleanup()
1875 main.exit()
1876
1877 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
1878 newNodes = [ x for x in main.activeNodes if x not in prevNodes ]
1879
1880 main.step( "Start new nodes" ) # OR stop old nodes?
1881 started = main.TRUE
1882 for i in newNodes:
1883 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1884 utilities.assert_equals( expect=main.TRUE, actual=started,
1885 onpass="ONOS started",
1886 onfail="ONOS start NOT successful" )
1887
1888 main.step( "Checking if ONOS is up yet" )
1889 for i in range( 2 ):
1890 onosIsupResult = main.TRUE
1891 for i in main.activeNodes:
1892 node = main.nodes[i]
1893 started = main.ONOSbench.isup( node.ip_address )
1894 if not started:
1895 main.log.error( node.name + " didn't start!" )
1896 onosIsupResult = onosIsupResult and started
1897 if onosIsupResult == main.TRUE:
1898 break
1899 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1900 onpass="ONOS started",
1901 onfail="ONOS start NOT successful" )
1902
Jon Hall6509dbf2016-06-21 17:01:17 -07001903 main.step( "Starting ONOS CLI sessions" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001904 cliResults = main.TRUE
1905 threads = []
1906 for i in main.activeNodes:
1907 t = main.Thread( target=main.CLIs[i].startOnosCli,
1908 name="startOnosCli-" + str( i ),
1909 args=[main.nodes[i].ip_address] )
1910 threads.append( t )
1911 t.start()
1912
1913 for t in threads:
1914 t.join()
1915 cliResults = cliResults and t.result
1916 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1917 onpass="ONOS cli started",
1918 onfail="ONOS clis did not start" )
1919
1920 main.step( "Checking ONOS nodes" )
1921 nodeResults = utilities.retry( main.HA.nodesCheck,
1922 False,
1923 args=[main.activeNodes],
1924 attempts=5 )
1925 utilities.assert_equals( expect=True, actual=nodeResults,
1926 onpass="Nodes check successful",
1927 onfail="Nodes check NOT successful" )
1928
1929 for i in range( 10 ):
1930 ready = True
1931 for i in main.activeNodes:
1932 cli = main.CLIs[i]
1933 output = cli.summary()
1934 if not output:
1935 ready = False
1936 if ready:
1937 break
1938 time.sleep( 30 )
1939 utilities.assert_equals( expect=True, actual=ready,
1940 onpass="ONOS summary command succeded",
1941 onfail="ONOS summary command failed" )
1942 if not ready:
1943 main.cleanup()
1944 main.exit()
1945
1946 # Rerun for election on new nodes
1947 runResults = main.TRUE
1948 for i in main.activeNodes:
1949 cli = main.CLIs[i]
1950 run = cli.electionTestRun()
1951 if run != main.TRUE:
1952 main.log.error( "Error running for election on " + cli.name )
1953 runResults = runResults and run
1954 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1955 onpass="Reran for election",
1956 onfail="Failed to rerun for election" )
1957
1958 # TODO: Make this configurable
1959 time.sleep( 60 )
1960 for node in main.activeNodes:
1961 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1962 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1963 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1964 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1965 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1966
    def CASE7( self, main ):
        """
        Check state after ONOS scaling

        Compares the current cluster state against the state recorded
        before scaling: device mastership, intents, OpenFlow tables (the
        pre-scaling `flows` snapshot) and the leadership-election app.
        NOTE: relies on an earlier case having defined `intentState` and
        `flows` (presumably CASE5 -- verify against the test flow).
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query every active node in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []       # per-node `roles` output (JSON strings)
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag empty or error responses from any node
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Every node's response must match the first node's, byte for byte
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's full (pretty-printed) roles view
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []          # per-node `intents` output (JSON strings)
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE(review): each element of ONOSIntents is a JSON *string*, so
        # sorted( i ) sorts its characters; this is an order-insensitive
        # string comparison, not a structural JSON comparison.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One table row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            # Dump every node's full intents view for debugging
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length: fall back to a per-intent membership check
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=sameIntents,
                onpass="Intents are consistent with before scaling",
                onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        FlowTables = main.TRUE
        # `flows` is the per-switch snapshot taken before scaling
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        # All nodes must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2275
    def CASE8( self, main ):
        """
        Compare topo

        Repeatedly pulls devices/hosts/ports/links/clusters from every
        active ONOS node (in parallel, with retries) and compares them to
        the Mininet topology until they match or the retry budget
        (60 seconds / 3 tries) runs out, then runs consistency checks
        across nodes. Ends the test if the topologies never converge.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                               " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # --- Pull each topology object type from every node in parallel,
            # --- retrying each CLI call up to 5 times.
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                # hosts are parsed here; the other responses stay JSON strings
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host must have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query to every node failed, retry the whole pull
            if all( e is None for e in devices ) and\
                    all( e is None for e in hosts ) and\
                    all( e is None for e in ports ) and\
                    all( e is None for e in links ) and\
                    all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            # --- Compare each node's view against the Mininet ground truth
            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                        "Error" not in devices[ controller ] and\
                        "Error" not in ports[ controller ]:

                    # NOTE(review): if this except branch is taken,
                    # currentDevicesResult is not assigned here and the
                    # assert below may see a stale or unbound value -- verify
                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ):
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    # An empty host list is not treated as a failure here
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    # TODO: Find a way to know if there should be hosts in a
                    #       given point of the test
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
        utilities.assert_equals( expect=True,
                                 actual=topoResult,
                                 onpass="ONOS topology matches Mininet",
                                 onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        # NOTE(review): if parsing succeeds but numClusters != 1,
        # clusterResults is never assigned before its use in topoResult
        # below -- verify
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # Also require convergence within 2 polling iterations
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
               " seconds for the test to make all the cli calls to fetch " +\
               "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            # Dump which karaf components are not ACTIVE on each node
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )

        if not topoResult:
            main.cleanup()
            main.exit()
2698
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002699 def CASE9( self, main ):
2700 """
2701 Link s3-s28 down
2702 """
2703 import time
2704 assert main.numCtrls, "main.numCtrls not defined"
2705 assert main, "main not defined"
2706 assert utilities.assert_equals, "utilities.assert_equals not defined"
2707 assert main.CLIs, "main.CLIs not defined"
2708 assert main.nodes, "main.nodes not defined"
2709 # NOTE: You should probably run a topology check after this
2710
2711 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2712
2713 description = "Turn off a link to ensure that Link Discovery " +\
2714 "is working properly"
2715 main.case( description )
2716
2717 main.step( "Kill Link between s3 and s28" )
2718 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2719 main.log.info( "Waiting " + str( linkSleep ) +
2720 " seconds for link down to be discovered" )
2721 time.sleep( linkSleep )
2722 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2723 onpass="Link down successful",
2724 onfail="Failed to bring link down" )
2725 # TODO do some sort of check here
2726
2727 def CASE10( self, main ):
2728 """
2729 Link s3-s28 up
2730 """
2731 import time
2732 assert main.numCtrls, "main.numCtrls not defined"
2733 assert main, "main not defined"
2734 assert utilities.assert_equals, "utilities.assert_equals not defined"
2735 assert main.CLIs, "main.CLIs not defined"
2736 assert main.nodes, "main.nodes not defined"
2737 # NOTE: You should probably run a topology check after this
2738
2739 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2740
2741 description = "Restore a link to ensure that Link Discovery is " + \
2742 "working properly"
2743 main.case( description )
2744
2745 main.step( "Bring link between s3 and s28 back up" )
2746 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2747 main.log.info( "Waiting " + str( linkSleep ) +
2748 " seconds for link up to be discovered" )
2749 time.sleep( linkSleep )
2750 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2751 onpass="Link up successful",
2752 onfail="Failed to bring link up" )
2753 # TODO do some sort of check here
2754
2755 def CASE11( self, main ):
2756 """
2757 Switch Down
2758 """
2759 # NOTE: You should probably run a topology check after this
2760 import time
2761 assert main.numCtrls, "main.numCtrls not defined"
2762 assert main, "main not defined"
2763 assert utilities.assert_equals, "utilities.assert_equals not defined"
2764 assert main.CLIs, "main.CLIs not defined"
2765 assert main.nodes, "main.nodes not defined"
2766
2767 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2768
2769 description = "Killing a switch to ensure it is discovered correctly"
2770 onosCli = main.CLIs[ main.activeNodes[0] ]
2771 main.case( description )
2772 switch = main.params[ 'kill' ][ 'switch' ]
2773 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2774
2775 # TODO: Make this switch parameterizable
2776 main.step( "Kill " + switch )
2777 main.log.info( "Deleting " + switch )
2778 main.Mininet1.delSwitch( switch )
2779 main.log.info( "Waiting " + str( switchSleep ) +
2780 " seconds for switch down to be discovered" )
2781 time.sleep( switchSleep )
2782 device = onosCli.getDevice( dpid=switchDPID )
2783 # Peek at the deleted switch
2784 main.log.warn( str( device ) )
2785 result = main.FALSE
2786 if device and device[ 'available' ] is False:
2787 result = main.TRUE
2788 utilities.assert_equals( expect=main.TRUE, actual=result,
2789 onpass="Kill switch successful",
2790 onfail="Failed to kill switch?" )
2791
2792 def CASE12( self, main ):
2793 """
2794 Switch Up
2795 """
2796 # NOTE: You should probably run a topology check after this
2797 import time
2798 assert main.numCtrls, "main.numCtrls not defined"
2799 assert main, "main not defined"
2800 assert utilities.assert_equals, "utilities.assert_equals not defined"
2801 assert main.CLIs, "main.CLIs not defined"
2802 assert main.nodes, "main.nodes not defined"
2803
2804 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2805 switch = main.params[ 'kill' ][ 'switch' ]
2806 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2807 links = main.params[ 'kill' ][ 'links' ].split()
2808 onosCli = main.CLIs[ main.activeNodes[0] ]
2809 description = "Adding a switch to ensure it is discovered correctly"
2810 main.case( description )
2811
2812 main.step( "Add back " + switch )
2813 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2814 for peer in links:
2815 main.Mininet1.addLink( switch, peer )
2816 ipList = [ node.ip_address for node in main.nodes ]
2817 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2818 main.log.info( "Waiting " + str( switchSleep ) +
2819 " seconds for switch up to be discovered" )
2820 time.sleep( switchSleep )
2821 device = onosCli.getDevice( dpid=switchDPID )
2822 # Peek at the deleted switch
2823 main.log.warn( str( device ) )
2824 result = main.FALSE
2825 if device and device[ 'available' ]:
2826 result = main.TRUE
2827 utilities.assert_equals( expect=main.TRUE, actual=result,
2828 onpass="add switch successful",
2829 onfail="Failed to add switch?" )
2830
2831 def CASE13( self, main ):
2832 """
2833 Clean up
2834 """
2835 assert main.numCtrls, "main.numCtrls not defined"
2836 assert main, "main not defined"
2837 assert utilities.assert_equals, "utilities.assert_equals not defined"
2838 assert main.CLIs, "main.CLIs not defined"
2839 assert main.nodes, "main.nodes not defined"
2840
2841 main.case( "Test Cleanup" )
2842 main.step( "Killing tcpdumps" )
2843 main.Mininet2.stopTcpdump()
2844
2845 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2846 main.step( "Copying MN pcap and ONOS log files to test station" )
2847 # NOTE: MN Pcap file is being saved to logdir.
2848 # We scp this file as MN and TestON aren't necessarily the same vm
2849
2850 # FIXME: To be replaced with a Jenkin's post script
2851 # TODO: Load these from params
2852 # NOTE: must end in /
2853 logFolder = "/opt/onos/log/"
2854 logFiles = [ "karaf.log", "karaf.log.1" ]
2855 # NOTE: must end in /
2856 for f in logFiles:
2857 for node in main.nodes:
2858 dstName = main.logdir + "/" + node.name + "-" + f
2859 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2860 logFolder + f, dstName )
2861 # std*.log's
2862 # NOTE: must end in /
2863 logFolder = "/opt/onos/var/"
2864 logFiles = [ "stderr.log", "stdout.log" ]
2865 # NOTE: must end in /
2866 for f in logFiles:
2867 for node in main.nodes:
2868 dstName = main.logdir + "/" + node.name + "-" + f
2869 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2870 logFolder + f, dstName )
2871 else:
2872 main.log.debug( "skipping saving log files" )
2873
2874 main.step( "Stopping Mininet" )
2875 mnResult = main.Mininet1.stopNet()
2876 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2877 onpass="Mininet stopped",
2878 onfail="MN cleanup NOT successful" )
2879
2880 main.step( "Checking ONOS Logs for errors" )
2881 for node in main.nodes:
2882 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2883 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2884
2885 try:
2886 timerLog = open( main.logdir + "/Timers.csv", 'w')
2887 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2888 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2889 timerLog.close()
2890 except NameError, e:
2891 main.log.exception(e)
2892
2893 main.step( "Stopping webserver" )
2894 status = main.Server.stop( )
2895 utilities.assert_equals( expect=main.TRUE, actual=status,
2896 onpass="Stop Server",
2897 onfail="Failled to stop SimpleHTTPServer" )
2898 del main.Server
2899
2900 def CASE14( self, main ):
2901 """
2902 start election app on all onos nodes
2903 """
2904 import time
2905 assert main.numCtrls, "main.numCtrls not defined"
2906 assert main, "main not defined"
2907 assert utilities.assert_equals, "utilities.assert_equals not defined"
2908 assert main.CLIs, "main.CLIs not defined"
2909 assert main.nodes, "main.nodes not defined"
2910
2911 main.case("Start Leadership Election app")
2912 main.step( "Install leadership election app" )
2913 onosCli = main.CLIs[ main.activeNodes[0] ]
2914 appResult = onosCli.activateApp( "org.onosproject.election" )
2915 utilities.assert_equals(
2916 expect=main.TRUE,
2917 actual=appResult,
2918 onpass="Election app installed",
2919 onfail="Something went wrong with installing Leadership election" )
2920
2921 main.step( "Run for election on each node" )
2922 for i in main.activeNodes:
2923 main.CLIs[i].electionTestRun()
2924 time.sleep(5)
2925 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2926 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2927 utilities.assert_equals(
2928 expect=True,
2929 actual=sameResult,
2930 onpass="All nodes see the same leaderboards",
2931 onfail="Inconsistent leaderboards" )
2932
2933 if sameResult:
2934 leader = leaders[ 0 ][ 0 ]
2935 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2936 correctLeader = True
2937 else:
2938 correctLeader = False
2939 main.step( "First node was elected leader" )
2940 utilities.assert_equals(
2941 expect=True,
2942 actual=correctLeader,
2943 onpass="Correct leader was elected",
2944 onfail="Incorrect leader" )
2945
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal and later before withdrawal vs after re-election
        """
        import time
        # Preconditions: earlier cases must have set up the test environment
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        # A leaderboard is a list with the current leader first and the
        # remaining candidates after it, one board per queried node
        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Nothing else can be checked without the election app
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            # Leave oldLeader as None so the withdraw step can detect this
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no break means no match was found
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means no node holds the leadership; only acceptable when
            # the withdrawn node was the sole controller
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 entries: not enough candidates to predict a successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: Parameterize this sleep
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3119
3120 def CASE16( self, main ):
3121 """
3122 Install Distributed Primitives app
3123 """
3124 import time
3125 assert main.numCtrls, "main.numCtrls not defined"
3126 assert main, "main not defined"
3127 assert utilities.assert_equals, "utilities.assert_equals not defined"
3128 assert main.CLIs, "main.CLIs not defined"
3129 assert main.nodes, "main.nodes not defined"
3130
3131 # Variables for the distributed primitives tests
3132 global pCounterName
3133 global pCounterValue
3134 global onosSet
3135 global onosSetName
3136 pCounterName = "TestON-Partitions"
3137 pCounterValue = 0
3138 onosSet = set([])
3139 onosSetName = "TestON-set"
3140
3141 description = "Install Primitives app"
3142 main.case( description )
3143 main.step( "Install Primitives app" )
3144 appName = "org.onosproject.distributedprimitives"
3145 node = main.activeNodes[0]
3146 appResults = main.CLIs[node].activateApp( appName )
3147 utilities.assert_equals( expect=main.TRUE,
3148 actual=appResults,
3149 onpass="Primitives app activated",
3150 onfail="Primitives app not activated" )
3151 time.sleep( 5 ) # To allow all nodes to activate
3152
3153 def CASE17( self, main ):
3154 """
3155 Check for basic functionality with distributed primitives
3156 """
3157 # Make sure variables are defined/set
3158 assert main.numCtrls, "main.numCtrls not defined"
3159 assert main, "main not defined"
3160 assert utilities.assert_equals, "utilities.assert_equals not defined"
3161 assert main.CLIs, "main.CLIs not defined"
3162 assert main.nodes, "main.nodes not defined"
3163 assert pCounterName, "pCounterName not defined"
3164 assert onosSetName, "onosSetName not defined"
3165 # NOTE: assert fails if value is 0/None/Empty/False
3166 try:
3167 pCounterValue
3168 except NameError:
3169 main.log.error( "pCounterValue not defined, setting to 0" )
3170 pCounterValue = 0
3171 try:
3172 onosSet
3173 except NameError:
3174 main.log.error( "onosSet not defined, setting to empty Set" )
3175 onosSet = set([])
3176 # Variables for the distributed primitives tests. These are local only
3177 addValue = "a"
3178 addAllValue = "a b c d e f"
3179 retainValue = "c d e f"
3180
3181 description = "Check for basic functionality with distributed " +\
3182 "primitives"
3183 main.case( description )
3184 main.caseExplanation = "Test the methods of the distributed " +\
3185 "primitives (counters and sets) throught the cli"
3186 # DISTRIBUTED ATOMIC COUNTERS
3187 # Partitioned counters
3188 main.step( "Increment then get a default counter on each node" )
3189 pCounters = []
3190 threads = []
3191 addedPValues = []
3192 for i in main.activeNodes:
3193 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3194 name="counterAddAndGet-" + str( i ),
3195 args=[ pCounterName ] )
3196 pCounterValue += 1
3197 addedPValues.append( pCounterValue )
3198 threads.append( t )
3199 t.start()
3200
3201 for t in threads:
3202 t.join()
3203 pCounters.append( t.result )
3204 # Check that counter incremented numController times
3205 pCounterResults = True
3206 for i in addedPValues:
3207 tmpResult = i in pCounters
3208 pCounterResults = pCounterResults and tmpResult
3209 if not tmpResult:
3210 main.log.error( str( i ) + " is not in partitioned "
3211 "counter incremented results" )
3212 utilities.assert_equals( expect=True,
3213 actual=pCounterResults,
3214 onpass="Default counter incremented",
3215 onfail="Error incrementing default" +
3216 " counter" )
3217
3218 main.step( "Get then Increment a default counter on each node" )
3219 pCounters = []
3220 threads = []
3221 addedPValues = []
3222 for i in main.activeNodes:
3223 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3224 name="counterGetAndAdd-" + str( i ),
3225 args=[ pCounterName ] )
3226 addedPValues.append( pCounterValue )
3227 pCounterValue += 1
3228 threads.append( t )
3229 t.start()
3230
3231 for t in threads:
3232 t.join()
3233 pCounters.append( t.result )
3234 # Check that counter incremented numController times
3235 pCounterResults = True
3236 for i in addedPValues:
3237 tmpResult = i in pCounters
3238 pCounterResults = pCounterResults and tmpResult
3239 if not tmpResult:
3240 main.log.error( str( i ) + " is not in partitioned "
3241 "counter incremented results" )
3242 utilities.assert_equals( expect=True,
3243 actual=pCounterResults,
3244 onpass="Default counter incremented",
3245 onfail="Error incrementing default" +
3246 " counter" )
3247
3248 main.step( "Counters we added have the correct values" )
3249 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3250 utilities.assert_equals( expect=main.TRUE,
3251 actual=incrementCheck,
3252 onpass="Added counters are correct",
3253 onfail="Added counters are incorrect" )
3254
3255 main.step( "Add -8 to then get a default counter on each node" )
3256 pCounters = []
3257 threads = []
3258 addedPValues = []
3259 for i in main.activeNodes:
3260 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3261 name="counterIncrement-" + str( i ),
3262 args=[ pCounterName ],
3263 kwargs={ "delta": -8 } )
3264 pCounterValue += -8
3265 addedPValues.append( pCounterValue )
3266 threads.append( t )
3267 t.start()
3268
3269 for t in threads:
3270 t.join()
3271 pCounters.append( t.result )
3272 # Check that counter incremented numController times
3273 pCounterResults = True
3274 for i in addedPValues:
3275 tmpResult = i in pCounters
3276 pCounterResults = pCounterResults and tmpResult
3277 if not tmpResult:
3278 main.log.error( str( i ) + " is not in partitioned "
3279 "counter incremented results" )
3280 utilities.assert_equals( expect=True,
3281 actual=pCounterResults,
3282 onpass="Default counter incremented",
3283 onfail="Error incrementing default" +
3284 " counter" )
3285
3286 main.step( "Add 5 to then get a default counter on each node" )
3287 pCounters = []
3288 threads = []
3289 addedPValues = []
3290 for i in main.activeNodes:
3291 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3292 name="counterIncrement-" + str( i ),
3293 args=[ pCounterName ],
3294 kwargs={ "delta": 5 } )
3295 pCounterValue += 5
3296 addedPValues.append( pCounterValue )
3297 threads.append( t )
3298 t.start()
3299
3300 for t in threads:
3301 t.join()
3302 pCounters.append( t.result )
3303 # Check that counter incremented numController times
3304 pCounterResults = True
3305 for i in addedPValues:
3306 tmpResult = i in pCounters
3307 pCounterResults = pCounterResults and tmpResult
3308 if not tmpResult:
3309 main.log.error( str( i ) + " is not in partitioned "
3310 "counter incremented results" )
3311 utilities.assert_equals( expect=True,
3312 actual=pCounterResults,
3313 onpass="Default counter incremented",
3314 onfail="Error incrementing default" +
3315 " counter" )
3316
3317 main.step( "Get then add 5 to a default counter on each node" )
3318 pCounters = []
3319 threads = []
3320 addedPValues = []
3321 for i in main.activeNodes:
3322 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3323 name="counterIncrement-" + str( i ),
3324 args=[ pCounterName ],
3325 kwargs={ "delta": 5 } )
3326 addedPValues.append( pCounterValue )
3327 pCounterValue += 5
3328 threads.append( t )
3329 t.start()
3330
3331 for t in threads:
3332 t.join()
3333 pCounters.append( t.result )
3334 # Check that counter incremented numController times
3335 pCounterResults = True
3336 for i in addedPValues:
3337 tmpResult = i in pCounters
3338 pCounterResults = pCounterResults and tmpResult
3339 if not tmpResult:
3340 main.log.error( str( i ) + " is not in partitioned "
3341 "counter incremented results" )
3342 utilities.assert_equals( expect=True,
3343 actual=pCounterResults,
3344 onpass="Default counter incremented",
3345 onfail="Error incrementing default" +
3346 " counter" )
3347
3348 main.step( "Counters we added have the correct values" )
3349 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3350 utilities.assert_equals( expect=main.TRUE,
3351 actual=incrementCheck,
3352 onpass="Added counters are correct",
3353 onfail="Added counters are incorrect" )
3354
3355 # DISTRIBUTED SETS
3356 main.step( "Distributed Set get" )
3357 size = len( onosSet )
3358 getResponses = []
3359 threads = []
3360 for i in main.activeNodes:
3361 t = main.Thread( target=main.CLIs[i].setTestGet,
3362 name="setTestGet-" + str( i ),
3363 args=[ onosSetName ] )
3364 threads.append( t )
3365 t.start()
3366 for t in threads:
3367 t.join()
3368 getResponses.append( t.result )
3369
3370 getResults = main.TRUE
3371 for i in range( len( main.activeNodes ) ):
3372 node = str( main.activeNodes[i] + 1 )
3373 if isinstance( getResponses[ i ], list):
3374 current = set( getResponses[ i ] )
3375 if len( current ) == len( getResponses[ i ] ):
3376 # no repeats
3377 if onosSet != current:
3378 main.log.error( "ONOS" + node +
3379 " has incorrect view" +
3380 " of set " + onosSetName + ":\n" +
3381 str( getResponses[ i ] ) )
3382 main.log.debug( "Expected: " + str( onosSet ) )
3383 main.log.debug( "Actual: " + str( current ) )
3384 getResults = main.FALSE
3385 else:
3386 # error, set is not a set
3387 main.log.error( "ONOS" + node +
3388 " has repeat elements in" +
3389 " set " + onosSetName + ":\n" +
3390 str( getResponses[ i ] ) )
3391 getResults = main.FALSE
3392 elif getResponses[ i ] == main.ERROR:
3393 getResults = main.FALSE
3394 utilities.assert_equals( expect=main.TRUE,
3395 actual=getResults,
3396 onpass="Set elements are correct",
3397 onfail="Set elements are incorrect" )
3398
3399 main.step( "Distributed Set size" )
3400 sizeResponses = []
3401 threads = []
3402 for i in main.activeNodes:
3403 t = main.Thread( target=main.CLIs[i].setTestSize,
3404 name="setTestSize-" + str( i ),
3405 args=[ onosSetName ] )
3406 threads.append( t )
3407 t.start()
3408 for t in threads:
3409 t.join()
3410 sizeResponses.append( t.result )
3411
3412 sizeResults = main.TRUE
3413 for i in range( len( main.activeNodes ) ):
3414 node = str( main.activeNodes[i] + 1 )
3415 if size != sizeResponses[ i ]:
3416 sizeResults = main.FALSE
3417 main.log.error( "ONOS" + node +
3418 " expected a size of " + str( size ) +
3419 " for set " + onosSetName +
3420 " but got " + str( sizeResponses[ i ] ) )
3421 utilities.assert_equals( expect=main.TRUE,
3422 actual=sizeResults,
3423 onpass="Set sizes are correct",
3424 onfail="Set sizes are incorrect" )
3425
3426 main.step( "Distributed Set add()" )
3427 onosSet.add( addValue )
3428 addResponses = []
3429 threads = []
3430 for i in main.activeNodes:
3431 t = main.Thread( target=main.CLIs[i].setTestAdd,
3432 name="setTestAdd-" + str( i ),
3433 args=[ onosSetName, addValue ] )
3434 threads.append( t )
3435 t.start()
3436 for t in threads:
3437 t.join()
3438 addResponses.append( t.result )
3439
3440 # main.TRUE = successfully changed the set
3441 # main.FALSE = action resulted in no change in set
3442 # main.ERROR - Some error in executing the function
3443 addResults = main.TRUE
3444 for i in range( len( main.activeNodes ) ):
3445 if addResponses[ i ] == main.TRUE:
3446 # All is well
3447 pass
3448 elif addResponses[ i ] == main.FALSE:
3449 # Already in set, probably fine
3450 pass
3451 elif addResponses[ i ] == main.ERROR:
3452 # Error in execution
3453 addResults = main.FALSE
3454 else:
3455 # unexpected result
3456 addResults = main.FALSE
3457 if addResults != main.TRUE:
3458 main.log.error( "Error executing set add" )
3459
3460 # Check if set is still correct
3461 size = len( onosSet )
3462 getResponses = []
3463 threads = []
3464 for i in main.activeNodes:
3465 t = main.Thread( target=main.CLIs[i].setTestGet,
3466 name="setTestGet-" + str( i ),
3467 args=[ onosSetName ] )
3468 threads.append( t )
3469 t.start()
3470 for t in threads:
3471 t.join()
3472 getResponses.append( t.result )
3473 getResults = main.TRUE
3474 for i in range( len( main.activeNodes ) ):
3475 node = str( main.activeNodes[i] + 1 )
3476 if isinstance( getResponses[ i ], list):
3477 current = set( getResponses[ i ] )
3478 if len( current ) == len( getResponses[ i ] ):
3479 # no repeats
3480 if onosSet != current:
3481 main.log.error( "ONOS" + node + " has incorrect view" +
3482 " of set " + onosSetName + ":\n" +
3483 str( getResponses[ i ] ) )
3484 main.log.debug( "Expected: " + str( onosSet ) )
3485 main.log.debug( "Actual: " + str( current ) )
3486 getResults = main.FALSE
3487 else:
3488 # error, set is not a set
3489 main.log.error( "ONOS" + node + " has repeat elements in" +
3490 " set " + onosSetName + ":\n" +
3491 str( getResponses[ i ] ) )
3492 getResults = main.FALSE
3493 elif getResponses[ i ] == main.ERROR:
3494 getResults = main.FALSE
3495 sizeResponses = []
3496 threads = []
3497 for i in main.activeNodes:
3498 t = main.Thread( target=main.CLIs[i].setTestSize,
3499 name="setTestSize-" + str( i ),
3500 args=[ onosSetName ] )
3501 threads.append( t )
3502 t.start()
3503 for t in threads:
3504 t.join()
3505 sizeResponses.append( t.result )
3506 sizeResults = main.TRUE
3507 for i in range( len( main.activeNodes ) ):
3508 node = str( main.activeNodes[i] + 1 )
3509 if size != sizeResponses[ i ]:
3510 sizeResults = main.FALSE
3511 main.log.error( "ONOS" + node +
3512 " expected a size of " + str( size ) +
3513 " for set " + onosSetName +
3514 " but got " + str( sizeResponses[ i ] ) )
3515 addResults = addResults and getResults and sizeResults
3516 utilities.assert_equals( expect=main.TRUE,
3517 actual=addResults,
3518 onpass="Set add correct",
3519 onfail="Set add was incorrect" )
3520
3521 main.step( "Distributed Set addAll()" )
3522 onosSet.update( addAllValue.split() )
3523 addResponses = []
3524 threads = []
3525 for i in main.activeNodes:
3526 t = main.Thread( target=main.CLIs[i].setTestAdd,
3527 name="setTestAddAll-" + str( i ),
3528 args=[ onosSetName, addAllValue ] )
3529 threads.append( t )
3530 t.start()
3531 for t in threads:
3532 t.join()
3533 addResponses.append( t.result )
3534
3535 # main.TRUE = successfully changed the set
3536 # main.FALSE = action resulted in no change in set
3537 # main.ERROR - Some error in executing the function
3538 addAllResults = main.TRUE
3539 for i in range( len( main.activeNodes ) ):
3540 if addResponses[ i ] == main.TRUE:
3541 # All is well
3542 pass
3543 elif addResponses[ i ] == main.FALSE:
3544 # Already in set, probably fine
3545 pass
3546 elif addResponses[ i ] == main.ERROR:
3547 # Error in execution
3548 addAllResults = main.FALSE
3549 else:
3550 # unexpected result
3551 addAllResults = main.FALSE
3552 if addAllResults != main.TRUE:
3553 main.log.error( "Error executing set addAll" )
3554
3555 # Check if set is still correct
3556 size = len( onosSet )
3557 getResponses = []
3558 threads = []
3559 for i in main.activeNodes:
3560 t = main.Thread( target=main.CLIs[i].setTestGet,
3561 name="setTestGet-" + str( i ),
3562 args=[ onosSetName ] )
3563 threads.append( t )
3564 t.start()
3565 for t in threads:
3566 t.join()
3567 getResponses.append( t.result )
3568 getResults = main.TRUE
3569 for i in range( len( main.activeNodes ) ):
3570 node = str( main.activeNodes[i] + 1 )
3571 if isinstance( getResponses[ i ], list):
3572 current = set( getResponses[ i ] )
3573 if len( current ) == len( getResponses[ i ] ):
3574 # no repeats
3575 if onosSet != current:
3576 main.log.error( "ONOS" + node +
3577 " has incorrect view" +
3578 " of set " + onosSetName + ":\n" +
3579 str( getResponses[ i ] ) )
3580 main.log.debug( "Expected: " + str( onosSet ) )
3581 main.log.debug( "Actual: " + str( current ) )
3582 getResults = main.FALSE
3583 else:
3584 # error, set is not a set
3585 main.log.error( "ONOS" + node +
3586 " has repeat elements in" +
3587 " set " + onosSetName + ":\n" +
3588 str( getResponses[ i ] ) )
3589 getResults = main.FALSE
3590 elif getResponses[ i ] == main.ERROR:
3591 getResults = main.FALSE
3592 sizeResponses = []
3593 threads = []
3594 for i in main.activeNodes:
3595 t = main.Thread( target=main.CLIs[i].setTestSize,
3596 name="setTestSize-" + str( i ),
3597 args=[ onosSetName ] )
3598 threads.append( t )
3599 t.start()
3600 for t in threads:
3601 t.join()
3602 sizeResponses.append( t.result )
3603 sizeResults = main.TRUE
3604 for i in range( len( main.activeNodes ) ):
3605 node = str( main.activeNodes[i] + 1 )
3606 if size != sizeResponses[ i ]:
3607 sizeResults = main.FALSE
3608 main.log.error( "ONOS" + node +
3609 " expected a size of " + str( size ) +
3610 " for set " + onosSetName +
3611 " but got " + str( sizeResponses[ i ] ) )
3612 addAllResults = addAllResults and getResults and sizeResults
3613 utilities.assert_equals( expect=main.TRUE,
3614 actual=addAllResults,
3615 onpass="Set addAll correct",
3616 onfail="Set addAll was incorrect" )
3617
3618 main.step( "Distributed Set contains()" )
3619 containsResponses = []
3620 threads = []
3621 for i in main.activeNodes:
3622 t = main.Thread( target=main.CLIs[i].setTestGet,
3623 name="setContains-" + str( i ),
3624 args=[ onosSetName ],
3625 kwargs={ "values": addValue } )
3626 threads.append( t )
3627 t.start()
3628 for t in threads:
3629 t.join()
3630 # NOTE: This is the tuple
3631 containsResponses.append( t.result )
3632
3633 containsResults = main.TRUE
3634 for i in range( len( main.activeNodes ) ):
3635 if containsResponses[ i ] == main.ERROR:
3636 containsResults = main.FALSE
3637 else:
3638 containsResults = containsResults and\
3639 containsResponses[ i ][ 1 ]
3640 utilities.assert_equals( expect=main.TRUE,
3641 actual=containsResults,
3642 onpass="Set contains is functional",
3643 onfail="Set contains failed" )
3644
3645 main.step( "Distributed Set containsAll()" )
3646 containsAllResponses = []
3647 threads = []
3648 for i in main.activeNodes:
3649 t = main.Thread( target=main.CLIs[i].setTestGet,
3650 name="setContainsAll-" + str( i ),
3651 args=[ onosSetName ],
3652 kwargs={ "values": addAllValue } )
3653 threads.append( t )
3654 t.start()
3655 for t in threads:
3656 t.join()
3657 # NOTE: This is the tuple
3658 containsAllResponses.append( t.result )
3659
3660 containsAllResults = main.TRUE
3661 for i in range( len( main.activeNodes ) ):
3662 if containsResponses[ i ] == main.ERROR:
3663 containsResults = main.FALSE
3664 else:
3665 containsResults = containsResults and\
3666 containsResponses[ i ][ 1 ]
3667 utilities.assert_equals( expect=main.TRUE,
3668 actual=containsAllResults,
3669 onpass="Set containsAll is functional",
3670 onfail="Set containsAll failed" )
3671
        # Verify set remove(): remove addValue on every node, then confirm
        # every node's view of the set ( contents and size ) matches the
        # local reference set onosSet.
        main.step( "Distributed Set remove()" )
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeAllResponses = removeResponses  # noqa: placeholder-free line? -- no
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3768
        # Verify set removeAll(): remove all values in addAllValue on every
        # node, then confirm every node's view ( contents and size ) matches
        # the local reference set onosSet.
        main.step( "Distributed Set removeAll()" )
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3868
        # Verify set addAll(): add all values in addAllValue on every node,
        # then confirm every node's view ( contents and size ) matches the
        # local reference set onosSet.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3965
        # Verify set clear(): clear the set on every node, then confirm
        # every node's view ( contents and size ) matches the now-empty
        # local reference set onosSet.
        main.step( "Distributed Set clear()" )
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4063
        # Verify set addAll() again ( repopulates the set after clear() ),
        # then confirm every node's view ( contents and size ) matches the
        # local reference set onosSet.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4160
        # Verify set retain() ( set intersection ): keep only the values in
        # retainValue on every node, then confirm every node's view
        # ( contents and size ) matches the local reference set onosSet.
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4257
4258 # Transactional maps
4259 main.step( "Partitioned Transactional maps put" )
4260 tMapValue = "Testing"
4261 numKeys = 100
4262 putResult = True
4263 node = main.activeNodes[0]
4264 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4265 if putResponses and len( putResponses ) == 100:
4266 for i in putResponses:
4267 if putResponses[ i ][ 'value' ] != tMapValue:
4268 putResult = False
4269 else:
4270 putResult = False
4271 if not putResult:
4272 main.log.debug( "Put response values: " + str( putResponses ) )
4273 utilities.assert_equals( expect=True,
4274 actual=putResult,
4275 onpass="Partitioned Transactional Map put successful",
4276 onfail="Partitioned Transactional Map put values are incorrect" )
4277
        # Read back every key ( Key1..KeyN ) from every active node and
        # check that each node returns the value written in the put step.
        main.step( "Partitioned Transactional maps get" )
        # FIXME: is this sleep needed?
        time.sleep( 5 )

        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            # fan out one get per active node for this key
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # every node must return the exact value that was put
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )