blob: 43f89fb6413bca7039464f6c381628d23f76842d [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
    dynamic scaling of the cluster size.

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: The scaling case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
25
26
class HAscaling:
    """ONOS High Availability test: dynamically scale the cluster size."""

    def __init__( self ):
        # Default TestON component attribute; placeholder only.
        self.default = ''
31
32 def CASE1( self, main ):
33 """
34 CASE1 is to compile ONOS and push it to the test machines
35
36 Startup sequence:
37 cell <name>
38 onos-verify-cell
39 NOTE: temporary - onos-remove-raft-logs
40 onos-uninstall
41 start mininet
42 git pull
43 mvn clean install
44 onos-package
45 onos-install -f
46 onos-wait-for-start
47 start cli sessions
48 start tcpdump
49 """
50 import time
51 import os
52 import re
53 main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
54 "initialization" )
55 main.case( "Setting up test environment" )
56 main.caseExplanation = "Setup the test environment including " +\
57 "installing ONOS, starting Mininet and ONOS" +\
58 "cli sessions."
59
60 # load some variables from the params file
61 PULLCODE = False
62 if main.params[ 'Git' ] == 'True':
63 PULLCODE = True
64 gitBranch = main.params[ 'branch' ]
65 cellName = main.params[ 'ENV' ][ 'cellName' ]
66
67 main.numCtrls = int( main.params[ 'num_controllers' ] )
68 if main.ONOSbench.maxNodes:
69 if main.ONOSbench.maxNodes < main.numCtrls:
70 main.numCtrls = int( main.ONOSbench.maxNodes )
71 # set global variables
72 # These are for csv plotting in jenkins
73 global labels
74 global data
75 labels = []
76 data = []
77
78 try:
79 from tests.HA.dependencies.HA import HA
80 main.HA = HA()
81 from tests.HA.HAscaling.dependencies.Server import Server
82 main.Server = Server()
83 except Exception as e:
84 main.log.exception( e )
85 main.cleanup()
86 main.exit()
87
88 main.CLIs = []
89 main.nodes = []
90 ipList = []
91 for i in range( 1, main.numCtrls + 1 ):
92 try:
93 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
94 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
95 ipList.append( main.nodes[ -1 ].ip_address )
96 except AttributeError:
97 break
98
99 main.step( "Create cell file" )
100 cellAppString = main.params[ 'ENV' ][ 'appString' ]
101 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
102 main.Mininet1.ip_address,
103 cellAppString, ipList )
104
105 main.step( "Applying cell variable to environment" )
106 cellResult = main.ONOSbench.setCell( cellName )
107 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
108 onpass="Set cell successfull",
109 onfail="Failled to set cell" )
110
111 main.step( "Verify connectivity to cell" )
112 verifyResult = main.ONOSbench.verifyCell()
113 utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
114 onpass="Verify cell passed",
115 onfail="Failled to verify cell" )
116
117 # FIXME:this is short term fix
118 main.log.info( "Removing raft logs" )
119 main.ONOSbench.onosRemoveRaftLogs()
120
121 main.log.info( "Uninstalling ONOS" )
122 for node in main.nodes:
123 main.ONOSbench.onosUninstall( node.ip_address )
124
125 # Make sure ONOS is DEAD
126 main.log.info( "Killing any ONOS processes" )
127 killResults = main.TRUE
128 for node in main.nodes:
129 killed = main.ONOSbench.onosKill( node.ip_address )
130 killResults = killResults and killed
131
132 main.step( "Setup server for cluster metadata file" )
Jon Halla6c90b22016-05-06 10:53:09 -0700133 port = main.params['serverPort']
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700134 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
135 main.log.debug( "Root dir: {}".format( rootDir ) )
136 status = main.Server.start( main.ONOSbench,
137 rootDir,
138 port=port,
139 logDir=main.logdir + "/server.log" )
140 utilities.assert_equals( expect=main.TRUE, actual=status,
141 onpass="Server started",
142 onfail="Failled to start SimpleHTTPServer" )
143
144 main.step( "Generate initial metadata file" )
145 main.scaling = main.params['scaling'].split( "," )
146 main.log.debug( main.scaling )
147 scale = main.scaling.pop(0)
148 main.log.debug( scale)
149 if "e" in scale:
150 equal = True
151 else:
152 equal = False
153 main.log.debug( equal)
154 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
155 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
156 utilities.assert_equals( expect=main.TRUE, actual=genResult,
157 onpass="New cluster metadata file generated",
158 onfail="Failled to generate new metadata file" )
159
160 cleanInstallResult = main.TRUE
161 gitPullResult = main.TRUE
162
163 main.step( "Starting Mininet" )
164 # scp topo file to mininet
165 # TODO: move to params?
166 topoName = "obelisk.py"
167 filePath = main.ONOSbench.home + "/tools/test/topos/"
168 main.ONOSbench.scp( main.Mininet1,
169 filePath + topoName,
170 main.Mininet1.home,
171 direction="to" )
172 mnResult = main.Mininet1.startNet( )
173 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
174 onpass="Mininet Started",
175 onfail="Error starting Mininet" )
176
177 main.step( "Git checkout and pull " + gitBranch )
178 if PULLCODE:
179 main.ONOSbench.gitCheckout( gitBranch )
180 gitPullResult = main.ONOSbench.gitPull()
181 # values of 1 or 3 are good
182 utilities.assert_lesser( expect=0, actual=gitPullResult,
183 onpass="Git pull successful",
184 onfail="Git pull failed" )
185 main.ONOSbench.getVersion( report=True )
186
187 main.step( "Using mvn clean install" )
188 cleanInstallResult = main.TRUE
189 if PULLCODE and gitPullResult == main.TRUE:
190 cleanInstallResult = main.ONOSbench.cleanInstall()
191 else:
192 main.log.warn( "Did not pull new code so skipping mvn " +
193 "clean install" )
194 utilities.assert_equals( expect=main.TRUE,
195 actual=cleanInstallResult,
196 onpass="MCI successful",
197 onfail="MCI failed" )
198 # GRAPHS
199 # NOTE: important params here:
200 # job = name of Jenkins job
201 # Plot Name = Plot-HA, only can be used if multiple plots
202 # index = The number of the graph under plot name
203 job = "HAscaling"
204 plotName = "Plot-HA"
205 index = "0"
206 graphs = '<ac:structured-macro ac:name="html">\n'
207 graphs += '<ac:plain-text-body><![CDATA[\n'
208 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
209 '/plot/' + plotName + '/getPlot?index=' + index +\
210 '&width=500&height=300"' +\
211 'noborder="0" width="500" height="300" scrolling="yes" ' +\
212 'seamless="seamless"></iframe>\n'
213 graphs += ']]></ac:plain-text-body>\n'
214 graphs += '</ac:structured-macro>\n'
215 main.log.wiki(graphs)
216
217 main.step( "Copying backup config files" )
218 path = "~/onos/tools/package/bin/onos-service"
219 cp = main.ONOSbench.scp( main.ONOSbench,
220 path,
221 path + ".backup",
222 direction="to" )
223
224 utilities.assert_equals( expect=main.TRUE,
225 actual=cp,
226 onpass="Copy backup config file succeeded",
227 onfail="Copy backup config file failed" )
228 # we need to modify the onos-service file to use remote metadata file
229 # url for cluster metadata file
230 ip = main.ONOSbench.getIpAddr()
231 metaFile = "cluster.json"
232 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
233 main.log.warn( javaArgs )
234 main.log.warn( repr( javaArgs ) )
235 handle = main.ONOSbench.handle
236 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
237 main.log.warn( sed )
238 main.log.warn( repr( sed ) )
239 handle.sendline( sed )
240 handle.expect( "\$" )
241 main.log.debug( repr( handle.before ) )
242
243 main.step( "Creating ONOS package" )
244 packageResult = main.ONOSbench.onosPackage()
245 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
246 onpass="ONOS package successful",
247 onfail="ONOS package failed" )
248
249 main.step( "Installing ONOS package" )
250 onosInstallResult = main.TRUE
251 for i in range( main.ONOSbench.maxNodes ):
252 node = main.nodes[i]
253 options = "-f"
254 if i >= main.numCtrls:
255 options = "-nf" # Don't start more than the current scale
256 tmpResult = main.ONOSbench.onosInstall( options=options,
257 node=node.ip_address )
258 onosInstallResult = onosInstallResult and tmpResult
259 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
260 onpass="ONOS install successful",
261 onfail="ONOS install failed" )
262
263 # Cleanup custom onos-service file
264 main.ONOSbench.scp( main.ONOSbench,
265 path + ".backup",
266 path,
267 direction="to" )
268
269 main.step( "Checking if ONOS is up yet" )
270 for i in range( 2 ):
271 onosIsupResult = main.TRUE
272 for i in range( main.numCtrls ):
273 node = main.nodes[i]
274 started = main.ONOSbench.isup( node.ip_address )
275 if not started:
276 main.log.error( node.name + " hasn't started" )
277 onosIsupResult = onosIsupResult and started
278 if onosIsupResult == main.TRUE:
279 break
280 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
281 onpass="ONOS startup successful",
282 onfail="ONOS startup failed" )
283
284 main.log.step( "Starting ONOS CLI sessions" )
285 cliResults = main.TRUE
286 threads = []
287 for i in range( main.numCtrls ):
288 t = main.Thread( target=main.CLIs[i].startOnosCli,
289 name="startOnosCli-" + str( i ),
290 args=[main.nodes[i].ip_address] )
291 threads.append( t )
292 t.start()
293
294 for t in threads:
295 t.join()
296 cliResults = cliResults and t.result
297 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
298 onpass="ONOS cli startup successful",
299 onfail="ONOS cli startup failed" )
300
301 # Create a list of active nodes for use when some nodes are stopped
302 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
303
304 if main.params[ 'tcpdump' ].lower() == "true":
305 main.step( "Start Packet Capture MN" )
306 main.Mininet2.startTcpdump(
307 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
308 + "-MN.pcap",
309 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
310 port=main.params[ 'MNtcpdump' ][ 'port' ] )
311
312 main.step( "Checking ONOS nodes" )
313 nodeResults = utilities.retry( main.HA.nodesCheck,
314 False,
315 args=[main.activeNodes],
316 attempts=5 )
317 utilities.assert_equals( expect=True, actual=nodeResults,
318 onpass="Nodes check successful",
319 onfail="Nodes check NOT successful" )
320
321 if not nodeResults:
Jon Hall7ac7bc32016-05-05 10:57:02 -0700322 for i in main.activeNodes:
323 cli = main.CLIs[i]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700324 main.log.debug( "{} components not ACTIVE: \n{}".format(
325 cli.name,
326 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700327 main.log.error( "Failed to start ONOS, stopping test" )
328 main.cleanup()
329 main.exit()
330
331 main.step( "Activate apps defined in the params file" )
332 # get data from the params
333 apps = main.params.get( 'apps' )
334 if apps:
335 apps = apps.split(',')
336 main.log.warn( apps )
337 activateResult = True
338 for app in apps:
339 main.CLIs[ 0 ].app( app, "Activate" )
340 # TODO: check this worked
341 time.sleep( 10 ) # wait for apps to activate
342 for app in apps:
343 state = main.CLIs[ 0 ].appStatus( app )
344 if state == "ACTIVE":
345 activateResult = activateResult and True
346 else:
347 main.log.error( "{} is in {} state".format( app, state ) )
348 activateResult = False
349 utilities.assert_equals( expect=True,
350 actual=activateResult,
351 onpass="Successfully activated apps",
352 onfail="Failed to activate apps" )
353 else:
354 main.log.warn( "No apps were specified to be loaded after startup" )
355
356 main.step( "Set ONOS configurations" )
357 config = main.params.get( 'ONOS_Configuration' )
358 if config:
359 main.log.debug( config )
360 checkResult = main.TRUE
361 for component in config:
362 for setting in config[component]:
363 value = config[component][setting]
364 check = main.CLIs[ 0 ].setCfg( component, setting, value )
365 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
366 checkResult = check and checkResult
367 utilities.assert_equals( expect=main.TRUE,
368 actual=checkResult,
369 onpass="Successfully set config",
370 onfail="Failed to set config" )
371 else:
372 main.log.warn( "No configurations were specified to be changed after startup" )
373
374 main.step( "App Ids check" )
375 appCheck = main.TRUE
376 threads = []
377 for i in main.activeNodes:
378 t = main.Thread( target=main.CLIs[i].appToIDCheck,
379 name="appToIDCheck-" + str( i ),
380 args=[] )
381 threads.append( t )
382 t.start()
383
384 for t in threads:
385 t.join()
386 appCheck = appCheck and t.result
387 if appCheck != main.TRUE:
388 node = main.activeNodes[0]
389 main.log.warn( main.CLIs[node].apps() )
390 main.log.warn( main.CLIs[node].appIDs() )
391 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
392 onpass="App Ids seem to be correct",
393 onfail="Something is wrong with app Ids" )
394
395 def CASE2( self, main ):
396 """
397 Assign devices to controllers
398 """
399 import re
400 assert main.numCtrls, "main.numCtrls not defined"
401 assert main, "main not defined"
402 assert utilities.assert_equals, "utilities.assert_equals not defined"
403 assert main.CLIs, "main.CLIs not defined"
404 assert main.nodes, "main.nodes not defined"
405
406 main.case( "Assigning devices to controllers" )
407 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
408 "and check that an ONOS node becomes the " +\
409 "master of the device."
410 main.step( "Assign switches to controllers" )
411
412 ipList = []
413 for i in range( main.ONOSbench.maxNodes ):
414 ipList.append( main.nodes[ i ].ip_address )
415 swList = []
416 for i in range( 1, 29 ):
417 swList.append( "s" + str( i ) )
418 main.Mininet1.assignSwController( sw=swList, ip=ipList )
419
420 mastershipCheck = main.TRUE
421 for i in range( 1, 29 ):
422 response = main.Mininet1.getSwController( "s" + str( i ) )
423 try:
424 main.log.info( str( response ) )
425 except Exception:
426 main.log.info( repr( response ) )
427 for node in main.nodes:
428 if re.search( "tcp:" + node.ip_address, response ):
429 mastershipCheck = mastershipCheck and main.TRUE
430 else:
431 main.log.error( "Error, node " + node.ip_address + " is " +
432 "not in the list of controllers s" +
433 str( i ) + " is connecting to." )
434 mastershipCheck = main.FALSE
435 utilities.assert_equals(
436 expect=main.TRUE,
437 actual=mastershipCheck,
438 onpass="Switch mastership assigned correctly",
439 onfail="Switches not assigned correctly to controllers" )
440
    def CASE21( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster.
            # NOTE: ``c = X % main.numCtrls`` wraps the intended 7-node
            #       assignment onto however many controllers are running.
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    # dpid fragment, e.g. s8 -> "3008"
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    # dpid fragment, e.g. s18 -> "6018"
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment: the requested controller IP should now be the
            # reported master of the device
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
555
556 def CASE3( self, main ):
557 """
558 Assign intents
559 """
560 import time
561 import json
562 assert main.numCtrls, "main.numCtrls not defined"
563 assert main, "main not defined"
564 assert utilities.assert_equals, "utilities.assert_equals not defined"
565 assert main.CLIs, "main.CLIs not defined"
566 assert main.nodes, "main.nodes not defined"
567 try:
568 labels
569 except NameError:
570 main.log.error( "labels not defined, setting to []" )
571 labels = []
572 try:
573 data
574 except NameError:
575 main.log.error( "data not defined, setting to []" )
576 data = []
577 # NOTE: we must reinstall intents until we have a persistant intent
578 # datastore!
579 main.case( "Adding host Intents" )
580 main.caseExplanation = "Discover hosts by using pingall then " +\
581 "assign predetermined host-to-host intents." +\
582 " After installation, check that the intent" +\
583 " is distributed to all nodes and the state" +\
584 " is INSTALLED"
585
586 # install onos-app-fwd
587 main.step( "Install reactive forwarding app" )
588 onosCli = main.CLIs[ main.activeNodes[0] ]
589 installResults = onosCli.activateApp( "org.onosproject.fwd" )
590 utilities.assert_equals( expect=main.TRUE, actual=installResults,
591 onpass="Install fwd successful",
592 onfail="Install fwd failed" )
593
594 main.step( "Check app ids" )
595 appCheck = main.TRUE
596 threads = []
597 for i in main.activeNodes:
598 t = main.Thread( target=main.CLIs[i].appToIDCheck,
599 name="appToIDCheck-" + str( i ),
600 args=[] )
601 threads.append( t )
602 t.start()
603
604 for t in threads:
605 t.join()
606 appCheck = appCheck and t.result
607 if appCheck != main.TRUE:
608 main.log.warn( onosCli.apps() )
609 main.log.warn( onosCli.appIDs() )
610 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
611 onpass="App Ids seem to be correct",
612 onfail="Something is wrong with app Ids" )
613
614 main.step( "Discovering Hosts( Via pingall for now )" )
615 # FIXME: Once we have a host discovery mechanism, use that instead
616 # REACTIVE FWD test
617 pingResult = main.FALSE
618 passMsg = "Reactive Pingall test passed"
619 time1 = time.time()
620 pingResult = main.Mininet1.pingall()
621 time2 = time.time()
622 if not pingResult:
623 main.log.warn("First pingall failed. Trying again...")
624 pingResult = main.Mininet1.pingall()
625 passMsg += " on the second try"
626 utilities.assert_equals(
627 expect=main.TRUE,
628 actual=pingResult,
629 onpass= passMsg,
630 onfail="Reactive Pingall failed, " +
631 "one or more ping pairs failed" )
632 main.log.info( "Time for pingall: %2f seconds" %
633 ( time2 - time1 ) )
634 # timeout for fwd flows
635 time.sleep( 11 )
636 # uninstall onos-app-fwd
637 main.step( "Uninstall reactive forwarding app" )
638 node = main.activeNodes[0]
639 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
640 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
641 onpass="Uninstall fwd successful",
642 onfail="Uninstall fwd failed" )
643
644 main.step( "Check app ids" )
645 threads = []
646 appCheck2 = main.TRUE
647 for i in main.activeNodes:
648 t = main.Thread( target=main.CLIs[i].appToIDCheck,
649 name="appToIDCheck-" + str( i ),
650 args=[] )
651 threads.append( t )
652 t.start()
653
654 for t in threads:
655 t.join()
656 appCheck2 = appCheck2 and t.result
657 if appCheck2 != main.TRUE:
658 node = main.activeNodes[0]
659 main.log.warn( main.CLIs[node].apps() )
660 main.log.warn( main.CLIs[node].appIDs() )
661 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
662 onpass="App Ids seem to be correct",
663 onfail="Something is wrong with app Ids" )
664
665 main.step( "Add host intents via cli" )
666 intentIds = []
667 # TODO: move the host numbers to params
668 # Maybe look at all the paths we ping?
669 intentAddResult = True
670 hostResult = main.TRUE
671 for i in range( 8, 18 ):
672 main.log.info( "Adding host intent between h" + str( i ) +
673 " and h" + str( i + 10 ) )
674 host1 = "00:00:00:00:00:" + \
675 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
676 host2 = "00:00:00:00:00:" + \
677 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
678 # NOTE: getHost can return None
679 host1Dict = onosCli.getHost( host1 )
680 host2Dict = onosCli.getHost( host2 )
681 host1Id = None
682 host2Id = None
683 if host1Dict and host2Dict:
684 host1Id = host1Dict.get( 'id', None )
685 host2Id = host2Dict.get( 'id', None )
686 if host1Id and host2Id:
687 nodeNum = ( i % len( main.activeNodes ) )
688 node = main.activeNodes[nodeNum]
689 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
690 if tmpId:
691 main.log.info( "Added intent with id: " + tmpId )
692 intentIds.append( tmpId )
693 else:
694 main.log.error( "addHostIntent returned: " +
695 repr( tmpId ) )
696 else:
697 main.log.error( "Error, getHost() failed for h" + str( i ) +
698 " and/or h" + str( i + 10 ) )
699 node = main.activeNodes[0]
700 hosts = main.CLIs[node].hosts()
701 main.log.warn( "Hosts output: " )
702 try:
703 main.log.warn( json.dumps( json.loads( hosts ),
704 sort_keys=True,
705 indent=4,
706 separators=( ',', ': ' ) ) )
707 except ( ValueError, TypeError ):
708 main.log.warn( repr( hosts ) )
709 hostResult = main.FALSE
710 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
711 onpass="Found a host id for each host",
712 onfail="Error looking up host ids" )
713
714 intentStart = time.time()
715 onosIds = onosCli.getAllIntentsId()
716 main.log.info( "Submitted intents: " + str( intentIds ) )
717 main.log.info( "Intents in ONOS: " + str( onosIds ) )
718 for intent in intentIds:
719 if intent in onosIds:
720 pass # intent submitted is in onos
721 else:
722 intentAddResult = False
723 if intentAddResult:
724 intentStop = time.time()
725 else:
726 intentStop = None
727 # Print the intent states
728 intents = onosCli.intents()
729 intentStates = []
730 installedCheck = True
731 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
732 count = 0
733 try:
734 for intent in json.loads( intents ):
735 state = intent.get( 'state', None )
736 if "INSTALLED" not in state:
737 installedCheck = False
738 intentId = intent.get( 'id', None )
739 intentStates.append( ( intentId, state ) )
740 except ( ValueError, TypeError ):
741 main.log.exception( "Error parsing intents" )
742 # add submitted intents not in the store
743 tmplist = [ i for i, s in intentStates ]
744 missingIntents = False
745 for i in intentIds:
746 if i not in tmplist:
747 intentStates.append( ( i, " - " ) )
748 missingIntents = True
749 intentStates.sort()
750 for i, s in intentStates:
751 count += 1
752 main.log.info( "%-6s%-15s%-15s" %
753 ( str( count ), str( i ), str( s ) ) )
754 leaders = onosCli.leaders()
755 try:
756 missing = False
757 if leaders:
758 parsedLeaders = json.loads( leaders )
759 main.log.warn( json.dumps( parsedLeaders,
760 sort_keys=True,
761 indent=4,
762 separators=( ',', ': ' ) ) )
763 # check for all intent partitions
764 topics = []
765 for i in range( 14 ):
766 topics.append( "intent-partition-" + str( i ) )
767 main.log.debug( topics )
768 ONOStopics = [ j['topic'] for j in parsedLeaders ]
769 for topic in topics:
770 if topic not in ONOStopics:
771 main.log.error( "Error: " + topic +
772 " not in leaders" )
773 missing = True
774 else:
775 main.log.error( "leaders() returned None" )
776 except ( ValueError, TypeError ):
777 main.log.exception( "Error parsing leaders" )
778 main.log.error( repr( leaders ) )
779 # Check all nodes
780 if missing:
781 for i in main.activeNodes:
782 response = main.CLIs[i].leaders( jsonFormat=False)
783 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
784 str( response ) )
785
786 partitions = onosCli.partitions()
787 try:
788 if partitions :
789 parsedPartitions = json.loads( partitions )
790 main.log.warn( json.dumps( parsedPartitions,
791 sort_keys=True,
792 indent=4,
793 separators=( ',', ': ' ) ) )
794 # TODO check for a leader in all paritions
795 # TODO check for consistency among nodes
796 else:
797 main.log.error( "partitions() returned None" )
798 except ( ValueError, TypeError ):
799 main.log.exception( "Error parsing partitions" )
800 main.log.error( repr( partitions ) )
801 pendingMap = onosCli.pendingMap()
802 try:
803 if pendingMap :
804 parsedPending = json.loads( pendingMap )
805 main.log.warn( json.dumps( parsedPending,
806 sort_keys=True,
807 indent=4,
808 separators=( ',', ': ' ) ) )
809 # TODO check something here?
810 else:
811 main.log.error( "pendingMap() returned None" )
812 except ( ValueError, TypeError ):
813 main.log.exception( "Error parsing pending map" )
814 main.log.error( repr( pendingMap ) )
815
816 intentAddResult = bool( intentAddResult and not missingIntents and
817 installedCheck )
818 if not intentAddResult:
819 main.log.error( "Error in pushing host intents to ONOS" )
820
821 main.step( "Intent Anti-Entropy dispersion" )
822 for j in range(100):
823 correct = True
824 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
825 for i in main.activeNodes:
826 onosIds = []
827 ids = main.CLIs[i].getAllIntentsId()
828 onosIds.append( ids )
829 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
830 str( sorted( onosIds ) ) )
831 if sorted( ids ) != sorted( intentIds ):
832 main.log.warn( "Set of intent IDs doesn't match" )
833 correct = False
834 break
835 else:
836 intents = json.loads( main.CLIs[i].intents() )
837 for intent in intents:
838 if intent[ 'state' ] != "INSTALLED":
839 main.log.warn( "Intent " + intent[ 'id' ] +
840 " is " + intent[ 'state' ] )
841 correct = False
842 break
843 if correct:
844 break
845 else:
846 time.sleep(1)
847 if not intentStop:
848 intentStop = time.time()
849 global gossipTime
850 gossipTime = intentStop - intentStart
851 main.log.info( "It took about " + str( gossipTime ) +
852 " seconds for all intents to appear in each node" )
853 append = False
854 title = "Gossip Intents"
855 count = 1
856 while append is False:
857 curTitle = title + str( count )
858 if curTitle not in labels:
859 labels.append( curTitle )
860 data.append( str( gossipTime ) )
861 append = True
862 else:
863 count += 1
864 gossipPeriod = int( main.params['timers']['gossip'] )
865 maxGossipTime = gossipPeriod * len( main.activeNodes )
866 utilities.assert_greater_equals(
867 expect=maxGossipTime, actual=gossipTime,
868 onpass="ECM anti-entropy for intents worked within " +
869 "expected time",
870 onfail="Intent ECM anti-entropy took too long. " +
871 "Expected time:{}, Actual time:{}".format( maxGossipTime,
872 gossipTime ) )
873 if gossipTime <= maxGossipTime:
874 intentAddResult = True
875
876 if not intentAddResult or "key" in pendingMap:
877 import time
878 installedCheck = True
879 main.log.info( "Sleeping 60 seconds to see if intents are found" )
880 time.sleep( 60 )
881 onosIds = onosCli.getAllIntentsId()
882 main.log.info( "Submitted intents: " + str( intentIds ) )
883 main.log.info( "Intents in ONOS: " + str( onosIds ) )
884 # Print the intent states
885 intents = onosCli.intents()
886 intentStates = []
887 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
888 count = 0
889 try:
890 for intent in json.loads( intents ):
891 # Iter through intents of a node
892 state = intent.get( 'state', None )
893 if "INSTALLED" not in state:
894 installedCheck = False
895 intentId = intent.get( 'id', None )
896 intentStates.append( ( intentId, state ) )
897 except ( ValueError, TypeError ):
898 main.log.exception( "Error parsing intents" )
899 # add submitted intents not in the store
900 tmplist = [ i for i, s in intentStates ]
901 for i in intentIds:
902 if i not in tmplist:
903 intentStates.append( ( i, " - " ) )
904 intentStates.sort()
905 for i, s in intentStates:
906 count += 1
907 main.log.info( "%-6s%-15s%-15s" %
908 ( str( count ), str( i ), str( s ) ) )
909 leaders = onosCli.leaders()
910 try:
911 missing = False
912 if leaders:
913 parsedLeaders = json.loads( leaders )
914 main.log.warn( json.dumps( parsedLeaders,
915 sort_keys=True,
916 indent=4,
917 separators=( ',', ': ' ) ) )
918 # check for all intent partitions
919 # check for election
920 topics = []
921 for i in range( 14 ):
922 topics.append( "intent-partition-" + str( i ) )
923 # FIXME: this should only be after we start the app
924 topics.append( "org.onosproject.election" )
925 main.log.debug( topics )
926 ONOStopics = [ j['topic'] for j in parsedLeaders ]
927 for topic in topics:
928 if topic not in ONOStopics:
929 main.log.error( "Error: " + topic +
930 " not in leaders" )
931 missing = True
932 else:
933 main.log.error( "leaders() returned None" )
934 except ( ValueError, TypeError ):
935 main.log.exception( "Error parsing leaders" )
936 main.log.error( repr( leaders ) )
937 # Check all nodes
938 if missing:
939 for i in main.activeNodes:
940 node = main.CLIs[i]
941 response = node.leaders( jsonFormat=False)
942 main.log.warn( str( node.name ) + " leaders output: \n" +
943 str( response ) )
944
945 partitions = onosCli.partitions()
946 try:
947 if partitions :
948 parsedPartitions = json.loads( partitions )
949 main.log.warn( json.dumps( parsedPartitions,
950 sort_keys=True,
951 indent=4,
952 separators=( ',', ': ' ) ) )
953 # TODO check for a leader in all paritions
954 # TODO check for consistency among nodes
955 else:
956 main.log.error( "partitions() returned None" )
957 except ( ValueError, TypeError ):
958 main.log.exception( "Error parsing partitions" )
959 main.log.error( repr( partitions ) )
960 pendingMap = onosCli.pendingMap()
961 try:
962 if pendingMap :
963 parsedPending = json.loads( pendingMap )
964 main.log.warn( json.dumps( parsedPending,
965 sort_keys=True,
966 indent=4,
967 separators=( ',', ': ' ) ) )
968 # TODO check something here?
969 else:
970 main.log.error( "pendingMap() returned None" )
971 except ( ValueError, TypeError ):
972 main.log.exception( "Error parsing pending map" )
973 main.log.error( repr( pendingMap ) )
974
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Verifies dataplane connectivity for the host intents submitted in
        CASE3:
          1. Poll intent state (up to ~40s) until every intent reports
             INSTALLED.
          2. Ping each h<i> -> h<i+10> pair for i in 8..17.
          3. Check leadership of the intent-partition work topics.
          4. Dump partitions and the pending map for debugging.
          5. If intents were not all INSTALLED, wait 60s, re-dump state,
             and retry the pings once.

        Uses main (TestON harness), main.CLIs (ONOS CLI drivers),
        main.Mininet1 (Mininet driver) and utilities.assert_equals; results
        are reported through the harness, nothing is returned.
        """
        import json
        import time
        # Sanity-check that earlier cases set up the harness state we need.
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All CLI queries in this case go through the first active node.
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until every intent is INSTALLED, sleeping 1s between polls,
        # for at most 40 iterations.
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                # intents() returned non-JSON (e.g. None or an error string)
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # CASE3 adds intents between h<i> and h<i+10>; ping each pair once.
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # Dump the intents from one node to help diagnose the failure.
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                # NOTE(review): assumes the cluster is configured with exactly
                # 14 intent partitions — confirm against the ONOS config.
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            # Topics look healthy; log the raw leaders output from every
            # active node for later comparison.
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            # Intents never converged above: give them one more minute, dump
            # all the distributed state again, then retry the pings once.
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                # Some topic has no leader; capture leaders from every node.
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1256
    def CASE5( self, main ):
        """
        Reading state of ONOS.

        Snapshots the distributed state of the cluster so later cases (e.g.
        CASE7 after the scaling event) can compare against it:
          - device mastership (saved in global mastershipState)
          - intents (saved in global intentState)
          - flows (saved in globals flowState / flows)
          - topology: devices, hosts, ports, links, SCC clusters
        Each piece of state is read from every active node in parallel
        threads, checked for cross-node consistency, and finally compared
        against ground truth from Mininet.  Also kicks off ten long-running
        background pings used by the failure cases.

        Side effects: sets globals mastershipState, intentState, flowState,
        flows; reports step results through utilities.assert_equals.
        """
        import json
        import time
        # Sanity-check harness state from earlier cases.
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents,flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query every active node in parallel; t.result holds each outcome.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        # ONOSMastership is positional: index i corresponds to
        # main.activeNodes[i].
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Compare every node's raw roles output against node 0's.
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Inconsistent: pretty-print each node's view for debugging.
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            # Save the agreed-upon view for comparison in later cases.
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        consistentIntents = True  # Are Intents consistent across nodes?
        intentsResults = True  # Could we read Intents from ONOS?
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE: sorted() on the raw JSON string sorts its characters, which
        # is order-insensitive enough for an equality check between nodes.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            # Use the last node as the reference and diff the rest against it.
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            # Save the agreed-upon intents for later comparison.
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep(30)
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[i] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        # Only flow *counts* are compared, since flow ids differ per node.
        # NOTE(review): if node 0's flows failed to parse, ONOSFlowsJson[0]
        # is None and len() raises an uncaught TypeError here — confirm
        # whether that case should be guarded.
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        global flows
        flows = []
        # Topology has 28 switches (s1..s28); collect each switch's table.
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        # Background pings (500s) between pairs from the params file; these
        # keep traffic flowing across the failure cases that follow.
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source1' ],
            target=main.params[ 'PING' ][ 'target1' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source2' ],
            target=main.params[ 'PING' ][ 'target2' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source3' ],
            target=main.params[ 'PING' ][ 'target3' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source4' ],
            target=main.params[ 'PING' ][ 'target4' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source5' ],
            target=main.params[ 'PING' ][ 'target5' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source6' ],
            target=main.params[ 'PING' ][ 'target6' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source7' ],
            target=main.params[ 'PING' ][ 'target7' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source8' ],
            target=main.params[ 'PING' ][ 'target8' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source9' ],
            target=main.params[ 'PING' ][ 'target9' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source10' ],
            target=main.params[ 'PING' ][ 'target10' ],
            pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
        # Each of the following lists is positional: index i corresponds to
        # main.activeNodes[i].
        devices = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].devices,
                             name="devices-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            devices.append( t.result )
        hosts = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].hosts,
                             name="hosts-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            try:
                # hosts is parsed JSON (unlike devices/ports/links, which
                # stay raw strings until compared below).
                hosts.append( json.loads( t.result ) )
            except ( ValueError, TypeError ):
                # FIXME: better handling of this, print which node
                #        Maybe use thread name?
                main.log.exception( "Error parsing json output of hosts" )
                main.log.warn( repr( t.result ) )
                hosts.append( None )

        ports = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].ports,
                             name="ports-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ports.append( t.result )
        links = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].links,
                             name="links-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            links.append( t.result )
        clusters = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].clusters,
                             name="clusters-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            clusters.append( t.result )
        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Host view is consistent across ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Each host has an IP address" )
        ipResult = main.TRUE
        for controller in range( 0, len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ]:
                for host in hosts[ controller ]:
                    if not host.get( 'ipAddresses', [ ] ):
                        main.log.error( "Error with host ips on controller" +
                                        controllerStr + ": " + str( host ) )
                        ipResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=ipResult,
            onpass="The ips of the hosts aren't empty",
            onfail="The ip of at least one host is missing" )

        # Strongly connected clusters of devices
        main.step( "Cluster view is consistent across ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE

            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        # there should always only be one cluster
        main.step( "Cluster view correct across ONOS nodes" )
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        main.step( "Comparing ONOS topology to MN" )
        devicesResults = main.TRUE
        linksResults = main.TRUE
        hostsResults = main.TRUE
        mnSwitches = main.Mininet1.getSwitches()
        mnLinks = main.Mininet1.getLinks()
        mnHosts = main.Mininet1.getHosts()
        # NOTE(review): this loop iterates node *ids* but uses them to index
        # the positional devices/ports/hosts/links lists (the loops above use
        # range(len(...)) instead).  The two coincide only while
        # main.activeNodes == range(numCtrls), which holds in this test but
        # is a latent inconsistency — confirm and align with the other loops.
        for controller in main.activeNodes:
            controllerStr = str( main.activeNodes[controller] + 1 )
            if devices[ controller ] and ports[ controller ] and\
                    "Error" not in devices[ controller ] and\
                    "Error" not in ports[ controller ]:
                currentDevicesResult = main.Mininet1.compareSwitches(
                    mnSwitches,
                    json.loads( devices[ controller ] ),
                    json.loads( ports[ controller ] ) )
            else:
                currentDevicesResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentDevicesResult,
                                     onpass="ONOS" + controllerStr +
                                     " Switches view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " Switches view is incorrect" )
            if links[ controller ] and "Error" not in links[ controller ]:
                currentLinksResult = main.Mininet1.compareLinks(
                    mnSwitches, mnLinks,
                    json.loads( links[ controller ] ) )
            else:
                currentLinksResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentLinksResult,
                                     onpass="ONOS" + controllerStr +
                                     " links view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " links view is incorrect" )

            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                currentHostsResult = main.Mininet1.compareHosts(
                    mnHosts,
                    hosts[ controller ] )
            else:
                currentHostsResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentHostsResult,
                                     onpass="ONOS" + controllerStr +
                                     " hosts exist in Mininet",
                                     onfail="ONOS" + controllerStr +
                                     " hosts don't match Mininet" )

            devicesResults = devicesResults and currentDevicesResult
            linksResults = linksResults and currentLinksResult
            hostsResults = hostsResults and currentHostsResult

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )
1813
1814 def CASE6( self, main ):
1815 """
1816 The Scaling case.
1817 """
1818 import time
1819 import re
1820 assert main.numCtrls, "main.numCtrls not defined"
1821 assert main, "main not defined"
1822 assert utilities.assert_equals, "utilities.assert_equals not defined"
1823 assert main.CLIs, "main.CLIs not defined"
1824 assert main.nodes, "main.nodes not defined"
1825 try:
1826 labels
1827 except NameError:
1828 main.log.error( "labels not defined, setting to []" )
1829 global labels
1830 labels = []
1831 try:
1832 data
1833 except NameError:
1834 main.log.error( "data not defined, setting to []" )
1835 global data
1836 data = []
1837
Jon Hall69b2b982016-05-11 12:04:59 -07001838 main.case( "Scale the number of nodes in the ONOS cluster" )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07001839
1840 main.step( "Checking ONOS Logs for errors" )
1841 for i in main.activeNodes:
1842 node = main.nodes[i]
1843 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1844 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1845
1846 """
1847 pop # of nodes from a list, might look like 1,3b,3,5b,5,7b,7,7b,5,5b,3...
1848 modify cluster.json file appropriately
1849 install/deactivate node as needed
1850 """
1851
1852 try:
1853 prevNodes = main.activeNodes
1854 scale = main.scaling.pop(0)
1855 if "e" in scale:
1856 equal = True
1857 else:
1858 equal = False
1859 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
1860 main.log.info( "Scaling to {} nodes".format( main.numCtrls ) )
1861 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
1862 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1863 onpass="New cluster metadata file generated",
1864 onfail="Failled to generate new metadata file" )
1865 time.sleep( 5 ) # Give time for nodes to read new file
1866 except IndexError:
1867 main.cleanup()
1868 main.exit()
1869
1870 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
1871 newNodes = [ x for x in main.activeNodes if x not in prevNodes ]
1872
1873 main.step( "Start new nodes" ) # OR stop old nodes?
1874 started = main.TRUE
1875 for i in newNodes:
1876 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1877 utilities.assert_equals( expect=main.TRUE, actual=started,
1878 onpass="ONOS started",
1879 onfail="ONOS start NOT successful" )
1880
1881 main.step( "Checking if ONOS is up yet" )
1882 for i in range( 2 ):
1883 onosIsupResult = main.TRUE
1884 for i in main.activeNodes:
1885 node = main.nodes[i]
1886 started = main.ONOSbench.isup( node.ip_address )
1887 if not started:
1888 main.log.error( node.name + " didn't start!" )
1889 onosIsupResult = onosIsupResult and started
1890 if onosIsupResult == main.TRUE:
1891 break
1892 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1893 onpass="ONOS started",
1894 onfail="ONOS start NOT successful" )
1895
1896 main.log.step( "Starting ONOS CLI sessions" )
1897 cliResults = main.TRUE
1898 threads = []
1899 for i in main.activeNodes:
1900 t = main.Thread( target=main.CLIs[i].startOnosCli,
1901 name="startOnosCli-" + str( i ),
1902 args=[main.nodes[i].ip_address] )
1903 threads.append( t )
1904 t.start()
1905
1906 for t in threads:
1907 t.join()
1908 cliResults = cliResults and t.result
1909 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1910 onpass="ONOS cli started",
1911 onfail="ONOS clis did not start" )
1912
1913 main.step( "Checking ONOS nodes" )
1914 nodeResults = utilities.retry( main.HA.nodesCheck,
1915 False,
1916 args=[main.activeNodes],
1917 attempts=5 )
1918 utilities.assert_equals( expect=True, actual=nodeResults,
1919 onpass="Nodes check successful",
1920 onfail="Nodes check NOT successful" )
1921
1922 for i in range( 10 ):
1923 ready = True
1924 for i in main.activeNodes:
1925 cli = main.CLIs[i]
1926 output = cli.summary()
1927 if not output:
1928 ready = False
1929 if ready:
1930 break
1931 time.sleep( 30 )
1932 utilities.assert_equals( expect=True, actual=ready,
1933 onpass="ONOS summary command succeded",
1934 onfail="ONOS summary command failed" )
1935 if not ready:
1936 main.cleanup()
1937 main.exit()
1938
1939 # Rerun for election on new nodes
1940 runResults = main.TRUE
1941 for i in main.activeNodes:
1942 cli = main.CLIs[i]
1943 run = cli.electionTestRun()
1944 if run != main.TRUE:
1945 main.log.error( "Error running for election on " + cli.name )
1946 runResults = runResults and run
1947 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1948 onpass="Reran for election",
1949 onfail="Failed to rerun for election" )
1950
1951 # TODO: Make this configurable
1952 time.sleep( 60 )
1953 for node in main.activeNodes:
1954 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1955 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1956 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1957 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1958 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1959
    def CASE7( self, main ):
        """
        Check state after ONOS scaling

        Re-reads distributed state from the (post-scaling) set of active
        nodes and verifies it survived the cluster resize done in CASE6:
        - every switch still has a master
        - mastership, intents, and flow tables are consistent across nodes
        - intents and OF flow tables match the state saved before scaling
        - leadership election still reports a single, agreed-upon leader

        NOTE(review): relies on module-level globals saved by earlier cases
        (`intentState` from CASE5, `flows` for the per-switch flow-table
        snapshots) — confirm those cases ran first.
        """
        import json
        # Fail fast if the test environment was not fully initialized
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # One query thread per active node; results are AND-ed together
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect each node's full mastership view (JSON string) in parallel
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # An empty or "Error"-containing response from any node fails the step
        for i in range( len( ONOSMastership ) ):
            # node numbering is 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Every node's raw mastership output should be identical
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's pretty-printed view to aid debugging
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        # Collect each node's intents (JSON) in parallel
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Order-insensitive comparison of each node's output vs node 0's
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                             "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = " ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One table row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states (e.g. INSTALLED counts)
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            # Pretty-print every node's intents for debugging
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # EAFP check that the cross-case global `intentState` exists at all
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length but not byte-equal: compare intent objects
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before scaling",
            onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        FlowTables = main.TRUE
        # NOTE(review): assumes a 28-switch topology and that `flows` holds
        # the pre-scaling per-switch flow-table snapshots — confirm CASE5
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        # Stop the background long-running pings started earlier in the test
        main.Mininet2.pingLongKill()
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        # Every node must report the same, non-error, non-None leader
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2268
2269 def CASE8( self, main ):
2270 """
2271 Compare topo
2272 """
2273 import json
2274 import time
2275 assert main.numCtrls, "main.numCtrls not defined"
2276 assert main, "main not defined"
2277 assert utilities.assert_equals, "utilities.assert_equals not defined"
2278 assert main.CLIs, "main.CLIs not defined"
2279 assert main.nodes, "main.nodes not defined"
2280
2281 main.case( "Compare ONOS Topology view to Mininet topology" )
2282 main.caseExplanation = "Compare topology objects between Mininet" +\
2283 " and ONOS"
2284 topoResult = main.FALSE
2285 topoFailMsg = "ONOS topology don't match Mininet"
2286 elapsed = 0
2287 count = 0
2288 main.step( "Comparing ONOS topology to MN topology" )
2289 startTime = time.time()
2290 # Give time for Gossip to work
2291 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2292 devicesResults = main.TRUE
2293 linksResults = main.TRUE
2294 hostsResults = main.TRUE
2295 hostAttachmentResults = True
2296 count += 1
2297 cliStart = time.time()
2298 devices = []
2299 threads = []
2300 for i in main.activeNodes:
2301 t = main.Thread( target=utilities.retry,
2302 name="devices-" + str( i ),
2303 args=[ main.CLIs[i].devices, [ None ] ],
2304 kwargs= { 'sleep': 5, 'attempts': 5,
2305 'randomTime': True } )
2306 threads.append( t )
2307 t.start()
2308
2309 for t in threads:
2310 t.join()
2311 devices.append( t.result )
2312 hosts = []
2313 ipResult = main.TRUE
2314 threads = []
2315 for i in main.activeNodes:
2316 t = main.Thread( target=utilities.retry,
2317 name="hosts-" + str( i ),
2318 args=[ main.CLIs[i].hosts, [ None ] ],
2319 kwargs= { 'sleep': 5, 'attempts': 5,
2320 'randomTime': True } )
2321 threads.append( t )
2322 t.start()
2323
2324 for t in threads:
2325 t.join()
2326 try:
2327 hosts.append( json.loads( t.result ) )
2328 except ( ValueError, TypeError ):
2329 main.log.exception( "Error parsing hosts results" )
2330 main.log.error( repr( t.result ) )
2331 hosts.append( None )
2332 for controller in range( 0, len( hosts ) ):
2333 controllerStr = str( main.activeNodes[controller] + 1 )
2334 if hosts[ controller ]:
2335 for host in hosts[ controller ]:
2336 if host is None or host.get( 'ipAddresses', [] ) == []:
2337 main.log.error(
2338 "Error with host ipAddresses on controller" +
2339 controllerStr + ": " + str( host ) )
2340 ipResult = main.FALSE
2341 ports = []
2342 threads = []
2343 for i in main.activeNodes:
2344 t = main.Thread( target=utilities.retry,
2345 name="ports-" + str( i ),
2346 args=[ main.CLIs[i].ports, [ None ] ],
2347 kwargs= { 'sleep': 5, 'attempts': 5,
2348 'randomTime': True } )
2349 threads.append( t )
2350 t.start()
2351
2352 for t in threads:
2353 t.join()
2354 ports.append( t.result )
2355 links = []
2356 threads = []
2357 for i in main.activeNodes:
2358 t = main.Thread( target=utilities.retry,
2359 name="links-" + str( i ),
2360 args=[ main.CLIs[i].links, [ None ] ],
2361 kwargs= { 'sleep': 5, 'attempts': 5,
2362 'randomTime': True } )
2363 threads.append( t )
2364 t.start()
2365
2366 for t in threads:
2367 t.join()
2368 links.append( t.result )
2369 clusters = []
2370 threads = []
2371 for i in main.activeNodes:
2372 t = main.Thread( target=utilities.retry,
2373 name="clusters-" + str( i ),
2374 args=[ main.CLIs[i].clusters, [ None ] ],
2375 kwargs= { 'sleep': 5, 'attempts': 5,
2376 'randomTime': True } )
2377 threads.append( t )
2378 t.start()
2379
2380 for t in threads:
2381 t.join()
2382 clusters.append( t.result )
2383
2384 elapsed = time.time() - startTime
2385 cliTime = time.time() - cliStart
2386 print "Elapsed time: " + str( elapsed )
2387 print "CLI time: " + str( cliTime )
2388
2389 if all( e is None for e in devices ) and\
2390 all( e is None for e in hosts ) and\
2391 all( e is None for e in ports ) and\
2392 all( e is None for e in links ) and\
2393 all( e is None for e in clusters ):
2394 topoFailMsg = "Could not get topology from ONOS"
2395 main.log.error( topoFailMsg )
2396 continue # Try again, No use trying to compare
2397
2398 mnSwitches = main.Mininet1.getSwitches()
2399 mnLinks = main.Mininet1.getLinks()
2400 mnHosts = main.Mininet1.getHosts()
2401 for controller in range( len( main.activeNodes ) ):
2402 controllerStr = str( main.activeNodes[controller] + 1 )
2403 if devices[ controller ] and ports[ controller ] and\
2404 "Error" not in devices[ controller ] and\
2405 "Error" not in ports[ controller ]:
2406
2407 try:
2408 currentDevicesResult = main.Mininet1.compareSwitches(
2409 mnSwitches,
2410 json.loads( devices[ controller ] ),
2411 json.loads( ports[ controller ] ) )
2412 except ( TypeError, ValueError ):
2413 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2414 devices[ controller ], ports[ controller ] ) )
2415 else:
2416 currentDevicesResult = main.FALSE
2417 utilities.assert_equals( expect=main.TRUE,
2418 actual=currentDevicesResult,
2419 onpass="ONOS" + controllerStr +
2420 " Switches view is correct",
2421 onfail="ONOS" + controllerStr +
2422 " Switches view is incorrect" )
2423
2424 if links[ controller ] and "Error" not in links[ controller ]:
2425 currentLinksResult = main.Mininet1.compareLinks(
2426 mnSwitches, mnLinks,
2427 json.loads( links[ controller ] ) )
2428 else:
2429 currentLinksResult = main.FALSE
2430 utilities.assert_equals( expect=main.TRUE,
2431 actual=currentLinksResult,
2432 onpass="ONOS" + controllerStr +
2433 " links view is correct",
2434 onfail="ONOS" + controllerStr +
2435 " links view is incorrect" )
2436 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2437 currentHostsResult = main.Mininet1.compareHosts(
2438 mnHosts,
2439 hosts[ controller ] )
2440 elif hosts[ controller ] == []:
2441 currentHostsResult = main.TRUE
2442 else:
2443 currentHostsResult = main.FALSE
2444 utilities.assert_equals( expect=main.TRUE,
2445 actual=currentHostsResult,
2446 onpass="ONOS" + controllerStr +
2447 " hosts exist in Mininet",
2448 onfail="ONOS" + controllerStr +
2449 " hosts don't match Mininet" )
2450 # CHECKING HOST ATTACHMENT POINTS
2451 hostAttachment = True
2452 zeroHosts = False
2453 # FIXME: topo-HA/obelisk specific mappings:
2454 # key is mac and value is dpid
2455 mappings = {}
2456 for i in range( 1, 29 ): # hosts 1 through 28
2457 # set up correct variables:
2458 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2459 if i == 1:
2460 deviceId = "1000".zfill(16)
2461 elif i == 2:
2462 deviceId = "2000".zfill(16)
2463 elif i == 3:
2464 deviceId = "3000".zfill(16)
2465 elif i == 4:
2466 deviceId = "3004".zfill(16)
2467 elif i == 5:
2468 deviceId = "5000".zfill(16)
2469 elif i == 6:
2470 deviceId = "6000".zfill(16)
2471 elif i == 7:
2472 deviceId = "6007".zfill(16)
2473 elif i >= 8 and i <= 17:
2474 dpid = '3' + str( i ).zfill( 3 )
2475 deviceId = dpid.zfill(16)
2476 elif i >= 18 and i <= 27:
2477 dpid = '6' + str( i ).zfill( 3 )
2478 deviceId = dpid.zfill(16)
2479 elif i == 28:
2480 deviceId = "2800".zfill(16)
2481 mappings[ macId ] = deviceId
2482 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2483 if hosts[ controller ] == []:
2484 main.log.warn( "There are no hosts discovered" )
2485 zeroHosts = True
2486 else:
2487 for host in hosts[ controller ]:
2488 mac = None
2489 location = None
2490 device = None
2491 port = None
2492 try:
2493 mac = host.get( 'mac' )
2494 assert mac, "mac field could not be found for this host object"
2495
2496 location = host.get( 'location' )
2497 assert location, "location field could not be found for this host object"
2498
2499 # Trim the protocol identifier off deviceId
2500 device = str( location.get( 'elementId' ) ).split(':')[1]
2501 assert device, "elementId field could not be found for this host location object"
2502
2503 port = location.get( 'port' )
2504 assert port, "port field could not be found for this host location object"
2505
2506 # Now check if this matches where they should be
2507 if mac and device and port:
2508 if str( port ) != "1":
2509 main.log.error( "The attachment port is incorrect for " +
2510 "host " + str( mac ) +
2511 ". Expected: 1 Actual: " + str( port) )
2512 hostAttachment = False
2513 if device != mappings[ str( mac ) ]:
2514 main.log.error( "The attachment device is incorrect for " +
2515 "host " + str( mac ) +
2516 ". Expected: " + mappings[ str( mac ) ] +
2517 " Actual: " + device )
2518 hostAttachment = False
2519 else:
2520 hostAttachment = False
2521 except AssertionError:
2522 main.log.exception( "Json object not as expected" )
2523 main.log.error( repr( host ) )
2524 hostAttachment = False
2525 else:
2526 main.log.error( "No hosts json output or \"Error\"" +
2527 " in output. hosts = " +
2528 repr( hosts[ controller ] ) )
2529 if zeroHosts is False:
2530 # TODO: Find a way to know if there should be hosts in a
2531 # given point of the test
2532 hostAttachment = True
2533
2534 # END CHECKING HOST ATTACHMENT POINTS
2535 devicesResults = devicesResults and currentDevicesResult
2536 linksResults = linksResults and currentLinksResult
2537 hostsResults = hostsResults and currentHostsResult
2538 hostAttachmentResults = hostAttachmentResults and\
2539 hostAttachment
2540 topoResult = ( devicesResults and linksResults
2541 and hostsResults and ipResult and
2542 hostAttachmentResults )
2543 utilities.assert_equals( expect=True,
2544 actual=topoResult,
2545 onpass="ONOS topology matches Mininet",
2546 onfail=topoFailMsg )
2547 # End of While loop to pull ONOS state
2548
2549 # Compare json objects for hosts and dataplane clusters
2550
2551 # hosts
2552 main.step( "Hosts view is consistent across all ONOS nodes" )
2553 consistentHostsResult = main.TRUE
2554 for controller in range( len( hosts ) ):
2555 controllerStr = str( main.activeNodes[controller] + 1 )
2556 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2557 if hosts[ controller ] == hosts[ 0 ]:
2558 continue
2559 else: # hosts not consistent
2560 main.log.error( "hosts from ONOS" + controllerStr +
2561 " is inconsistent with ONOS1" )
2562 main.log.warn( repr( hosts[ controller ] ) )
2563 consistentHostsResult = main.FALSE
2564
2565 else:
2566 main.log.error( "Error in getting ONOS hosts from ONOS" +
2567 controllerStr )
2568 consistentHostsResult = main.FALSE
2569 main.log.warn( "ONOS" + controllerStr +
2570 " hosts response: " +
2571 repr( hosts[ controller ] ) )
2572 utilities.assert_equals(
2573 expect=main.TRUE,
2574 actual=consistentHostsResult,
2575 onpass="Hosts view is consistent across all ONOS nodes",
2576 onfail="ONOS nodes have different views of hosts" )
2577
2578 main.step( "Hosts information is correct" )
2579 hostsResults = hostsResults and ipResult
2580 utilities.assert_equals(
2581 expect=main.TRUE,
2582 actual=hostsResults,
2583 onpass="Host information is correct",
2584 onfail="Host information is incorrect" )
2585
2586 main.step( "Host attachment points to the network" )
2587 utilities.assert_equals(
2588 expect=True,
2589 actual=hostAttachmentResults,
2590 onpass="Hosts are correctly attached to the network",
2591 onfail="ONOS did not correctly attach hosts to the network" )
2592
2593 # Strongly connected clusters of devices
2594 main.step( "Clusters view is consistent across all ONOS nodes" )
2595 consistentClustersResult = main.TRUE
2596 for controller in range( len( clusters ) ):
2597 controllerStr = str( main.activeNodes[controller] + 1 )
2598 if "Error" not in clusters[ controller ]:
2599 if clusters[ controller ] == clusters[ 0 ]:
2600 continue
2601 else: # clusters not consistent
2602 main.log.error( "clusters from ONOS" +
2603 controllerStr +
2604 " is inconsistent with ONOS1" )
2605 consistentClustersResult = main.FALSE
2606 else:
2607 main.log.error( "Error in getting dataplane clusters " +
2608 "from ONOS" + controllerStr )
2609 consistentClustersResult = main.FALSE
2610 main.log.warn( "ONOS" + controllerStr +
2611 " clusters response: " +
2612 repr( clusters[ controller ] ) )
2613 utilities.assert_equals(
2614 expect=main.TRUE,
2615 actual=consistentClustersResult,
2616 onpass="Clusters view is consistent across all ONOS nodes",
2617 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07002618 if not consistentClustersResult:
2619 main.log.debug( clusters )
Jon Hall9ebd1bd2016-04-19 01:37:17 -07002620
2621 main.step( "There is only one SCC" )
2622 # there should always only be one cluster
2623 try:
2624 numClusters = len( json.loads( clusters[ 0 ] ) )
2625 except ( ValueError, TypeError ):
2626 main.log.exception( "Error parsing clusters[0]: " +
2627 repr( clusters[0] ) )
2628 numClusters = "ERROR"
2629 clusterResults = main.FALSE
2630 if numClusters == 1:
2631 clusterResults = main.TRUE
2632 utilities.assert_equals(
2633 expect=1,
2634 actual=numClusters,
2635 onpass="ONOS shows 1 SCC",
2636 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2637
2638 topoResult = ( devicesResults and linksResults
2639 and hostsResults and consistentHostsResult
2640 and consistentClustersResult and clusterResults
2641 and ipResult and hostAttachmentResults )
2642
2643 topoResult = topoResult and int( count <= 2 )
2644 note = "note it takes about " + str( int( cliTime ) ) + \
2645 " seconds for the test to make all the cli calls to fetch " +\
2646 "the topology from each ONOS instance"
2647 main.log.info(
2648 "Very crass estimate for topology discovery/convergence( " +
2649 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2650 str( count ) + " tries" )
2651
2652 main.step( "Device information is correct" )
2653 utilities.assert_equals(
2654 expect=main.TRUE,
2655 actual=devicesResults,
2656 onpass="Device information is correct",
2657 onfail="Device information is incorrect" )
2658
2659 main.step( "Links are correct" )
2660 utilities.assert_equals(
2661 expect=main.TRUE,
2662 actual=linksResults,
2663 onpass="Link are correct",
2664 onfail="Links are incorrect" )
2665
2666 main.step( "Hosts are correct" )
2667 utilities.assert_equals(
2668 expect=main.TRUE,
2669 actual=hostsResults,
2670 onpass="Hosts are correct",
2671 onfail="Hosts are incorrect" )
2672
2673 # FIXME: move this to an ONOS state case
2674 main.step( "Checking ONOS nodes" )
2675 nodeResults = utilities.retry( main.HA.nodesCheck,
2676 False,
2677 args=[main.activeNodes],
2678 attempts=5 )
2679 utilities.assert_equals( expect=True, actual=nodeResults,
2680 onpass="Nodes check successful",
2681 onfail="Nodes check NOT successful" )
2682 if not nodeResults:
2683 for i in main.activeNodes:
2684 main.log.debug( "{} components not ACTIVE: \n{}".format(
2685 main.CLIs[i].name,
2686 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
2687
2688 def CASE9( self, main ):
2689 """
2690 Link s3-s28 down
2691 """
2692 import time
2693 assert main.numCtrls, "main.numCtrls not defined"
2694 assert main, "main not defined"
2695 assert utilities.assert_equals, "utilities.assert_equals not defined"
2696 assert main.CLIs, "main.CLIs not defined"
2697 assert main.nodes, "main.nodes not defined"
2698 # NOTE: You should probably run a topology check after this
2699
2700 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2701
2702 description = "Turn off a link to ensure that Link Discovery " +\
2703 "is working properly"
2704 main.case( description )
2705
2706 main.step( "Kill Link between s3 and s28" )
2707 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2708 main.log.info( "Waiting " + str( linkSleep ) +
2709 " seconds for link down to be discovered" )
2710 time.sleep( linkSleep )
2711 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2712 onpass="Link down successful",
2713 onfail="Failed to bring link down" )
2714 # TODO do some sort of check here
2715
2716 def CASE10( self, main ):
2717 """
2718 Link s3-s28 up
2719 """
2720 import time
2721 assert main.numCtrls, "main.numCtrls not defined"
2722 assert main, "main not defined"
2723 assert utilities.assert_equals, "utilities.assert_equals not defined"
2724 assert main.CLIs, "main.CLIs not defined"
2725 assert main.nodes, "main.nodes not defined"
2726 # NOTE: You should probably run a topology check after this
2727
2728 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2729
2730 description = "Restore a link to ensure that Link Discovery is " + \
2731 "working properly"
2732 main.case( description )
2733
2734 main.step( "Bring link between s3 and s28 back up" )
2735 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2736 main.log.info( "Waiting " + str( linkSleep ) +
2737 " seconds for link up to be discovered" )
2738 time.sleep( linkSleep )
2739 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2740 onpass="Link up successful",
2741 onfail="Failed to bring link up" )
2742 # TODO do some sort of check here
2743
2744 def CASE11( self, main ):
2745 """
2746 Switch Down
2747 """
2748 # NOTE: You should probably run a topology check after this
2749 import time
2750 assert main.numCtrls, "main.numCtrls not defined"
2751 assert main, "main not defined"
2752 assert utilities.assert_equals, "utilities.assert_equals not defined"
2753 assert main.CLIs, "main.CLIs not defined"
2754 assert main.nodes, "main.nodes not defined"
2755
2756 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2757
2758 description = "Killing a switch to ensure it is discovered correctly"
2759 onosCli = main.CLIs[ main.activeNodes[0] ]
2760 main.case( description )
2761 switch = main.params[ 'kill' ][ 'switch' ]
2762 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2763
2764 # TODO: Make this switch parameterizable
2765 main.step( "Kill " + switch )
2766 main.log.info( "Deleting " + switch )
2767 main.Mininet1.delSwitch( switch )
2768 main.log.info( "Waiting " + str( switchSleep ) +
2769 " seconds for switch down to be discovered" )
2770 time.sleep( switchSleep )
2771 device = onosCli.getDevice( dpid=switchDPID )
2772 # Peek at the deleted switch
2773 main.log.warn( str( device ) )
2774 result = main.FALSE
2775 if device and device[ 'available' ] is False:
2776 result = main.TRUE
2777 utilities.assert_equals( expect=main.TRUE, actual=result,
2778 onpass="Kill switch successful",
2779 onfail="Failed to kill switch?" )
2780
2781 def CASE12( self, main ):
2782 """
2783 Switch Up
2784 """
2785 # NOTE: You should probably run a topology check after this
2786 import time
2787 assert main.numCtrls, "main.numCtrls not defined"
2788 assert main, "main not defined"
2789 assert utilities.assert_equals, "utilities.assert_equals not defined"
2790 assert main.CLIs, "main.CLIs not defined"
2791 assert main.nodes, "main.nodes not defined"
2792
2793 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2794 switch = main.params[ 'kill' ][ 'switch' ]
2795 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2796 links = main.params[ 'kill' ][ 'links' ].split()
2797 onosCli = main.CLIs[ main.activeNodes[0] ]
2798 description = "Adding a switch to ensure it is discovered correctly"
2799 main.case( description )
2800
2801 main.step( "Add back " + switch )
2802 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2803 for peer in links:
2804 main.Mininet1.addLink( switch, peer )
2805 ipList = [ node.ip_address for node in main.nodes ]
2806 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2807 main.log.info( "Waiting " + str( switchSleep ) +
2808 " seconds for switch up to be discovered" )
2809 time.sleep( switchSleep )
2810 device = onosCli.getDevice( dpid=switchDPID )
2811 # Peek at the deleted switch
2812 main.log.warn( str( device ) )
2813 result = main.FALSE
2814 if device and device[ 'available' ]:
2815 result = main.TRUE
2816 utilities.assert_equals( expect=main.TRUE, actual=result,
2817 onpass="add switch successful",
2818 onfail="Failed to add switch?" )
2819
2820 def CASE13( self, main ):
2821 """
2822 Clean up
2823 """
2824 assert main.numCtrls, "main.numCtrls not defined"
2825 assert main, "main not defined"
2826 assert utilities.assert_equals, "utilities.assert_equals not defined"
2827 assert main.CLIs, "main.CLIs not defined"
2828 assert main.nodes, "main.nodes not defined"
2829
2830 main.case( "Test Cleanup" )
2831 main.step( "Killing tcpdumps" )
2832 main.Mininet2.stopTcpdump()
2833
2834 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2835 main.step( "Copying MN pcap and ONOS log files to test station" )
2836 # NOTE: MN Pcap file is being saved to logdir.
2837 # We scp this file as MN and TestON aren't necessarily the same vm
2838
2839 # FIXME: To be replaced with a Jenkin's post script
2840 # TODO: Load these from params
2841 # NOTE: must end in /
2842 logFolder = "/opt/onos/log/"
2843 logFiles = [ "karaf.log", "karaf.log.1" ]
2844 # NOTE: must end in /
2845 for f in logFiles:
2846 for node in main.nodes:
2847 dstName = main.logdir + "/" + node.name + "-" + f
2848 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2849 logFolder + f, dstName )
2850 # std*.log's
2851 # NOTE: must end in /
2852 logFolder = "/opt/onos/var/"
2853 logFiles = [ "stderr.log", "stdout.log" ]
2854 # NOTE: must end in /
2855 for f in logFiles:
2856 for node in main.nodes:
2857 dstName = main.logdir + "/" + node.name + "-" + f
2858 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2859 logFolder + f, dstName )
2860 else:
2861 main.log.debug( "skipping saving log files" )
2862
2863 main.step( "Stopping Mininet" )
2864 mnResult = main.Mininet1.stopNet()
2865 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2866 onpass="Mininet stopped",
2867 onfail="MN cleanup NOT successful" )
2868
2869 main.step( "Checking ONOS Logs for errors" )
2870 for node in main.nodes:
2871 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2872 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2873
2874 try:
2875 timerLog = open( main.logdir + "/Timers.csv", 'w')
2876 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2877 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2878 timerLog.close()
2879 except NameError, e:
2880 main.log.exception(e)
2881
2882 main.step( "Stopping webserver" )
2883 status = main.Server.stop( )
2884 utilities.assert_equals( expect=main.TRUE, actual=status,
2885 onpass="Stop Server",
2886 onfail="Failled to stop SimpleHTTPServer" )
2887 del main.Server
2888
2889 def CASE14( self, main ):
2890 """
2891 start election app on all onos nodes
2892 """
2893 import time
2894 assert main.numCtrls, "main.numCtrls not defined"
2895 assert main, "main not defined"
2896 assert utilities.assert_equals, "utilities.assert_equals not defined"
2897 assert main.CLIs, "main.CLIs not defined"
2898 assert main.nodes, "main.nodes not defined"
2899
2900 main.case("Start Leadership Election app")
2901 main.step( "Install leadership election app" )
2902 onosCli = main.CLIs[ main.activeNodes[0] ]
2903 appResult = onosCli.activateApp( "org.onosproject.election" )
2904 utilities.assert_equals(
2905 expect=main.TRUE,
2906 actual=appResult,
2907 onpass="Election app installed",
2908 onfail="Something went wrong with installing Leadership election" )
2909
2910 main.step( "Run for election on each node" )
2911 for i in main.activeNodes:
2912 main.CLIs[i].electionTestRun()
2913 time.sleep(5)
2914 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2915 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2916 utilities.assert_equals(
2917 expect=True,
2918 actual=sameResult,
2919 onpass="All nodes see the same leaderboards",
2920 onfail="Inconsistent leaderboards" )
2921
2922 if sameResult:
2923 leader = leaders[ 0 ][ 0 ]
2924 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2925 correctLeader = True
2926 else:
2927 correctLeader = False
2928 main.step( "First node was elected leader" )
2929 utilities.assert_equals(
2930 expect=True,
2931 actual=correctLeader,
2932 onpass="Correct leader was elected",
2933 onfail="Incorrect leader" )
2934
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal and later before withdrawal vs after re-election
        """
        import time
        # Sanity-check that the TestON framework objects are in place
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        # With a single controller, withdrawing the only candidate should
        # leave the topic with no leader at all
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        # Without the election app loaded there is nothing left to test
        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        # consistentLeaderboards returns ( allSame, list of leaderboards )
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no node matched the leader's IP
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means a node reported an empty leadership
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Not enough candidates recorded to know who should win
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this sleep
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                            str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3108
3109 def CASE16( self, main ):
3110 """
3111 Install Distributed Primitives app
3112 """
3113 import time
3114 assert main.numCtrls, "main.numCtrls not defined"
3115 assert main, "main not defined"
3116 assert utilities.assert_equals, "utilities.assert_equals not defined"
3117 assert main.CLIs, "main.CLIs not defined"
3118 assert main.nodes, "main.nodes not defined"
3119
3120 # Variables for the distributed primitives tests
3121 global pCounterName
3122 global pCounterValue
3123 global onosSet
3124 global onosSetName
3125 pCounterName = "TestON-Partitions"
3126 pCounterValue = 0
3127 onosSet = set([])
3128 onosSetName = "TestON-set"
3129
3130 description = "Install Primitives app"
3131 main.case( description )
3132 main.step( "Install Primitives app" )
3133 appName = "org.onosproject.distributedprimitives"
3134 node = main.activeNodes[0]
3135 appResults = main.CLIs[node].activateApp( appName )
3136 utilities.assert_equals( expect=main.TRUE,
3137 actual=appResults,
3138 onpass="Primitives app activated",
3139 onfail="Primitives app not activated" )
3140 time.sleep( 5 ) # To allow all nodes to activate
3141
3142 def CASE17( self, main ):
3143 """
3144 Check for basic functionality with distributed primitives
3145 """
3146 # Make sure variables are defined/set
3147 assert main.numCtrls, "main.numCtrls not defined"
3148 assert main, "main not defined"
3149 assert utilities.assert_equals, "utilities.assert_equals not defined"
3150 assert main.CLIs, "main.CLIs not defined"
3151 assert main.nodes, "main.nodes not defined"
3152 assert pCounterName, "pCounterName not defined"
3153 assert onosSetName, "onosSetName not defined"
3154 # NOTE: assert fails if value is 0/None/Empty/False
3155 try:
3156 pCounterValue
3157 except NameError:
3158 main.log.error( "pCounterValue not defined, setting to 0" )
3159 pCounterValue = 0
3160 try:
3161 onosSet
3162 except NameError:
3163 main.log.error( "onosSet not defined, setting to empty Set" )
3164 onosSet = set([])
3165 # Variables for the distributed primitives tests. These are local only
3166 addValue = "a"
3167 addAllValue = "a b c d e f"
3168 retainValue = "c d e f"
3169
3170 description = "Check for basic functionality with distributed " +\
3171 "primitives"
3172 main.case( description )
3173 main.caseExplanation = "Test the methods of the distributed " +\
3174 "primitives (counters and sets) throught the cli"
3175 # DISTRIBUTED ATOMIC COUNTERS
3176 # Partitioned counters
3177 main.step( "Increment then get a default counter on each node" )
3178 pCounters = []
3179 threads = []
3180 addedPValues = []
3181 for i in main.activeNodes:
3182 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3183 name="counterAddAndGet-" + str( i ),
3184 args=[ pCounterName ] )
3185 pCounterValue += 1
3186 addedPValues.append( pCounterValue )
3187 threads.append( t )
3188 t.start()
3189
3190 for t in threads:
3191 t.join()
3192 pCounters.append( t.result )
3193 # Check that counter incremented numController times
3194 pCounterResults = True
3195 for i in addedPValues:
3196 tmpResult = i in pCounters
3197 pCounterResults = pCounterResults and tmpResult
3198 if not tmpResult:
3199 main.log.error( str( i ) + " is not in partitioned "
3200 "counter incremented results" )
3201 utilities.assert_equals( expect=True,
3202 actual=pCounterResults,
3203 onpass="Default counter incremented",
3204 onfail="Error incrementing default" +
3205 " counter" )
3206
3207 main.step( "Get then Increment a default counter on each node" )
3208 pCounters = []
3209 threads = []
3210 addedPValues = []
3211 for i in main.activeNodes:
3212 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3213 name="counterGetAndAdd-" + str( i ),
3214 args=[ pCounterName ] )
3215 addedPValues.append( pCounterValue )
3216 pCounterValue += 1
3217 threads.append( t )
3218 t.start()
3219
3220 for t in threads:
3221 t.join()
3222 pCounters.append( t.result )
3223 # Check that counter incremented numController times
3224 pCounterResults = True
3225 for i in addedPValues:
3226 tmpResult = i in pCounters
3227 pCounterResults = pCounterResults and tmpResult
3228 if not tmpResult:
3229 main.log.error( str( i ) + " is not in partitioned "
3230 "counter incremented results" )
3231 utilities.assert_equals( expect=True,
3232 actual=pCounterResults,
3233 onpass="Default counter incremented",
3234 onfail="Error incrementing default" +
3235 " counter" )
3236
3237 main.step( "Counters we added have the correct values" )
3238 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3239 utilities.assert_equals( expect=main.TRUE,
3240 actual=incrementCheck,
3241 onpass="Added counters are correct",
3242 onfail="Added counters are incorrect" )
3243
3244 main.step( "Add -8 to then get a default counter on each node" )
3245 pCounters = []
3246 threads = []
3247 addedPValues = []
3248 for i in main.activeNodes:
3249 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3250 name="counterIncrement-" + str( i ),
3251 args=[ pCounterName ],
3252 kwargs={ "delta": -8 } )
3253 pCounterValue += -8
3254 addedPValues.append( pCounterValue )
3255 threads.append( t )
3256 t.start()
3257
3258 for t in threads:
3259 t.join()
3260 pCounters.append( t.result )
3261 # Check that counter incremented numController times
3262 pCounterResults = True
3263 for i in addedPValues:
3264 tmpResult = i in pCounters
3265 pCounterResults = pCounterResults and tmpResult
3266 if not tmpResult:
3267 main.log.error( str( i ) + " is not in partitioned "
3268 "counter incremented results" )
3269 utilities.assert_equals( expect=True,
3270 actual=pCounterResults,
3271 onpass="Default counter incremented",
3272 onfail="Error incrementing default" +
3273 " counter" )
3274
3275 main.step( "Add 5 to then get a default counter on each node" )
3276 pCounters = []
3277 threads = []
3278 addedPValues = []
3279 for i in main.activeNodes:
3280 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3281 name="counterIncrement-" + str( i ),
3282 args=[ pCounterName ],
3283 kwargs={ "delta": 5 } )
3284 pCounterValue += 5
3285 addedPValues.append( pCounterValue )
3286 threads.append( t )
3287 t.start()
3288
3289 for t in threads:
3290 t.join()
3291 pCounters.append( t.result )
3292 # Check that counter incremented numController times
3293 pCounterResults = True
3294 for i in addedPValues:
3295 tmpResult = i in pCounters
3296 pCounterResults = pCounterResults and tmpResult
3297 if not tmpResult:
3298 main.log.error( str( i ) + " is not in partitioned "
3299 "counter incremented results" )
3300 utilities.assert_equals( expect=True,
3301 actual=pCounterResults,
3302 onpass="Default counter incremented",
3303 onfail="Error incrementing default" +
3304 " counter" )
3305
3306 main.step( "Get then add 5 to a default counter on each node" )
3307 pCounters = []
3308 threads = []
3309 addedPValues = []
3310 for i in main.activeNodes:
3311 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3312 name="counterIncrement-" + str( i ),
3313 args=[ pCounterName ],
3314 kwargs={ "delta": 5 } )
3315 addedPValues.append( pCounterValue )
3316 pCounterValue += 5
3317 threads.append( t )
3318 t.start()
3319
3320 for t in threads:
3321 t.join()
3322 pCounters.append( t.result )
3323 # Check that counter incremented numController times
3324 pCounterResults = True
3325 for i in addedPValues:
3326 tmpResult = i in pCounters
3327 pCounterResults = pCounterResults and tmpResult
3328 if not tmpResult:
3329 main.log.error( str( i ) + " is not in partitioned "
3330 "counter incremented results" )
3331 utilities.assert_equals( expect=True,
3332 actual=pCounterResults,
3333 onpass="Default counter incremented",
3334 onfail="Error incrementing default" +
3335 " counter" )
3336
3337 main.step( "Counters we added have the correct values" )
3338 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3339 utilities.assert_equals( expect=main.TRUE,
3340 actual=incrementCheck,
3341 onpass="Added counters are correct",
3342 onfail="Added counters are incorrect" )
3343
3344 # DISTRIBUTED SETS
3345 main.step( "Distributed Set get" )
3346 size = len( onosSet )
3347 getResponses = []
3348 threads = []
3349 for i in main.activeNodes:
3350 t = main.Thread( target=main.CLIs[i].setTestGet,
3351 name="setTestGet-" + str( i ),
3352 args=[ onosSetName ] )
3353 threads.append( t )
3354 t.start()
3355 for t in threads:
3356 t.join()
3357 getResponses.append( t.result )
3358
3359 getResults = main.TRUE
3360 for i in range( len( main.activeNodes ) ):
3361 node = str( main.activeNodes[i] + 1 )
3362 if isinstance( getResponses[ i ], list):
3363 current = set( getResponses[ i ] )
3364 if len( current ) == len( getResponses[ i ] ):
3365 # no repeats
3366 if onosSet != current:
3367 main.log.error( "ONOS" + node +
3368 " has incorrect view" +
3369 " of set " + onosSetName + ":\n" +
3370 str( getResponses[ i ] ) )
3371 main.log.debug( "Expected: " + str( onosSet ) )
3372 main.log.debug( "Actual: " + str( current ) )
3373 getResults = main.FALSE
3374 else:
3375 # error, set is not a set
3376 main.log.error( "ONOS" + node +
3377 " has repeat elements in" +
3378 " set " + onosSetName + ":\n" +
3379 str( getResponses[ i ] ) )
3380 getResults = main.FALSE
3381 elif getResponses[ i ] == main.ERROR:
3382 getResults = main.FALSE
3383 utilities.assert_equals( expect=main.TRUE,
3384 actual=getResults,
3385 onpass="Set elements are correct",
3386 onfail="Set elements are incorrect" )
3387
3388 main.step( "Distributed Set size" )
3389 sizeResponses = []
3390 threads = []
3391 for i in main.activeNodes:
3392 t = main.Thread( target=main.CLIs[i].setTestSize,
3393 name="setTestSize-" + str( i ),
3394 args=[ onosSetName ] )
3395 threads.append( t )
3396 t.start()
3397 for t in threads:
3398 t.join()
3399 sizeResponses.append( t.result )
3400
3401 sizeResults = main.TRUE
3402 for i in range( len( main.activeNodes ) ):
3403 node = str( main.activeNodes[i] + 1 )
3404 if size != sizeResponses[ i ]:
3405 sizeResults = main.FALSE
3406 main.log.error( "ONOS" + node +
3407 " expected a size of " + str( size ) +
3408 " for set " + onosSetName +
3409 " but got " + str( sizeResponses[ i ] ) )
3410 utilities.assert_equals( expect=main.TRUE,
3411 actual=sizeResults,
3412 onpass="Set sizes are correct",
3413 onfail="Set sizes are incorrect" )
3414
3415 main.step( "Distributed Set add()" )
3416 onosSet.add( addValue )
3417 addResponses = []
3418 threads = []
3419 for i in main.activeNodes:
3420 t = main.Thread( target=main.CLIs[i].setTestAdd,
3421 name="setTestAdd-" + str( i ),
3422 args=[ onosSetName, addValue ] )
3423 threads.append( t )
3424 t.start()
3425 for t in threads:
3426 t.join()
3427 addResponses.append( t.result )
3428
3429 # main.TRUE = successfully changed the set
3430 # main.FALSE = action resulted in no change in set
3431 # main.ERROR - Some error in executing the function
3432 addResults = main.TRUE
3433 for i in range( len( main.activeNodes ) ):
3434 if addResponses[ i ] == main.TRUE:
3435 # All is well
3436 pass
3437 elif addResponses[ i ] == main.FALSE:
3438 # Already in set, probably fine
3439 pass
3440 elif addResponses[ i ] == main.ERROR:
3441 # Error in execution
3442 addResults = main.FALSE
3443 else:
3444 # unexpected result
3445 addResults = main.FALSE
3446 if addResults != main.TRUE:
3447 main.log.error( "Error executing set add" )
3448
3449 # Check if set is still correct
3450 size = len( onosSet )
3451 getResponses = []
3452 threads = []
3453 for i in main.activeNodes:
3454 t = main.Thread( target=main.CLIs[i].setTestGet,
3455 name="setTestGet-" + str( i ),
3456 args=[ onosSetName ] )
3457 threads.append( t )
3458 t.start()
3459 for t in threads:
3460 t.join()
3461 getResponses.append( t.result )
3462 getResults = main.TRUE
3463 for i in range( len( main.activeNodes ) ):
3464 node = str( main.activeNodes[i] + 1 )
3465 if isinstance( getResponses[ i ], list):
3466 current = set( getResponses[ i ] )
3467 if len( current ) == len( getResponses[ i ] ):
3468 # no repeats
3469 if onosSet != current:
3470 main.log.error( "ONOS" + node + " has incorrect view" +
3471 " of set " + onosSetName + ":\n" +
3472 str( getResponses[ i ] ) )
3473 main.log.debug( "Expected: " + str( onosSet ) )
3474 main.log.debug( "Actual: " + str( current ) )
3475 getResults = main.FALSE
3476 else:
3477 # error, set is not a set
3478 main.log.error( "ONOS" + node + " has repeat elements in" +
3479 " set " + onosSetName + ":\n" +
3480 str( getResponses[ i ] ) )
3481 getResults = main.FALSE
3482 elif getResponses[ i ] == main.ERROR:
3483 getResults = main.FALSE
3484 sizeResponses = []
3485 threads = []
3486 for i in main.activeNodes:
3487 t = main.Thread( target=main.CLIs[i].setTestSize,
3488 name="setTestSize-" + str( i ),
3489 args=[ onosSetName ] )
3490 threads.append( t )
3491 t.start()
3492 for t in threads:
3493 t.join()
3494 sizeResponses.append( t.result )
3495 sizeResults = main.TRUE
3496 for i in range( len( main.activeNodes ) ):
3497 node = str( main.activeNodes[i] + 1 )
3498 if size != sizeResponses[ i ]:
3499 sizeResults = main.FALSE
3500 main.log.error( "ONOS" + node +
3501 " expected a size of " + str( size ) +
3502 " for set " + onosSetName +
3503 " but got " + str( sizeResponses[ i ] ) )
3504 addResults = addResults and getResults and sizeResults
3505 utilities.assert_equals( expect=main.TRUE,
3506 actual=addResults,
3507 onpass="Set add correct",
3508 onfail="Set add was incorrect" )
3509
3510 main.step( "Distributed Set addAll()" )
3511 onosSet.update( addAllValue.split() )
3512 addResponses = []
3513 threads = []
3514 for i in main.activeNodes:
3515 t = main.Thread( target=main.CLIs[i].setTestAdd,
3516 name="setTestAddAll-" + str( i ),
3517 args=[ onosSetName, addAllValue ] )
3518 threads.append( t )
3519 t.start()
3520 for t in threads:
3521 t.join()
3522 addResponses.append( t.result )
3523
3524 # main.TRUE = successfully changed the set
3525 # main.FALSE = action resulted in no change in set
3526 # main.ERROR - Some error in executing the function
3527 addAllResults = main.TRUE
3528 for i in range( len( main.activeNodes ) ):
3529 if addResponses[ i ] == main.TRUE:
3530 # All is well
3531 pass
3532 elif addResponses[ i ] == main.FALSE:
3533 # Already in set, probably fine
3534 pass
3535 elif addResponses[ i ] == main.ERROR:
3536 # Error in execution
3537 addAllResults = main.FALSE
3538 else:
3539 # unexpected result
3540 addAllResults = main.FALSE
3541 if addAllResults != main.TRUE:
3542 main.log.error( "Error executing set addAll" )
3543
3544 # Check if set is still correct
3545 size = len( onosSet )
3546 getResponses = []
3547 threads = []
3548 for i in main.activeNodes:
3549 t = main.Thread( target=main.CLIs[i].setTestGet,
3550 name="setTestGet-" + str( i ),
3551 args=[ onosSetName ] )
3552 threads.append( t )
3553 t.start()
3554 for t in threads:
3555 t.join()
3556 getResponses.append( t.result )
3557 getResults = main.TRUE
3558 for i in range( len( main.activeNodes ) ):
3559 node = str( main.activeNodes[i] + 1 )
3560 if isinstance( getResponses[ i ], list):
3561 current = set( getResponses[ i ] )
3562 if len( current ) == len( getResponses[ i ] ):
3563 # no repeats
3564 if onosSet != current:
3565 main.log.error( "ONOS" + node +
3566 " has incorrect view" +
3567 " of set " + onosSetName + ":\n" +
3568 str( getResponses[ i ] ) )
3569 main.log.debug( "Expected: " + str( onosSet ) )
3570 main.log.debug( "Actual: " + str( current ) )
3571 getResults = main.FALSE
3572 else:
3573 # error, set is not a set
3574 main.log.error( "ONOS" + node +
3575 " has repeat elements in" +
3576 " set " + onosSetName + ":\n" +
3577 str( getResponses[ i ] ) )
3578 getResults = main.FALSE
3579 elif getResponses[ i ] == main.ERROR:
3580 getResults = main.FALSE
3581 sizeResponses = []
3582 threads = []
3583 for i in main.activeNodes:
3584 t = main.Thread( target=main.CLIs[i].setTestSize,
3585 name="setTestSize-" + str( i ),
3586 args=[ onosSetName ] )
3587 threads.append( t )
3588 t.start()
3589 for t in threads:
3590 t.join()
3591 sizeResponses.append( t.result )
3592 sizeResults = main.TRUE
3593 for i in range( len( main.activeNodes ) ):
3594 node = str( main.activeNodes[i] + 1 )
3595 if size != sizeResponses[ i ]:
3596 sizeResults = main.FALSE
3597 main.log.error( "ONOS" + node +
3598 " expected a size of " + str( size ) +
3599 " for set " + onosSetName +
3600 " but got " + str( sizeResponses[ i ] ) )
3601 addAllResults = addAllResults and getResults and sizeResults
3602 utilities.assert_equals( expect=main.TRUE,
3603 actual=addAllResults,
3604 onpass="Set addAll correct",
3605 onfail="Set addAll was incorrect" )
3606
3607 main.step( "Distributed Set contains()" )
3608 containsResponses = []
3609 threads = []
3610 for i in main.activeNodes:
3611 t = main.Thread( target=main.CLIs[i].setTestGet,
3612 name="setContains-" + str( i ),
3613 args=[ onosSetName ],
3614 kwargs={ "values": addValue } )
3615 threads.append( t )
3616 t.start()
3617 for t in threads:
3618 t.join()
3619 # NOTE: This is the tuple
3620 containsResponses.append( t.result )
3621
3622 containsResults = main.TRUE
3623 for i in range( len( main.activeNodes ) ):
3624 if containsResponses[ i ] == main.ERROR:
3625 containsResults = main.FALSE
3626 else:
3627 containsResults = containsResults and\
3628 containsResponses[ i ][ 1 ]
3629 utilities.assert_equals( expect=main.TRUE,
3630 actual=containsResults,
3631 onpass="Set contains is functional",
3632 onfail="Set contains failed" )
3633
3634 main.step( "Distributed Set containsAll()" )
3635 containsAllResponses = []
3636 threads = []
3637 for i in main.activeNodes:
3638 t = main.Thread( target=main.CLIs[i].setTestGet,
3639 name="setContainsAll-" + str( i ),
3640 args=[ onosSetName ],
3641 kwargs={ "values": addAllValue } )
3642 threads.append( t )
3643 t.start()
3644 for t in threads:
3645 t.join()
3646 # NOTE: This is the tuple
3647 containsAllResponses.append( t.result )
3648
3649 containsAllResults = main.TRUE
3650 for i in range( len( main.activeNodes ) ):
3651 if containsResponses[ i ] == main.ERROR:
3652 containsResults = main.FALSE
3653 else:
3654 containsResults = containsResults and\
3655 containsResponses[ i ][ 1 ]
3656 utilities.assert_equals( expect=main.TRUE,
3657 actual=containsAllResults,
3658 onpass="Set containsAll is functional",
3659 onfail="Set containsAll failed" )
3660
        main.step( "Distributed Set remove()" )
        # Remove a single element on every node in parallel, then verify
        # that all nodes still agree on the expected contents and size.
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            # t.result is only read after join(), so the value is final
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # The step passes only if the remove, the contents check, and the
        # size check all succeeded
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3757
        main.step( "Distributed Set removeAll()" )
        # Remove the whole addAllValue batch on every node, then verify
        # that all nodes converge on the expected contents and size.
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Pass only if the removeAll, contents check, and size check all pass
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3857
        main.step( "Distributed Set addAll()" )
        # Add the whole addAllValue batch on every node, then verify
        # that all nodes agree on the expected contents and size.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Pass only if the addAll, contents check, and size check all pass
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3954
        main.step( "Distributed Set clear()" )
        # Clear the set (via setTestRemove with clear=True) on every node,
        # then verify all nodes report an empty set.
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Pass only if the clear, contents check, and size check all pass
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4052
        main.step( "Distributed Set addAll()" )
        # Re-populate the (just cleared) set on every node so the retain()
        # step below has contents to operate on; verify contents and size.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Pass only if the addAll, contents check, and size check all pass
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4149
        main.step( "Distributed Set retain()" )
        # Retain only the elements in retainValue (setTestRemove with
        # retain=True), then verify contents and size on all nodes.
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-based in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Pass only if the retain, contents check, and size check all pass
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4246
4247 # Transactional maps
4248 main.step( "Partitioned Transactional maps put" )
4249 tMapValue = "Testing"
4250 numKeys = 100
4251 putResult = True
4252 node = main.activeNodes[0]
4253 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4254 if putResponses and len( putResponses ) == 100:
4255 for i in putResponses:
4256 if putResponses[ i ][ 'value' ] != tMapValue:
4257 putResult = False
4258 else:
4259 putResult = False
4260 if not putResult:
4261 main.log.debug( "Put response values: " + str( putResponses ) )
4262 utilities.assert_equals( expect=True,
4263 actual=putResult,
4264 onpass="Partitioned Transactional Map put successful",
4265 onfail="Partitioned Transactional Map put values are incorrect" )
4266
        main.step( "Partitioned Transactional maps get" )
        # FIXME: is this sleep needed?
        time.sleep( 5 )

        # For each key written by the put step, read it back from every
        # active node and check that all nodes return the written value.
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                # t.result is only read after join(), so the value is final
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            # one mismatched key fails the whole step
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )