blob: 3c98ab73f62aeaa63ab4211ca13acb33cd8a8cbf [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
    dynamic scaling of the cluster size.

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: The scaling case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
25
26
27class HAscaling:
28
29 def __init__( self ):
30 self.default = ''
31
32 def CASE1( self, main ):
33 """
34 CASE1 is to compile ONOS and push it to the test machines
35
36 Startup sequence:
37 cell <name>
38 onos-verify-cell
39 NOTE: temporary - onos-remove-raft-logs
40 onos-uninstall
41 start mininet
42 git pull
43 mvn clean install
44 onos-package
45 onos-install -f
46 onos-wait-for-start
47 start cli sessions
48 start tcpdump
49 """
50 import time
51 import os
52 import re
53 main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
54 "initialization" )
55 main.case( "Setting up test environment" )
56 main.caseExplanation = "Setup the test environment including " +\
57 "installing ONOS, starting Mininet and ONOS" +\
58 "cli sessions."
59
60 # load some variables from the params file
61 PULLCODE = False
62 if main.params[ 'Git' ] == 'True':
63 PULLCODE = True
64 gitBranch = main.params[ 'branch' ]
65 cellName = main.params[ 'ENV' ][ 'cellName' ]
66
67 main.numCtrls = int( main.params[ 'num_controllers' ] )
68 if main.ONOSbench.maxNodes:
69 if main.ONOSbench.maxNodes < main.numCtrls:
70 main.numCtrls = int( main.ONOSbench.maxNodes )
71 # set global variables
72 # These are for csv plotting in jenkins
73 global labels
74 global data
75 labels = []
76 data = []
77
78 try:
79 from tests.HA.dependencies.HA import HA
80 main.HA = HA()
81 from tests.HA.HAscaling.dependencies.Server import Server
82 main.Server = Server()
83 except Exception as e:
84 main.log.exception( e )
85 main.cleanup()
86 main.exit()
87
88 main.CLIs = []
89 main.nodes = []
90 ipList = []
91 for i in range( 1, main.numCtrls + 1 ):
92 try:
93 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
94 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
95 ipList.append( main.nodes[ -1 ].ip_address )
96 except AttributeError:
97 break
98
99 main.step( "Create cell file" )
100 cellAppString = main.params[ 'ENV' ][ 'appString' ]
101 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
102 main.Mininet1.ip_address,
103 cellAppString, ipList )
104
105 main.step( "Applying cell variable to environment" )
106 cellResult = main.ONOSbench.setCell( cellName )
107 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
108 onpass="Set cell successfull",
109 onfail="Failled to set cell" )
110
111 main.step( "Verify connectivity to cell" )
112 verifyResult = main.ONOSbench.verifyCell()
113 utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
114 onpass="Verify cell passed",
115 onfail="Failled to verify cell" )
116
117 # FIXME:this is short term fix
118 main.log.info( "Removing raft logs" )
119 main.ONOSbench.onosRemoveRaftLogs()
120
121 main.log.info( "Uninstalling ONOS" )
122 for node in main.nodes:
123 main.ONOSbench.onosUninstall( node.ip_address )
124
125 # Make sure ONOS is DEAD
126 main.log.info( "Killing any ONOS processes" )
127 killResults = main.TRUE
128 for node in main.nodes:
129 killed = main.ONOSbench.onosKill( node.ip_address )
130 killResults = killResults and killed
131
132 main.step( "Setup server for cluster metadata file" )
Jon Halla6c90b22016-05-06 10:53:09 -0700133 port = main.params['serverPort']
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700134 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
135 main.log.debug( "Root dir: {}".format( rootDir ) )
136 status = main.Server.start( main.ONOSbench,
137 rootDir,
138 port=port,
139 logDir=main.logdir + "/server.log" )
140 utilities.assert_equals( expect=main.TRUE, actual=status,
141 onpass="Server started",
142 onfail="Failled to start SimpleHTTPServer" )
143
144 main.step( "Generate initial metadata file" )
145 main.scaling = main.params['scaling'].split( "," )
146 main.log.debug( main.scaling )
147 scale = main.scaling.pop(0)
148 main.log.debug( scale)
149 if "e" in scale:
150 equal = True
151 else:
152 equal = False
153 main.log.debug( equal)
154 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
155 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
156 utilities.assert_equals( expect=main.TRUE, actual=genResult,
157 onpass="New cluster metadata file generated",
158 onfail="Failled to generate new metadata file" )
159
160 cleanInstallResult = main.TRUE
161 gitPullResult = main.TRUE
162
163 main.step( "Starting Mininet" )
164 # scp topo file to mininet
165 # TODO: move to params?
166 topoName = "obelisk.py"
167 filePath = main.ONOSbench.home + "/tools/test/topos/"
168 main.ONOSbench.scp( main.Mininet1,
169 filePath + topoName,
170 main.Mininet1.home,
171 direction="to" )
172 mnResult = main.Mininet1.startNet( )
173 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
174 onpass="Mininet Started",
175 onfail="Error starting Mininet" )
176
177 main.step( "Git checkout and pull " + gitBranch )
178 if PULLCODE:
179 main.ONOSbench.gitCheckout( gitBranch )
180 gitPullResult = main.ONOSbench.gitPull()
181 # values of 1 or 3 are good
182 utilities.assert_lesser( expect=0, actual=gitPullResult,
183 onpass="Git pull successful",
184 onfail="Git pull failed" )
185 main.ONOSbench.getVersion( report=True )
186
187 main.step( "Using mvn clean install" )
188 cleanInstallResult = main.TRUE
189 if PULLCODE and gitPullResult == main.TRUE:
190 cleanInstallResult = main.ONOSbench.cleanInstall()
191 else:
192 main.log.warn( "Did not pull new code so skipping mvn " +
193 "clean install" )
194 utilities.assert_equals( expect=main.TRUE,
195 actual=cleanInstallResult,
196 onpass="MCI successful",
197 onfail="MCI failed" )
198 # GRAPHS
199 # NOTE: important params here:
200 # job = name of Jenkins job
201 # Plot Name = Plot-HA, only can be used if multiple plots
202 # index = The number of the graph under plot name
203 job = "HAscaling"
204 plotName = "Plot-HA"
205 index = "0"
206 graphs = '<ac:structured-macro ac:name="html">\n'
207 graphs += '<ac:plain-text-body><![CDATA[\n'
208 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
209 '/plot/' + plotName + '/getPlot?index=' + index +\
210 '&width=500&height=300"' +\
211 'noborder="0" width="500" height="300" scrolling="yes" ' +\
212 'seamless="seamless"></iframe>\n'
213 graphs += ']]></ac:plain-text-body>\n'
214 graphs += '</ac:structured-macro>\n'
215 main.log.wiki(graphs)
216
217 main.step( "Copying backup config files" )
218 path = "~/onos/tools/package/bin/onos-service"
219 cp = main.ONOSbench.scp( main.ONOSbench,
220 path,
221 path + ".backup",
222 direction="to" )
223
224 utilities.assert_equals( expect=main.TRUE,
225 actual=cp,
226 onpass="Copy backup config file succeeded",
227 onfail="Copy backup config file failed" )
228 # we need to modify the onos-service file to use remote metadata file
229 # url for cluster metadata file
230 ip = main.ONOSbench.getIpAddr()
231 metaFile = "cluster.json"
232 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
233 main.log.warn( javaArgs )
234 main.log.warn( repr( javaArgs ) )
235 handle = main.ONOSbench.handle
236 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
237 main.log.warn( sed )
238 main.log.warn( repr( sed ) )
239 handle.sendline( sed )
240 handle.expect( "\$" )
241 main.log.debug( repr( handle.before ) )
242
243 main.step( "Creating ONOS package" )
244 packageResult = main.ONOSbench.onosPackage()
245 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
246 onpass="ONOS package successful",
247 onfail="ONOS package failed" )
248
249 main.step( "Installing ONOS package" )
250 onosInstallResult = main.TRUE
251 for i in range( main.ONOSbench.maxNodes ):
252 node = main.nodes[i]
253 options = "-f"
254 if i >= main.numCtrls:
255 options = "-nf" # Don't start more than the current scale
256 tmpResult = main.ONOSbench.onosInstall( options=options,
257 node=node.ip_address )
258 onosInstallResult = onosInstallResult and tmpResult
259 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
260 onpass="ONOS install successful",
261 onfail="ONOS install failed" )
262
263 # Cleanup custom onos-service file
264 main.ONOSbench.scp( main.ONOSbench,
265 path + ".backup",
266 path,
267 direction="to" )
268
269 main.step( "Checking if ONOS is up yet" )
270 for i in range( 2 ):
271 onosIsupResult = main.TRUE
272 for i in range( main.numCtrls ):
273 node = main.nodes[i]
274 started = main.ONOSbench.isup( node.ip_address )
275 if not started:
276 main.log.error( node.name + " hasn't started" )
277 onosIsupResult = onosIsupResult and started
278 if onosIsupResult == main.TRUE:
279 break
280 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
281 onpass="ONOS startup successful",
282 onfail="ONOS startup failed" )
283
284 main.log.step( "Starting ONOS CLI sessions" )
285 cliResults = main.TRUE
286 threads = []
287 for i in range( main.numCtrls ):
288 t = main.Thread( target=main.CLIs[i].startOnosCli,
289 name="startOnosCli-" + str( i ),
290 args=[main.nodes[i].ip_address] )
291 threads.append( t )
292 t.start()
293
294 for t in threads:
295 t.join()
296 cliResults = cliResults and t.result
297 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
298 onpass="ONOS cli startup successful",
299 onfail="ONOS cli startup failed" )
300
301 # Create a list of active nodes for use when some nodes are stopped
302 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
303
304 if main.params[ 'tcpdump' ].lower() == "true":
305 main.step( "Start Packet Capture MN" )
306 main.Mininet2.startTcpdump(
307 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
308 + "-MN.pcap",
309 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
310 port=main.params[ 'MNtcpdump' ][ 'port' ] )
311
312 main.step( "Checking ONOS nodes" )
313 nodeResults = utilities.retry( main.HA.nodesCheck,
314 False,
315 args=[main.activeNodes],
316 attempts=5 )
317 utilities.assert_equals( expect=True, actual=nodeResults,
318 onpass="Nodes check successful",
319 onfail="Nodes check NOT successful" )
320
321 if not nodeResults:
Jon Hall7ac7bc32016-05-05 10:57:02 -0700322 for i in main.activeNodes:
323 cli = main.CLIs[i]
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700324 main.log.debug( "{} components not ACTIVE: \n{}".format(
325 cli.name,
326 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall9ebd1bd2016-04-19 01:37:17 -0700327 main.log.error( "Failed to start ONOS, stopping test" )
328 main.cleanup()
329 main.exit()
330
331 main.step( "Activate apps defined in the params file" )
332 # get data from the params
333 apps = main.params.get( 'apps' )
334 if apps:
335 apps = apps.split(',')
336 main.log.warn( apps )
337 activateResult = True
338 for app in apps:
339 main.CLIs[ 0 ].app( app, "Activate" )
340 # TODO: check this worked
341 time.sleep( 10 ) # wait for apps to activate
342 for app in apps:
343 state = main.CLIs[ 0 ].appStatus( app )
344 if state == "ACTIVE":
345 activateResult = activateResult and True
346 else:
347 main.log.error( "{} is in {} state".format( app, state ) )
348 activateResult = False
349 utilities.assert_equals( expect=True,
350 actual=activateResult,
351 onpass="Successfully activated apps",
352 onfail="Failed to activate apps" )
353 else:
354 main.log.warn( "No apps were specified to be loaded after startup" )
355
356 main.step( "Set ONOS configurations" )
357 config = main.params.get( 'ONOS_Configuration' )
358 if config:
359 main.log.debug( config )
360 checkResult = main.TRUE
361 for component in config:
362 for setting in config[component]:
363 value = config[component][setting]
364 check = main.CLIs[ 0 ].setCfg( component, setting, value )
365 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
366 checkResult = check and checkResult
367 utilities.assert_equals( expect=main.TRUE,
368 actual=checkResult,
369 onpass="Successfully set config",
370 onfail="Failed to set config" )
371 else:
372 main.log.warn( "No configurations were specified to be changed after startup" )
373
374 main.step( "App Ids check" )
375 appCheck = main.TRUE
376 threads = []
377 for i in main.activeNodes:
378 t = main.Thread( target=main.CLIs[i].appToIDCheck,
379 name="appToIDCheck-" + str( i ),
380 args=[] )
381 threads.append( t )
382 t.start()
383
384 for t in threads:
385 t.join()
386 appCheck = appCheck and t.result
387 if appCheck != main.TRUE:
388 node = main.activeNodes[0]
389 main.log.warn( main.CLIs[node].apps() )
390 main.log.warn( main.CLIs[node].appIDs() )
391 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
392 onpass="App Ids seem to be correct",
393 onfail="Something is wrong with app Ids" )
394
395 def CASE2( self, main ):
396 """
397 Assign devices to controllers
398 """
399 import re
400 assert main.numCtrls, "main.numCtrls not defined"
401 assert main, "main not defined"
402 assert utilities.assert_equals, "utilities.assert_equals not defined"
403 assert main.CLIs, "main.CLIs not defined"
404 assert main.nodes, "main.nodes not defined"
405
406 main.case( "Assigning devices to controllers" )
407 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
408 "and check that an ONOS node becomes the " +\
409 "master of the device."
410 main.step( "Assign switches to controllers" )
411
412 ipList = []
413 for i in range( main.ONOSbench.maxNodes ):
414 ipList.append( main.nodes[ i ].ip_address )
415 swList = []
416 for i in range( 1, 29 ):
417 swList.append( "s" + str( i ) )
418 main.Mininet1.assignSwController( sw=swList, ip=ipList )
419
420 mastershipCheck = main.TRUE
421 for i in range( 1, 29 ):
422 response = main.Mininet1.getSwController( "s" + str( i ) )
423 try:
424 main.log.info( str( response ) )
425 except Exception:
426 main.log.info( repr( response ) )
427 for node in main.nodes:
428 if re.search( "tcp:" + node.ip_address, response ):
429 mastershipCheck = mastershipCheck and main.TRUE
430 else:
431 main.log.error( "Error, node " + node.ip_address + " is " +
432 "not in the list of controllers s" +
433 str( i ) + " is connecting to." )
434 mastershipCheck = main.FALSE
435 utilities.assert_equals(
436 expect=main.TRUE,
437 actual=mastershipCheck,
438 onpass="Switch mastership assigned correctly",
439 onfail="Switches not assigned correctly to controllers" )
440
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually pins each of the 28 switches to a designated ONOS node
        (index taken modulo the current cluster size) with 'device-role',
        waits for the multi-step mastership change to settle, then verifies
        each device's master matches the requested controller.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI queries go through the first active node
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = target controller index, ip = its address,
                # deviceId = ONOS id of the switch (looked up by dpid suffix)
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    # NOTE(review): unreachable for 1..28; if it ever runs,
                    # deviceId keeps the previous iteration's value, so the
                    # assert below would not catch the gap — confirm intent
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice returned None (AttributeError on .get) or no id found
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
555
556 def CASE3( self, main ):
557 """
558 Assign intents
559 """
560 import time
561 import json
562 assert main.numCtrls, "main.numCtrls not defined"
563 assert main, "main not defined"
564 assert utilities.assert_equals, "utilities.assert_equals not defined"
565 assert main.CLIs, "main.CLIs not defined"
566 assert main.nodes, "main.nodes not defined"
567 try:
568 labels
569 except NameError:
570 main.log.error( "labels not defined, setting to []" )
571 labels = []
572 try:
573 data
574 except NameError:
575 main.log.error( "data not defined, setting to []" )
576 data = []
577 # NOTE: we must reinstall intents until we have a persistant intent
578 # datastore!
579 main.case( "Adding host Intents" )
580 main.caseExplanation = "Discover hosts by using pingall then " +\
581 "assign predetermined host-to-host intents." +\
582 " After installation, check that the intent" +\
583 " is distributed to all nodes and the state" +\
584 " is INSTALLED"
585
586 # install onos-app-fwd
587 main.step( "Install reactive forwarding app" )
588 onosCli = main.CLIs[ main.activeNodes[0] ]
589 installResults = onosCli.activateApp( "org.onosproject.fwd" )
590 utilities.assert_equals( expect=main.TRUE, actual=installResults,
591 onpass="Install fwd successful",
592 onfail="Install fwd failed" )
593
594 main.step( "Check app ids" )
595 appCheck = main.TRUE
596 threads = []
597 for i in main.activeNodes:
598 t = main.Thread( target=main.CLIs[i].appToIDCheck,
599 name="appToIDCheck-" + str( i ),
600 args=[] )
601 threads.append( t )
602 t.start()
603
604 for t in threads:
605 t.join()
606 appCheck = appCheck and t.result
607 if appCheck != main.TRUE:
608 main.log.warn( onosCli.apps() )
609 main.log.warn( onosCli.appIDs() )
610 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
611 onpass="App Ids seem to be correct",
612 onfail="Something is wrong with app Ids" )
613
614 main.step( "Discovering Hosts( Via pingall for now )" )
615 # FIXME: Once we have a host discovery mechanism, use that instead
616 # REACTIVE FWD test
617 pingResult = main.FALSE
618 passMsg = "Reactive Pingall test passed"
619 time1 = time.time()
620 pingResult = main.Mininet1.pingall()
621 time2 = time.time()
622 if not pingResult:
623 main.log.warn("First pingall failed. Trying again...")
624 pingResult = main.Mininet1.pingall()
625 passMsg += " on the second try"
626 utilities.assert_equals(
627 expect=main.TRUE,
628 actual=pingResult,
629 onpass= passMsg,
630 onfail="Reactive Pingall failed, " +
631 "one or more ping pairs failed" )
632 main.log.info( "Time for pingall: %2f seconds" %
633 ( time2 - time1 ) )
634 # timeout for fwd flows
635 time.sleep( 11 )
636 # uninstall onos-app-fwd
637 main.step( "Uninstall reactive forwarding app" )
638 node = main.activeNodes[0]
639 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
640 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
641 onpass="Uninstall fwd successful",
642 onfail="Uninstall fwd failed" )
643
644 main.step( "Check app ids" )
645 threads = []
646 appCheck2 = main.TRUE
647 for i in main.activeNodes:
648 t = main.Thread( target=main.CLIs[i].appToIDCheck,
649 name="appToIDCheck-" + str( i ),
650 args=[] )
651 threads.append( t )
652 t.start()
653
654 for t in threads:
655 t.join()
656 appCheck2 = appCheck2 and t.result
657 if appCheck2 != main.TRUE:
658 node = main.activeNodes[0]
659 main.log.warn( main.CLIs[node].apps() )
660 main.log.warn( main.CLIs[node].appIDs() )
661 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
662 onpass="App Ids seem to be correct",
663 onfail="Something is wrong with app Ids" )
664
665 main.step( "Add host intents via cli" )
666 intentIds = []
667 # TODO: move the host numbers to params
668 # Maybe look at all the paths we ping?
669 intentAddResult = True
670 hostResult = main.TRUE
671 for i in range( 8, 18 ):
672 main.log.info( "Adding host intent between h" + str( i ) +
673 " and h" + str( i + 10 ) )
674 host1 = "00:00:00:00:00:" + \
675 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
676 host2 = "00:00:00:00:00:" + \
677 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
678 # NOTE: getHost can return None
679 host1Dict = onosCli.getHost( host1 )
680 host2Dict = onosCli.getHost( host2 )
681 host1Id = None
682 host2Id = None
683 if host1Dict and host2Dict:
684 host1Id = host1Dict.get( 'id', None )
685 host2Id = host2Dict.get( 'id', None )
686 if host1Id and host2Id:
687 nodeNum = ( i % len( main.activeNodes ) )
688 node = main.activeNodes[nodeNum]
689 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
690 if tmpId:
691 main.log.info( "Added intent with id: " + tmpId )
692 intentIds.append( tmpId )
693 else:
694 main.log.error( "addHostIntent returned: " +
695 repr( tmpId ) )
696 else:
697 main.log.error( "Error, getHost() failed for h" + str( i ) +
698 " and/or h" + str( i + 10 ) )
699 node = main.activeNodes[0]
700 hosts = main.CLIs[node].hosts()
701 main.log.warn( "Hosts output: " )
702 try:
703 main.log.warn( json.dumps( json.loads( hosts ),
704 sort_keys=True,
705 indent=4,
706 separators=( ',', ': ' ) ) )
707 except ( ValueError, TypeError ):
708 main.log.warn( repr( hosts ) )
709 hostResult = main.FALSE
710 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
711 onpass="Found a host id for each host",
712 onfail="Error looking up host ids" )
713
714 intentStart = time.time()
715 onosIds = onosCli.getAllIntentsId()
716 main.log.info( "Submitted intents: " + str( intentIds ) )
717 main.log.info( "Intents in ONOS: " + str( onosIds ) )
718 for intent in intentIds:
719 if intent in onosIds:
720 pass # intent submitted is in onos
721 else:
722 intentAddResult = False
723 if intentAddResult:
724 intentStop = time.time()
725 else:
726 intentStop = None
727 # Print the intent states
728 intents = onosCli.intents()
729 intentStates = []
730 installedCheck = True
731 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
732 count = 0
733 try:
734 for intent in json.loads( intents ):
735 state = intent.get( 'state', None )
736 if "INSTALLED" not in state:
737 installedCheck = False
738 intentId = intent.get( 'id', None )
739 intentStates.append( ( intentId, state ) )
740 except ( ValueError, TypeError ):
741 main.log.exception( "Error parsing intents" )
742 # add submitted intents not in the store
743 tmplist = [ i for i, s in intentStates ]
744 missingIntents = False
745 for i in intentIds:
746 if i not in tmplist:
747 intentStates.append( ( i, " - " ) )
748 missingIntents = True
749 intentStates.sort()
750 for i, s in intentStates:
751 count += 1
752 main.log.info( "%-6s%-15s%-15s" %
753 ( str( count ), str( i ), str( s ) ) )
754 leaders = onosCli.leaders()
755 try:
756 missing = False
757 if leaders:
758 parsedLeaders = json.loads( leaders )
759 main.log.warn( json.dumps( parsedLeaders,
760 sort_keys=True,
761 indent=4,
762 separators=( ',', ': ' ) ) )
763 # check for all intent partitions
764 topics = []
765 for i in range( 14 ):
766 topics.append( "intent-partition-" + str( i ) )
767 main.log.debug( topics )
768 ONOStopics = [ j['topic'] for j in parsedLeaders ]
769 for topic in topics:
770 if topic not in ONOStopics:
771 main.log.error( "Error: " + topic +
772 " not in leaders" )
773 missing = True
774 else:
775 main.log.error( "leaders() returned None" )
776 except ( ValueError, TypeError ):
777 main.log.exception( "Error parsing leaders" )
778 main.log.error( repr( leaders ) )
779 # Check all nodes
780 if missing:
781 for i in main.activeNodes:
782 response = main.CLIs[i].leaders( jsonFormat=False)
783 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
784 str( response ) )
785
786 partitions = onosCli.partitions()
787 try:
788 if partitions :
789 parsedPartitions = json.loads( partitions )
790 main.log.warn( json.dumps( parsedPartitions,
791 sort_keys=True,
792 indent=4,
793 separators=( ',', ': ' ) ) )
794 # TODO check for a leader in all paritions
795 # TODO check for consistency among nodes
796 else:
797 main.log.error( "partitions() returned None" )
798 except ( ValueError, TypeError ):
799 main.log.exception( "Error parsing partitions" )
800 main.log.error( repr( partitions ) )
801 pendingMap = onosCli.pendingMap()
802 try:
803 if pendingMap :
804 parsedPending = json.loads( pendingMap )
805 main.log.warn( json.dumps( parsedPending,
806 sort_keys=True,
807 indent=4,
808 separators=( ',', ': ' ) ) )
809 # TODO check something here?
810 else:
811 main.log.error( "pendingMap() returned None" )
812 except ( ValueError, TypeError ):
813 main.log.exception( "Error parsing pending map" )
814 main.log.error( repr( pendingMap ) )
815
816 intentAddResult = bool( intentAddResult and not missingIntents and
817 installedCheck )
818 if not intentAddResult:
819 main.log.error( "Error in pushing host intents to ONOS" )
820
821 main.step( "Intent Anti-Entropy dispersion" )
822 for j in range(100):
823 correct = True
824 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
825 for i in main.activeNodes:
826 onosIds = []
827 ids = main.CLIs[i].getAllIntentsId()
828 onosIds.append( ids )
829 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
830 str( sorted( onosIds ) ) )
831 if sorted( ids ) != sorted( intentIds ):
832 main.log.warn( "Set of intent IDs doesn't match" )
833 correct = False
834 break
835 else:
836 intents = json.loads( main.CLIs[i].intents() )
837 for intent in intents:
838 if intent[ 'state' ] != "INSTALLED":
839 main.log.warn( "Intent " + intent[ 'id' ] +
840 " is " + intent[ 'state' ] )
841 correct = False
842 break
843 if correct:
844 break
845 else:
846 time.sleep(1)
847 if not intentStop:
848 intentStop = time.time()
849 global gossipTime
850 gossipTime = intentStop - intentStart
851 main.log.info( "It took about " + str( gossipTime ) +
852 " seconds for all intents to appear in each node" )
853 append = False
854 title = "Gossip Intents"
855 count = 1
856 while append is False:
857 curTitle = title + str( count )
858 if curTitle not in labels:
859 labels.append( curTitle )
860 data.append( str( gossipTime ) )
861 append = True
862 else:
863 count += 1
864 gossipPeriod = int( main.params['timers']['gossip'] )
865 maxGossipTime = gossipPeriod * len( main.activeNodes )
866 utilities.assert_greater_equals(
867 expect=maxGossipTime, actual=gossipTime,
868 onpass="ECM anti-entropy for intents worked within " +
869 "expected time",
870 onfail="Intent ECM anti-entropy took too long. " +
871 "Expected time:{}, Actual time:{}".format( maxGossipTime,
872 gossipTime ) )
873 if gossipTime <= maxGossipTime:
874 intentAddResult = True
875
876 if not intentAddResult or "key" in pendingMap:
877 import time
878 installedCheck = True
879 main.log.info( "Sleeping 60 seconds to see if intents are found" )
880 time.sleep( 60 )
881 onosIds = onosCli.getAllIntentsId()
882 main.log.info( "Submitted intents: " + str( intentIds ) )
883 main.log.info( "Intents in ONOS: " + str( onosIds ) )
884 # Print the intent states
885 intents = onosCli.intents()
886 intentStates = []
887 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
888 count = 0
889 try:
890 for intent in json.loads( intents ):
891 # Iter through intents of a node
892 state = intent.get( 'state', None )
893 if "INSTALLED" not in state:
894 installedCheck = False
895 intentId = intent.get( 'id', None )
896 intentStates.append( ( intentId, state ) )
897 except ( ValueError, TypeError ):
898 main.log.exception( "Error parsing intents" )
899 # add submitted intents not in the store
900 tmplist = [ i for i, s in intentStates ]
901 for i in intentIds:
902 if i not in tmplist:
903 intentStates.append( ( i, " - " ) )
904 intentStates.sort()
905 for i, s in intentStates:
906 count += 1
907 main.log.info( "%-6s%-15s%-15s" %
908 ( str( count ), str( i ), str( s ) ) )
909 leaders = onosCli.leaders()
910 try:
911 missing = False
912 if leaders:
913 parsedLeaders = json.loads( leaders )
914 main.log.warn( json.dumps( parsedLeaders,
915 sort_keys=True,
916 indent=4,
917 separators=( ',', ': ' ) ) )
918 # check for all intent partitions
919 # check for election
920 topics = []
921 for i in range( 14 ):
922 topics.append( "intent-partition-" + str( i ) )
923 # FIXME: this should only be after we start the app
924 topics.append( "org.onosproject.election" )
925 main.log.debug( topics )
926 ONOStopics = [ j['topic'] for j in parsedLeaders ]
927 for topic in topics:
928 if topic not in ONOStopics:
929 main.log.error( "Error: " + topic +
930 " not in leaders" )
931 missing = True
932 else:
933 main.log.error( "leaders() returned None" )
934 except ( ValueError, TypeError ):
935 main.log.exception( "Error parsing leaders" )
936 main.log.error( repr( leaders ) )
937 # Check all nodes
938 if missing:
939 for i in main.activeNodes:
940 node = main.CLIs[i]
941 response = node.leaders( jsonFormat=False)
942 main.log.warn( str( node.name ) + " leaders output: \n" +
943 str( response ) )
944
945 partitions = onosCli.partitions()
946 try:
947 if partitions :
948 parsedPartitions = json.loads( partitions )
949 main.log.warn( json.dumps( parsedPartitions,
950 sort_keys=True,
951 indent=4,
952 separators=( ',', ': ' ) ) )
953 # TODO check for a leader in all paritions
954 # TODO check for consistency among nodes
955 else:
956 main.log.error( "partitions() returned None" )
957 except ( ValueError, TypeError ):
958 main.log.exception( "Error parsing partitions" )
959 main.log.error( repr( partitions ) )
960 pendingMap = onosCli.pendingMap()
961 try:
962 if pendingMap :
963 parsedPending = json.loads( pendingMap )
964 main.log.warn( json.dumps( parsedPending,
965 sort_keys=True,
966 indent=4,
967 separators=( ',', ': ' ) ) )
968 # TODO check something here?
969 else:
970 main.log.error( "pendingMap() returned None" )
971 except ( ValueError, TypeError ):
972 main.log.exception( "Error parsing pending map" )
973 main.log.error( repr( pendingMap ) )
974
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Verifies dataplane connectivity for the host intents added in CASE3:
        1. Poll intent state (up to 40 tries, 1 s apart) until every intent
           reports an INSTALLED state.
        2. Ping each host pair h<i> <-> h<i+10> for i in 8..17 via Mininet.
        3. Check leadership of the intent-partition topics on the active node.
        4. Dump partitions and the pending map for debugging.
        5. If intents were not all INSTALLED, wait 60 s, re-dump state
           (intents, leaders, partitions, pending map, flow rules) and ping
           again.

        Uses framework globals: main.CLIs, main.activeNodes, main.Mininet1,
        and main.TRUE/main.FALSE (TestON truth values).
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All CLI queries in this case go through the first active node.
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until every intent is INSTALLED, at most 40 times (~40 s).
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host pairs h8<->h18 .. h17<->h27 correspond to the added intents.
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                # NOTE(review): the partition count of 14 is hard-coded here
                # and elsewhere in this file - verify it matches the cluster
                # configuration.
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If intents never settled, give them one more minute and retry the
        # whole diagnostic dump plus the ping sweep.
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Dump the raw leaders output of every node if a topic was lost.
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1256
1257 def CASE5( self, main ):
1258 """
1259 Reading state of ONOS
1260 """
1261 import json
1262 import time
1263 assert main.numCtrls, "main.numCtrls not defined"
1264 assert main, "main not defined"
1265 assert utilities.assert_equals, "utilities.assert_equals not defined"
1266 assert main.CLIs, "main.CLIs not defined"
1267 assert main.nodes, "main.nodes not defined"
1268
1269 main.case( "Setting up and gathering data for current state" )
1270 # The general idea for this test case is to pull the state of
1271 # ( intents,flows, topology,... ) from each ONOS node
1272 # We can then compare them with each other and also with past states
1273
1274 main.step( "Check that each switch has a master" )
1275 global mastershipState
1276 mastershipState = '[]'
1277
1278 # Assert that each device has a master
1279 rolesNotNull = main.TRUE
1280 threads = []
1281 for i in main.activeNodes:
1282 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1283 name="rolesNotNull-" + str( i ),
1284 args=[] )
1285 threads.append( t )
1286 t.start()
1287
1288 for t in threads:
1289 t.join()
1290 rolesNotNull = rolesNotNull and t.result
1291 utilities.assert_equals(
1292 expect=main.TRUE,
1293 actual=rolesNotNull,
1294 onpass="Each device has a master",
1295 onfail="Some devices don't have a master assigned" )
1296
1297 main.step( "Get the Mastership of each switch from each controller" )
1298 ONOSMastership = []
1299 consistentMastership = True
1300 rolesResults = True
1301 threads = []
1302 for i in main.activeNodes:
1303 t = main.Thread( target=main.CLIs[i].roles,
1304 name="roles-" + str( i ),
1305 args=[] )
1306 threads.append( t )
1307 t.start()
1308
1309 for t in threads:
1310 t.join()
1311 ONOSMastership.append( t.result )
1312
1313 for i in range( len( ONOSMastership ) ):
1314 node = str( main.activeNodes[i] + 1 )
1315 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1316 main.log.error( "Error in getting ONOS" + node + " roles" )
1317 main.log.warn( "ONOS" + node + " mastership response: " +
1318 repr( ONOSMastership[i] ) )
1319 rolesResults = False
1320 utilities.assert_equals(
1321 expect=True,
1322 actual=rolesResults,
1323 onpass="No error in reading roles output",
1324 onfail="Error in reading roles from ONOS" )
1325
1326 main.step( "Check for consistency in roles from each controller" )
1327 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1328 main.log.info(
1329 "Switch roles are consistent across all ONOS nodes" )
1330 else:
1331 consistentMastership = False
1332 utilities.assert_equals(
1333 expect=True,
1334 actual=consistentMastership,
1335 onpass="Switch roles are consistent across all ONOS nodes",
1336 onfail="ONOS nodes have different views of switch roles" )
1337
1338 if rolesResults and not consistentMastership:
1339 for i in range( len( main.activeNodes ) ):
1340 node = str( main.activeNodes[i] + 1 )
1341 try:
1342 main.log.warn(
1343 "ONOS" + node + " roles: ",
1344 json.dumps(
1345 json.loads( ONOSMastership[ i ] ),
1346 sort_keys=True,
1347 indent=4,
1348 separators=( ',', ': ' ) ) )
1349 except ( ValueError, TypeError ):
1350 main.log.warn( repr( ONOSMastership[ i ] ) )
1351 elif rolesResults and consistentMastership:
1352 mastershipState = ONOSMastership[ 0 ]
1353
1354 main.step( "Get the intents from each controller" )
1355 global intentState
1356 intentState = []
1357 ONOSIntents = []
1358 consistentIntents = True # Are Intents consistent across nodes?
1359 intentsResults = True # Could we read Intents from ONOS?
1360 threads = []
1361 for i in main.activeNodes:
1362 t = main.Thread( target=main.CLIs[i].intents,
1363 name="intents-" + str( i ),
1364 args=[],
1365 kwargs={ 'jsonFormat': True } )
1366 threads.append( t )
1367 t.start()
1368
1369 for t in threads:
1370 t.join()
1371 ONOSIntents.append( t.result )
1372
1373 for i in range( len( ONOSIntents ) ):
1374 node = str( main.activeNodes[i] + 1 )
1375 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1376 main.log.error( "Error in getting ONOS" + node + " intents" )
1377 main.log.warn( "ONOS" + node + " intents response: " +
1378 repr( ONOSIntents[ i ] ) )
1379 intentsResults = False
1380 utilities.assert_equals(
1381 expect=True,
1382 actual=intentsResults,
1383 onpass="No error in reading intents output",
1384 onfail="Error in reading intents from ONOS" )
1385
1386 main.step( "Check for consistency in Intents from each controller" )
1387 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1388 main.log.info( "Intents are consistent across all ONOS " +
1389 "nodes" )
1390 else:
1391 consistentIntents = False
1392 main.log.error( "Intents not consistent" )
1393 utilities.assert_equals(
1394 expect=True,
1395 actual=consistentIntents,
1396 onpass="Intents are consistent across all ONOS nodes",
1397 onfail="ONOS nodes have different views of intents" )
1398
1399 if intentsResults:
1400 # Try to make it easy to figure out what is happening
1401 #
1402 # Intent ONOS1 ONOS2 ...
1403 # 0x01 INSTALLED INSTALLING
1404 # ... ... ...
1405 # ... ... ...
1406 title = " Id"
1407 for n in main.activeNodes:
1408 title += " " * 10 + "ONOS" + str( n + 1 )
1409 main.log.warn( title )
1410 # get all intent keys in the cluster
1411 keys = []
1412 try:
1413 # Get the set of all intent keys
1414 for nodeStr in ONOSIntents:
1415 node = json.loads( nodeStr )
1416 for intent in node:
1417 keys.append( intent.get( 'id' ) )
1418 keys = set( keys )
1419 # For each intent key, print the state on each node
1420 for key in keys:
1421 row = "%-13s" % key
1422 for nodeStr in ONOSIntents:
1423 node = json.loads( nodeStr )
1424 for intent in node:
1425 if intent.get( 'id', "Error" ) == key:
1426 row += "%-15s" % intent.get( 'state' )
1427 main.log.warn( row )
1428 # End of intent state table
1429 except ValueError as e:
1430 main.log.exception( e )
1431 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1432
1433 if intentsResults and not consistentIntents:
1434 # print the json objects
1435 n = str( main.activeNodes[-1] + 1 )
1436 main.log.debug( "ONOS" + n + " intents: " )
1437 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1438 sort_keys=True,
1439 indent=4,
1440 separators=( ',', ': ' ) ) )
1441 for i in range( len( ONOSIntents ) ):
1442 node = str( main.activeNodes[i] + 1 )
1443 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1444 main.log.debug( "ONOS" + node + " intents: " )
1445 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1446 sort_keys=True,
1447 indent=4,
1448 separators=( ',', ': ' ) ) )
1449 else:
1450 main.log.debug( "ONOS" + node + " intents match ONOS" +
1451 n + " intents" )
1452 elif intentsResults and consistentIntents:
1453 intentState = ONOSIntents[ 0 ]
1454
1455 main.step( "Get the flows from each controller" )
1456 global flowState
1457 flowState = []
1458 ONOSFlows = []
1459 ONOSFlowsJson = []
1460 flowCheck = main.FALSE
1461 consistentFlows = True
1462 flowsResults = True
1463 threads = []
1464 for i in main.activeNodes:
1465 t = main.Thread( target=main.CLIs[i].flows,
1466 name="flows-" + str( i ),
1467 args=[],
1468 kwargs={ 'jsonFormat': True } )
1469 threads.append( t )
1470 t.start()
1471
1472 # NOTE: Flows command can take some time to run
1473 time.sleep(30)
1474 for t in threads:
1475 t.join()
1476 result = t.result
1477 ONOSFlows.append( result )
1478
1479 for i in range( len( ONOSFlows ) ):
1480 num = str( main.activeNodes[i] + 1 )
1481 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1482 main.log.error( "Error in getting ONOS" + num + " flows" )
1483 main.log.warn( "ONOS" + num + " flows response: " +
1484 repr( ONOSFlows[ i ] ) )
1485 flowsResults = False
1486 ONOSFlowsJson.append( None )
1487 else:
1488 try:
1489 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1490 except ( ValueError, TypeError ):
1491 # FIXME: change this to log.error?
1492 main.log.exception( "Error in parsing ONOS" + num +
1493 " response as json." )
1494 main.log.error( repr( ONOSFlows[ i ] ) )
1495 ONOSFlowsJson.append( None )
1496 flowsResults = False
1497 utilities.assert_equals(
1498 expect=True,
1499 actual=flowsResults,
1500 onpass="No error in reading flows output",
1501 onfail="Error in reading flows from ONOS" )
1502
1503 main.step( "Check for consistency in Flows from each controller" )
1504 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1505 if all( tmp ):
1506 main.log.info( "Flow count is consistent across all ONOS nodes" )
1507 else:
1508 consistentFlows = False
1509 utilities.assert_equals(
1510 expect=True,
1511 actual=consistentFlows,
1512 onpass="The flow count is consistent across all ONOS nodes",
1513 onfail="ONOS nodes have different flow counts" )
1514
1515 if flowsResults and not consistentFlows:
1516 for i in range( len( ONOSFlows ) ):
1517 node = str( main.activeNodes[i] + 1 )
1518 try:
1519 main.log.warn(
1520 "ONOS" + node + " flows: " +
1521 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1522 indent=4, separators=( ',', ': ' ) ) )
1523 except ( ValueError, TypeError ):
1524 main.log.warn( "ONOS" + node + " flows: " +
1525 repr( ONOSFlows[ i ] ) )
1526 elif flowsResults and consistentFlows:
1527 flowCheck = main.TRUE
1528 flowState = ONOSFlows[ 0 ]
1529
1530 main.step( "Get the OF Table entries" )
1531 global flows
1532 flows = []
1533 for i in range( 1, 29 ):
1534 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1535 if flowCheck == main.FALSE:
1536 for table in flows:
1537 main.log.warn( table )
1538 # TODO: Compare switch flow tables with ONOS flow tables
1539
1540 main.step( "Start continuous pings" )
1541 main.Mininet2.pingLong(
1542 src=main.params[ 'PING' ][ 'source1' ],
1543 target=main.params[ 'PING' ][ 'target1' ],
1544 pingTime=500 )
1545 main.Mininet2.pingLong(
1546 src=main.params[ 'PING' ][ 'source2' ],
1547 target=main.params[ 'PING' ][ 'target2' ],
1548 pingTime=500 )
1549 main.Mininet2.pingLong(
1550 src=main.params[ 'PING' ][ 'source3' ],
1551 target=main.params[ 'PING' ][ 'target3' ],
1552 pingTime=500 )
1553 main.Mininet2.pingLong(
1554 src=main.params[ 'PING' ][ 'source4' ],
1555 target=main.params[ 'PING' ][ 'target4' ],
1556 pingTime=500 )
1557 main.Mininet2.pingLong(
1558 src=main.params[ 'PING' ][ 'source5' ],
1559 target=main.params[ 'PING' ][ 'target5' ],
1560 pingTime=500 )
1561 main.Mininet2.pingLong(
1562 src=main.params[ 'PING' ][ 'source6' ],
1563 target=main.params[ 'PING' ][ 'target6' ],
1564 pingTime=500 )
1565 main.Mininet2.pingLong(
1566 src=main.params[ 'PING' ][ 'source7' ],
1567 target=main.params[ 'PING' ][ 'target7' ],
1568 pingTime=500 )
1569 main.Mininet2.pingLong(
1570 src=main.params[ 'PING' ][ 'source8' ],
1571 target=main.params[ 'PING' ][ 'target8' ],
1572 pingTime=500 )
1573 main.Mininet2.pingLong(
1574 src=main.params[ 'PING' ][ 'source9' ],
1575 target=main.params[ 'PING' ][ 'target9' ],
1576 pingTime=500 )
1577 main.Mininet2.pingLong(
1578 src=main.params[ 'PING' ][ 'source10' ],
1579 target=main.params[ 'PING' ][ 'target10' ],
1580 pingTime=500 )
1581
1582 main.step( "Collecting topology information from ONOS" )
1583 devices = []
1584 threads = []
1585 for i in main.activeNodes:
1586 t = main.Thread( target=main.CLIs[i].devices,
1587 name="devices-" + str( i ),
1588 args=[ ] )
1589 threads.append( t )
1590 t.start()
1591
1592 for t in threads:
1593 t.join()
1594 devices.append( t.result )
1595 hosts = []
1596 threads = []
1597 for i in main.activeNodes:
1598 t = main.Thread( target=main.CLIs[i].hosts,
1599 name="hosts-" + str( i ),
1600 args=[ ] )
1601 threads.append( t )
1602 t.start()
1603
1604 for t in threads:
1605 t.join()
1606 try:
1607 hosts.append( json.loads( t.result ) )
1608 except ( ValueError, TypeError ):
1609 # FIXME: better handling of this, print which node
1610 # Maybe use thread name?
1611 main.log.exception( "Error parsing json output of hosts" )
1612 main.log.warn( repr( t.result ) )
1613 hosts.append( None )
1614
1615 ports = []
1616 threads = []
1617 for i in main.activeNodes:
1618 t = main.Thread( target=main.CLIs[i].ports,
1619 name="ports-" + str( i ),
1620 args=[ ] )
1621 threads.append( t )
1622 t.start()
1623
1624 for t in threads:
1625 t.join()
1626 ports.append( t.result )
1627 links = []
1628 threads = []
1629 for i in main.activeNodes:
1630 t = main.Thread( target=main.CLIs[i].links,
1631 name="links-" + str( i ),
1632 args=[ ] )
1633 threads.append( t )
1634 t.start()
1635
1636 for t in threads:
1637 t.join()
1638 links.append( t.result )
1639 clusters = []
1640 threads = []
1641 for i in main.activeNodes:
1642 t = main.Thread( target=main.CLIs[i].clusters,
1643 name="clusters-" + str( i ),
1644 args=[ ] )
1645 threads.append( t )
1646 t.start()
1647
1648 for t in threads:
1649 t.join()
1650 clusters.append( t.result )
1651 # Compare json objects for hosts and dataplane clusters
1652
1653 # hosts
1654 main.step( "Host view is consistent across ONOS nodes" )
1655 consistentHostsResult = main.TRUE
1656 for controller in range( len( hosts ) ):
1657 controllerStr = str( main.activeNodes[controller] + 1 )
1658 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1659 if hosts[ controller ] == hosts[ 0 ]:
1660 continue
1661 else: # hosts not consistent
1662 main.log.error( "hosts from ONOS" +
1663 controllerStr +
1664 " is inconsistent with ONOS1" )
1665 main.log.warn( repr( hosts[ controller ] ) )
1666 consistentHostsResult = main.FALSE
1667
1668 else:
1669 main.log.error( "Error in getting ONOS hosts from ONOS" +
1670 controllerStr )
1671 consistentHostsResult = main.FALSE
1672 main.log.warn( "ONOS" + controllerStr +
1673 " hosts response: " +
1674 repr( hosts[ controller ] ) )
1675 utilities.assert_equals(
1676 expect=main.TRUE,
1677 actual=consistentHostsResult,
1678 onpass="Hosts view is consistent across all ONOS nodes",
1679 onfail="ONOS nodes have different views of hosts" )
1680
1681 main.step( "Each host has an IP address" )
1682 ipResult = main.TRUE
1683 for controller in range( 0, len( hosts ) ):
1684 controllerStr = str( main.activeNodes[controller] + 1 )
1685 if hosts[ controller ]:
1686 for host in hosts[ controller ]:
1687 if not host.get( 'ipAddresses', [ ] ):
1688 main.log.error( "Error with host ips on controller" +
1689 controllerStr + ": " + str( host ) )
1690 ipResult = main.FALSE
1691 utilities.assert_equals(
1692 expect=main.TRUE,
1693 actual=ipResult,
1694 onpass="The ips of the hosts aren't empty",
1695 onfail="The ip of at least one host is missing" )
1696
1697 # Strongly connected clusters of devices
1698 main.step( "Cluster view is consistent across ONOS nodes" )
1699 consistentClustersResult = main.TRUE
1700 for controller in range( len( clusters ) ):
1701 controllerStr = str( main.activeNodes[controller] + 1 )
1702 if "Error" not in clusters[ controller ]:
1703 if clusters[ controller ] == clusters[ 0 ]:
1704 continue
1705 else: # clusters not consistent
1706 main.log.error( "clusters from ONOS" + controllerStr +
1707 " is inconsistent with ONOS1" )
1708 consistentClustersResult = main.FALSE
1709
1710 else:
1711 main.log.error( "Error in getting dataplane clusters " +
1712 "from ONOS" + controllerStr )
1713 consistentClustersResult = main.FALSE
1714 main.log.warn( "ONOS" + controllerStr +
1715 " clusters response: " +
1716 repr( clusters[ controller ] ) )
1717 utilities.assert_equals(
1718 expect=main.TRUE,
1719 actual=consistentClustersResult,
1720 onpass="Clusters view is consistent across all ONOS nodes",
1721 onfail="ONOS nodes have different views of clusters" )
1722 if consistentClustersResult != main.TRUE:
1723 main.log.debug( clusters )
1724 # there should always only be one cluster
1725 main.step( "Cluster view correct across ONOS nodes" )
1726 try:
1727 numClusters = len( json.loads( clusters[ 0 ] ) )
1728 except ( ValueError, TypeError ):
1729 main.log.exception( "Error parsing clusters[0]: " +
1730 repr( clusters[ 0 ] ) )
1731 numClusters = "ERROR"
1732 utilities.assert_equals(
1733 expect=1,
1734 actual=numClusters,
1735 onpass="ONOS shows 1 SCC",
1736 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1737
1738 main.step( "Comparing ONOS topology to MN" )
1739 devicesResults = main.TRUE
1740 linksResults = main.TRUE
1741 hostsResults = main.TRUE
1742 mnSwitches = main.Mininet1.getSwitches()
1743 mnLinks = main.Mininet1.getLinks()
1744 mnHosts = main.Mininet1.getHosts()
1745 for controller in main.activeNodes:
1746 controllerStr = str( main.activeNodes[controller] + 1 )
1747 if devices[ controller ] and ports[ controller ] and\
1748 "Error" not in devices[ controller ] and\
1749 "Error" not in ports[ controller ]:
1750 currentDevicesResult = main.Mininet1.compareSwitches(
1751 mnSwitches,
1752 json.loads( devices[ controller ] ),
1753 json.loads( ports[ controller ] ) )
1754 else:
1755 currentDevicesResult = main.FALSE
1756 utilities.assert_equals( expect=main.TRUE,
1757 actual=currentDevicesResult,
1758 onpass="ONOS" + controllerStr +
1759 " Switches view is correct",
1760 onfail="ONOS" + controllerStr +
1761 " Switches view is incorrect" )
1762 if links[ controller ] and "Error" not in links[ controller ]:
1763 currentLinksResult = main.Mininet1.compareLinks(
1764 mnSwitches, mnLinks,
1765 json.loads( links[ controller ] ) )
1766 else:
1767 currentLinksResult = main.FALSE
1768 utilities.assert_equals( expect=main.TRUE,
1769 actual=currentLinksResult,
1770 onpass="ONOS" + controllerStr +
1771 " links view is correct",
1772 onfail="ONOS" + controllerStr +
1773 " links view is incorrect" )
1774
1775 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1776 currentHostsResult = main.Mininet1.compareHosts(
1777 mnHosts,
1778 hosts[ controller ] )
1779 else:
1780 currentHostsResult = main.FALSE
1781 utilities.assert_equals( expect=main.TRUE,
1782 actual=currentHostsResult,
1783 onpass="ONOS" + controllerStr +
1784 " hosts exist in Mininet",
1785 onfail="ONOS" + controllerStr +
1786 " hosts don't match Mininet" )
1787
1788 devicesResults = devicesResults and currentDevicesResult
1789 linksResults = linksResults and currentLinksResult
1790 hostsResults = hostsResults and currentHostsResult
1791
1792 main.step( "Device information is correct" )
1793 utilities.assert_equals(
1794 expect=main.TRUE,
1795 actual=devicesResults,
1796 onpass="Device information is correct",
1797 onfail="Device information is incorrect" )
1798
1799 main.step( "Links are correct" )
1800 utilities.assert_equals(
1801 expect=main.TRUE,
1802 actual=linksResults,
1803 onpass="Link are correct",
1804 onfail="Links are incorrect" )
1805
1806 main.step( "Hosts are correct" )
1807 utilities.assert_equals(
1808 expect=main.TRUE,
1809 actual=hostsResults,
1810 onpass="Hosts are correct",
1811 onfail="Hosts are incorrect" )
1812
1813 def CASE6( self, main ):
1814 """
1815 The Scaling case.
1816 """
1817 import time
1818 import re
1819 assert main.numCtrls, "main.numCtrls not defined"
1820 assert main, "main not defined"
1821 assert utilities.assert_equals, "utilities.assert_equals not defined"
1822 assert main.CLIs, "main.CLIs not defined"
1823 assert main.nodes, "main.nodes not defined"
1824 try:
1825 labels
1826 except NameError:
1827 main.log.error( "labels not defined, setting to []" )
1828 global labels
1829 labels = []
1830 try:
1831 data
1832 except NameError:
1833 main.log.error( "data not defined, setting to []" )
1834 global data
1835 data = []
1836
1837 main.case( "Restart entire ONOS cluster" )
1838
1839 main.step( "Checking ONOS Logs for errors" )
1840 for i in main.activeNodes:
1841 node = main.nodes[i]
1842 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1843 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1844
1845 """
1846 pop # of nodes from a list, might look like 1,3b,3,5b,5,7b,7,7b,5,5b,3...
1847 modify cluster.json file appropriately
1848 install/deactivate node as needed
1849 """
1850
1851 try:
1852 prevNodes = main.activeNodes
1853 scale = main.scaling.pop(0)
1854 if "e" in scale:
1855 equal = True
1856 else:
1857 equal = False
1858 main.numCtrls = int( re.search( "\d+", scale ).group(0) )
1859 main.log.info( "Scaling to {} nodes".format( main.numCtrls ) )
1860 genResult = main.Server.generateFile( main.numCtrls, equal=equal )
1861 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1862 onpass="New cluster metadata file generated",
1863 onfail="Failled to generate new metadata file" )
1864 time.sleep( 5 ) # Give time for nodes to read new file
1865 except IndexError:
1866 main.cleanup()
1867 main.exit()
1868
1869 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
1870 newNodes = [ x for x in main.activeNodes if x not in prevNodes ]
1871
1872 main.step( "Start new nodes" ) # OR stop old nodes?
1873 started = main.TRUE
1874 for i in newNodes:
1875 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1876 utilities.assert_equals( expect=main.TRUE, actual=started,
1877 onpass="ONOS started",
1878 onfail="ONOS start NOT successful" )
1879
1880 main.step( "Checking if ONOS is up yet" )
1881 for i in range( 2 ):
1882 onosIsupResult = main.TRUE
1883 for i in main.activeNodes:
1884 node = main.nodes[i]
1885 started = main.ONOSbench.isup( node.ip_address )
1886 if not started:
1887 main.log.error( node.name + " didn't start!" )
1888 onosIsupResult = onosIsupResult and started
1889 if onosIsupResult == main.TRUE:
1890 break
1891 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1892 onpass="ONOS started",
1893 onfail="ONOS start NOT successful" )
1894
1895 main.log.step( "Starting ONOS CLI sessions" )
1896 cliResults = main.TRUE
1897 threads = []
1898 for i in main.activeNodes:
1899 t = main.Thread( target=main.CLIs[i].startOnosCli,
1900 name="startOnosCli-" + str( i ),
1901 args=[main.nodes[i].ip_address] )
1902 threads.append( t )
1903 t.start()
1904
1905 for t in threads:
1906 t.join()
1907 cliResults = cliResults and t.result
1908 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1909 onpass="ONOS cli started",
1910 onfail="ONOS clis did not start" )
1911
1912 main.step( "Checking ONOS nodes" )
1913 nodeResults = utilities.retry( main.HA.nodesCheck,
1914 False,
1915 args=[main.activeNodes],
1916 attempts=5 )
1917 utilities.assert_equals( expect=True, actual=nodeResults,
1918 onpass="Nodes check successful",
1919 onfail="Nodes check NOT successful" )
1920
1921 for i in range( 10 ):
1922 ready = True
1923 for i in main.activeNodes:
1924 cli = main.CLIs[i]
1925 output = cli.summary()
1926 if not output:
1927 ready = False
1928 if ready:
1929 break
1930 time.sleep( 30 )
1931 utilities.assert_equals( expect=True, actual=ready,
1932 onpass="ONOS summary command succeded",
1933 onfail="ONOS summary command failed" )
1934 if not ready:
1935 main.cleanup()
1936 main.exit()
1937
1938 # Rerun for election on new nodes
1939 runResults = main.TRUE
1940 for i in main.activeNodes:
1941 cli = main.CLIs[i]
1942 run = cli.electionTestRun()
1943 if run != main.TRUE:
1944 main.log.error( "Error running for election on " + cli.name )
1945 runResults = runResults and run
1946 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1947 onpass="Reran for election",
1948 onfail="Failed to rerun for election" )
1949
1950 # TODO: Make this configurable
1951 time.sleep( 60 )
1952 for node in main.activeNodes:
1953 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1954 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1955 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1956 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1957 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1958
    def CASE7( self, main ):
        """
        Check state after ONOS scaling.

        Verifies that the control plane state survived the scale event:
        every switch still has a master, device roles and intents are
        consistent across all active nodes, intents and OF flow tables match
        the pre-scaling snapshots, and leadership election still works.

        NOTE(review): relies on module-level state saved by earlier cases —
        `intentState` (intents snapshot, CASE5) and `flows` (per-switch flow
        tables); if CASE5 did not run, the intent comparison is skipped with
        a warning but the flow comparison will raise on the missing `flows`.
        """
        import json
        # Sanity-check that the test environment was set up by CASE1
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # One thread per active node; results are ANDed together after join.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect the raw `roles` CLI output from every active node in parallel
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose response is empty or contains "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes should report byte-identical role output
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's pretty-printed roles for triage
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE(review): sorted() here operates on the raw response strings,
        # so this is an order-insensitive character-level comparison.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a histogram of intent states per node for debugging
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        try:
            # Probe for the module-level snapshot saved by an earlier case
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length: fall back to a per-intent membership check
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before scaling",
            onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        # NOTE(review): assumes the fixed 28-switch HA topology and that
        # `flows` was populated by an earlier case — TODO confirm.
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        # Every node should agree on a single non-None leader
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2267
    def CASE8( self, main ):
        """
        Compare topo.

        Repeatedly pulls devices, hosts, ports, links and clusters from every
        active ONOS node (with retries, in parallel threads) and compares
        them against the Mininet topology until they match or the retry
        budget (60s elapsed or 3 attempts, whichever is later) is exhausted.
        Then checks cross-node consistency of hosts and clusters, expects
        exactly one strongly-connected component, and finishes with a node
        health check.
        """
        import json
        import time
        # Sanity-check that the test environment was set up by CASE1
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # --- devices: one retried CLI call per node, in parallel ---
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            # --- hosts: parsed to JSON here; None on parse failure ---
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host should have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            # --- ports ---
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            # --- links ---
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            # --- clusters (strongly connected components) ---
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every node failed every query, retry instead of comparing
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            # Compare each node's view against the Mininet ground truth
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                   "Error" not in devices[ controller ] and\
                   "Error" not in ports[ controller ]:

                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ):
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    # An empty host list is tolerated here
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    # TODO: Find a way to know if there should be hosts in a
                    #       given point of the test
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
            utilities.assert_equals( expect=True,
                                     actual=topoResult,
                                     onpass="ONOS topology matches Mininet",
                                     onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters
        # NOTE(review): the code below reuses `hosts`, `clusters`, `cliTime`
        # etc. from the final iteration of the while loop above.

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # Also require convergence within two polling attempts
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
            " seconds for the test to make all the cli calls to fetch " +\
            "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            # Dump the non-ACTIVE karaf components of each node for triage
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
2684
2685 def CASE9( self, main ):
2686 """
2687 Link s3-s28 down
2688 """
2689 import time
2690 assert main.numCtrls, "main.numCtrls not defined"
2691 assert main, "main not defined"
2692 assert utilities.assert_equals, "utilities.assert_equals not defined"
2693 assert main.CLIs, "main.CLIs not defined"
2694 assert main.nodes, "main.nodes not defined"
2695 # NOTE: You should probably run a topology check after this
2696
2697 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2698
2699 description = "Turn off a link to ensure that Link Discovery " +\
2700 "is working properly"
2701 main.case( description )
2702
2703 main.step( "Kill Link between s3 and s28" )
2704 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2705 main.log.info( "Waiting " + str( linkSleep ) +
2706 " seconds for link down to be discovered" )
2707 time.sleep( linkSleep )
2708 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2709 onpass="Link down successful",
2710 onfail="Failed to bring link down" )
2711 # TODO do some sort of check here
2712
2713 def CASE10( self, main ):
2714 """
2715 Link s3-s28 up
2716 """
2717 import time
2718 assert main.numCtrls, "main.numCtrls not defined"
2719 assert main, "main not defined"
2720 assert utilities.assert_equals, "utilities.assert_equals not defined"
2721 assert main.CLIs, "main.CLIs not defined"
2722 assert main.nodes, "main.nodes not defined"
2723 # NOTE: You should probably run a topology check after this
2724
2725 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2726
2727 description = "Restore a link to ensure that Link Discovery is " + \
2728 "working properly"
2729 main.case( description )
2730
2731 main.step( "Bring link between s3 and s28 back up" )
2732 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2733 main.log.info( "Waiting " + str( linkSleep ) +
2734 " seconds for link up to be discovered" )
2735 time.sleep( linkSleep )
2736 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2737 onpass="Link up successful",
2738 onfail="Failed to bring link up" )
2739 # TODO do some sort of check here
2740
2741 def CASE11( self, main ):
2742 """
2743 Switch Down
2744 """
2745 # NOTE: You should probably run a topology check after this
2746 import time
2747 assert main.numCtrls, "main.numCtrls not defined"
2748 assert main, "main not defined"
2749 assert utilities.assert_equals, "utilities.assert_equals not defined"
2750 assert main.CLIs, "main.CLIs not defined"
2751 assert main.nodes, "main.nodes not defined"
2752
2753 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2754
2755 description = "Killing a switch to ensure it is discovered correctly"
2756 onosCli = main.CLIs[ main.activeNodes[0] ]
2757 main.case( description )
2758 switch = main.params[ 'kill' ][ 'switch' ]
2759 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2760
2761 # TODO: Make this switch parameterizable
2762 main.step( "Kill " + switch )
2763 main.log.info( "Deleting " + switch )
2764 main.Mininet1.delSwitch( switch )
2765 main.log.info( "Waiting " + str( switchSleep ) +
2766 " seconds for switch down to be discovered" )
2767 time.sleep( switchSleep )
2768 device = onosCli.getDevice( dpid=switchDPID )
2769 # Peek at the deleted switch
2770 main.log.warn( str( device ) )
2771 result = main.FALSE
2772 if device and device[ 'available' ] is False:
2773 result = main.TRUE
2774 utilities.assert_equals( expect=main.TRUE, actual=result,
2775 onpass="Kill switch successful",
2776 onfail="Failed to kill switch?" )
2777
2778 def CASE12( self, main ):
2779 """
2780 Switch Up
2781 """
2782 # NOTE: You should probably run a topology check after this
2783 import time
2784 assert main.numCtrls, "main.numCtrls not defined"
2785 assert main, "main not defined"
2786 assert utilities.assert_equals, "utilities.assert_equals not defined"
2787 assert main.CLIs, "main.CLIs not defined"
2788 assert main.nodes, "main.nodes not defined"
2789
2790 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2791 switch = main.params[ 'kill' ][ 'switch' ]
2792 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2793 links = main.params[ 'kill' ][ 'links' ].split()
2794 onosCli = main.CLIs[ main.activeNodes[0] ]
2795 description = "Adding a switch to ensure it is discovered correctly"
2796 main.case( description )
2797
2798 main.step( "Add back " + switch )
2799 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2800 for peer in links:
2801 main.Mininet1.addLink( switch, peer )
2802 ipList = [ node.ip_address for node in main.nodes ]
2803 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2804 main.log.info( "Waiting " + str( switchSleep ) +
2805 " seconds for switch up to be discovered" )
2806 time.sleep( switchSleep )
2807 device = onosCli.getDevice( dpid=switchDPID )
2808 # Peek at the deleted switch
2809 main.log.warn( str( device ) )
2810 result = main.FALSE
2811 if device and device[ 'available' ]:
2812 result = main.TRUE
2813 utilities.assert_equals( expect=main.TRUE, actual=result,
2814 onpass="add switch successful",
2815 onfail="Failed to add switch?" )
2816
2817 def CASE13( self, main ):
2818 """
2819 Clean up
2820 """
2821 assert main.numCtrls, "main.numCtrls not defined"
2822 assert main, "main not defined"
2823 assert utilities.assert_equals, "utilities.assert_equals not defined"
2824 assert main.CLIs, "main.CLIs not defined"
2825 assert main.nodes, "main.nodes not defined"
2826
2827 main.case( "Test Cleanup" )
2828 main.step( "Killing tcpdumps" )
2829 main.Mininet2.stopTcpdump()
2830
2831 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2832 main.step( "Copying MN pcap and ONOS log files to test station" )
2833 # NOTE: MN Pcap file is being saved to logdir.
2834 # We scp this file as MN and TestON aren't necessarily the same vm
2835
2836 # FIXME: To be replaced with a Jenkin's post script
2837 # TODO: Load these from params
2838 # NOTE: must end in /
2839 logFolder = "/opt/onos/log/"
2840 logFiles = [ "karaf.log", "karaf.log.1" ]
2841 # NOTE: must end in /
2842 for f in logFiles:
2843 for node in main.nodes:
2844 dstName = main.logdir + "/" + node.name + "-" + f
2845 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2846 logFolder + f, dstName )
2847 # std*.log's
2848 # NOTE: must end in /
2849 logFolder = "/opt/onos/var/"
2850 logFiles = [ "stderr.log", "stdout.log" ]
2851 # NOTE: must end in /
2852 for f in logFiles:
2853 for node in main.nodes:
2854 dstName = main.logdir + "/" + node.name + "-" + f
2855 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2856 logFolder + f, dstName )
2857 else:
2858 main.log.debug( "skipping saving log files" )
2859
2860 main.step( "Stopping Mininet" )
2861 mnResult = main.Mininet1.stopNet()
2862 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2863 onpass="Mininet stopped",
2864 onfail="MN cleanup NOT successful" )
2865
2866 main.step( "Checking ONOS Logs for errors" )
2867 for node in main.nodes:
2868 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2869 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2870
2871 try:
2872 timerLog = open( main.logdir + "/Timers.csv", 'w')
2873 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2874 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2875 timerLog.close()
2876 except NameError, e:
2877 main.log.exception(e)
2878
2879 main.step( "Stopping webserver" )
2880 status = main.Server.stop( )
2881 utilities.assert_equals( expect=main.TRUE, actual=status,
2882 onpass="Stop Server",
2883 onfail="Failled to stop SimpleHTTPServer" )
2884 del main.Server
2885
2886 def CASE14( self, main ):
2887 """
2888 start election app on all onos nodes
2889 """
2890 import time
2891 assert main.numCtrls, "main.numCtrls not defined"
2892 assert main, "main not defined"
2893 assert utilities.assert_equals, "utilities.assert_equals not defined"
2894 assert main.CLIs, "main.CLIs not defined"
2895 assert main.nodes, "main.nodes not defined"
2896
2897 main.case("Start Leadership Election app")
2898 main.step( "Install leadership election app" )
2899 onosCli = main.CLIs[ main.activeNodes[0] ]
2900 appResult = onosCli.activateApp( "org.onosproject.election" )
2901 utilities.assert_equals(
2902 expect=main.TRUE,
2903 actual=appResult,
2904 onpass="Election app installed",
2905 onfail="Something went wrong with installing Leadership election" )
2906
2907 main.step( "Run for election on each node" )
2908 for i in main.activeNodes:
2909 main.CLIs[i].electionTestRun()
2910 time.sleep(5)
2911 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2912 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2913 utilities.assert_equals(
2914 expect=True,
2915 actual=sameResult,
2916 onpass="All nodes see the same leaderboards",
2917 onfail="Inconsistent leaderboards" )
2918
2919 if sameResult:
2920 leader = leaders[ 0 ][ 0 ]
2921 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2922 correctLeader = True
2923 else:
2924 correctLeader = False
2925 main.step( "First node was elected leader" )
2926 utilities.assert_equals(
2927 expect=True,
2928 actual=correctLeader,
2929 onpass="Correct leader was elected",
2930 onfail="Incorrect leader" )
2931
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawl and later before withdrawl vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a canidate is not persistant

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leaders fron newLoeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        # With a single controller, withdrawing the only candidate should
        # leave the topic with no leader at all.
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        # If the election app isn't loaded, none of the later steps can work
        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # Boards agree: the first entry of the first board is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            # Boards disagree: there is no single leader to withdraw
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement
            # Runs only when the loop completed without break, i.e. the
            # leader's IP matched no active node.
            # NOTE(review): in that case oldLeaderCLI stays None while
            # oldLeader may still be truthy, so the withdraw call below
            # would raise AttributeError — confirm this is unreachable.
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' is the sentinel the leaderboard uses for "no leader";
            # that is only acceptable in the single-node case.
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawl
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            # Board layout appears to be [ leader, candidate1, candidate2, ... ]
            # so index 2 is the next in line once the leader withdraws.
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 entries: not enough candidates to predict a successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Paremterize
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            # A re-entering candidate should join at the back of the queue
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3105
3106 def CASE16( self, main ):
3107 """
3108 Install Distributed Primitives app
3109 """
3110 import time
3111 assert main.numCtrls, "main.numCtrls not defined"
3112 assert main, "main not defined"
3113 assert utilities.assert_equals, "utilities.assert_equals not defined"
3114 assert main.CLIs, "main.CLIs not defined"
3115 assert main.nodes, "main.nodes not defined"
3116
3117 # Variables for the distributed primitives tests
3118 global pCounterName
3119 global pCounterValue
3120 global onosSet
3121 global onosSetName
3122 pCounterName = "TestON-Partitions"
3123 pCounterValue = 0
3124 onosSet = set([])
3125 onosSetName = "TestON-set"
3126
3127 description = "Install Primitives app"
3128 main.case( description )
3129 main.step( "Install Primitives app" )
3130 appName = "org.onosproject.distributedprimitives"
3131 node = main.activeNodes[0]
3132 appResults = main.CLIs[node].activateApp( appName )
3133 utilities.assert_equals( expect=main.TRUE,
3134 actual=appResults,
3135 onpass="Primitives app activated",
3136 onfail="Primitives app not activated" )
3137 time.sleep( 5 ) # To allow all nodes to activate
3138
3139 def CASE17( self, main ):
3140 """
3141 Check for basic functionality with distributed primitives
3142 """
3143 # Make sure variables are defined/set
3144 assert main.numCtrls, "main.numCtrls not defined"
3145 assert main, "main not defined"
3146 assert utilities.assert_equals, "utilities.assert_equals not defined"
3147 assert main.CLIs, "main.CLIs not defined"
3148 assert main.nodes, "main.nodes not defined"
3149 assert pCounterName, "pCounterName not defined"
3150 assert onosSetName, "onosSetName not defined"
3151 # NOTE: assert fails if value is 0/None/Empty/False
3152 try:
3153 pCounterValue
3154 except NameError:
3155 main.log.error( "pCounterValue not defined, setting to 0" )
3156 pCounterValue = 0
3157 try:
3158 onosSet
3159 except NameError:
3160 main.log.error( "onosSet not defined, setting to empty Set" )
3161 onosSet = set([])
3162 # Variables for the distributed primitives tests. These are local only
3163 addValue = "a"
3164 addAllValue = "a b c d e f"
3165 retainValue = "c d e f"
3166
3167 description = "Check for basic functionality with distributed " +\
3168 "primitives"
3169 main.case( description )
3170 main.caseExplanation = "Test the methods of the distributed " +\
3171 "primitives (counters and sets) throught the cli"
3172 # DISTRIBUTED ATOMIC COUNTERS
3173 # Partitioned counters
3174 main.step( "Increment then get a default counter on each node" )
3175 pCounters = []
3176 threads = []
3177 addedPValues = []
3178 for i in main.activeNodes:
3179 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3180 name="counterAddAndGet-" + str( i ),
3181 args=[ pCounterName ] )
3182 pCounterValue += 1
3183 addedPValues.append( pCounterValue )
3184 threads.append( t )
3185 t.start()
3186
3187 for t in threads:
3188 t.join()
3189 pCounters.append( t.result )
3190 # Check that counter incremented numController times
3191 pCounterResults = True
3192 for i in addedPValues:
3193 tmpResult = i in pCounters
3194 pCounterResults = pCounterResults and tmpResult
3195 if not tmpResult:
3196 main.log.error( str( i ) + " is not in partitioned "
3197 "counter incremented results" )
3198 utilities.assert_equals( expect=True,
3199 actual=pCounterResults,
3200 onpass="Default counter incremented",
3201 onfail="Error incrementing default" +
3202 " counter" )
3203
3204 main.step( "Get then Increment a default counter on each node" )
3205 pCounters = []
3206 threads = []
3207 addedPValues = []
3208 for i in main.activeNodes:
3209 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3210 name="counterGetAndAdd-" + str( i ),
3211 args=[ pCounterName ] )
3212 addedPValues.append( pCounterValue )
3213 pCounterValue += 1
3214 threads.append( t )
3215 t.start()
3216
3217 for t in threads:
3218 t.join()
3219 pCounters.append( t.result )
3220 # Check that counter incremented numController times
3221 pCounterResults = True
3222 for i in addedPValues:
3223 tmpResult = i in pCounters
3224 pCounterResults = pCounterResults and tmpResult
3225 if not tmpResult:
3226 main.log.error( str( i ) + " is not in partitioned "
3227 "counter incremented results" )
3228 utilities.assert_equals( expect=True,
3229 actual=pCounterResults,
3230 onpass="Default counter incremented",
3231 onfail="Error incrementing default" +
3232 " counter" )
3233
3234 main.step( "Counters we added have the correct values" )
3235 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3236 utilities.assert_equals( expect=main.TRUE,
3237 actual=incrementCheck,
3238 onpass="Added counters are correct",
3239 onfail="Added counters are incorrect" )
3240
3241 main.step( "Add -8 to then get a default counter on each node" )
3242 pCounters = []
3243 threads = []
3244 addedPValues = []
3245 for i in main.activeNodes:
3246 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3247 name="counterIncrement-" + str( i ),
3248 args=[ pCounterName ],
3249 kwargs={ "delta": -8 } )
3250 pCounterValue += -8
3251 addedPValues.append( pCounterValue )
3252 threads.append( t )
3253 t.start()
3254
3255 for t in threads:
3256 t.join()
3257 pCounters.append( t.result )
3258 # Check that counter incremented numController times
3259 pCounterResults = True
3260 for i in addedPValues:
3261 tmpResult = i in pCounters
3262 pCounterResults = pCounterResults and tmpResult
3263 if not tmpResult:
3264 main.log.error( str( i ) + " is not in partitioned "
3265 "counter incremented results" )
3266 utilities.assert_equals( expect=True,
3267 actual=pCounterResults,
3268 onpass="Default counter incremented",
3269 onfail="Error incrementing default" +
3270 " counter" )
3271
3272 main.step( "Add 5 to then get a default counter on each node" )
3273 pCounters = []
3274 threads = []
3275 addedPValues = []
3276 for i in main.activeNodes:
3277 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3278 name="counterIncrement-" + str( i ),
3279 args=[ pCounterName ],
3280 kwargs={ "delta": 5 } )
3281 pCounterValue += 5
3282 addedPValues.append( pCounterValue )
3283 threads.append( t )
3284 t.start()
3285
3286 for t in threads:
3287 t.join()
3288 pCounters.append( t.result )
3289 # Check that counter incremented numController times
3290 pCounterResults = True
3291 for i in addedPValues:
3292 tmpResult = i in pCounters
3293 pCounterResults = pCounterResults and tmpResult
3294 if not tmpResult:
3295 main.log.error( str( i ) + " is not in partitioned "
3296 "counter incremented results" )
3297 utilities.assert_equals( expect=True,
3298 actual=pCounterResults,
3299 onpass="Default counter incremented",
3300 onfail="Error incrementing default" +
3301 " counter" )
3302
3303 main.step( "Get then add 5 to a default counter on each node" )
3304 pCounters = []
3305 threads = []
3306 addedPValues = []
3307 for i in main.activeNodes:
3308 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3309 name="counterIncrement-" + str( i ),
3310 args=[ pCounterName ],
3311 kwargs={ "delta": 5 } )
3312 addedPValues.append( pCounterValue )
3313 pCounterValue += 5
3314 threads.append( t )
3315 t.start()
3316
3317 for t in threads:
3318 t.join()
3319 pCounters.append( t.result )
3320 # Check that counter incremented numController times
3321 pCounterResults = True
3322 for i in addedPValues:
3323 tmpResult = i in pCounters
3324 pCounterResults = pCounterResults and tmpResult
3325 if not tmpResult:
3326 main.log.error( str( i ) + " is not in partitioned "
3327 "counter incremented results" )
3328 utilities.assert_equals( expect=True,
3329 actual=pCounterResults,
3330 onpass="Default counter incremented",
3331 onfail="Error incrementing default" +
3332 " counter" )
3333
3334 main.step( "Counters we added have the correct values" )
3335 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3336 utilities.assert_equals( expect=main.TRUE,
3337 actual=incrementCheck,
3338 onpass="Added counters are correct",
3339 onfail="Added counters are incorrect" )
3340
3341 # DISTRIBUTED SETS
3342 main.step( "Distributed Set get" )
3343 size = len( onosSet )
3344 getResponses = []
3345 threads = []
3346 for i in main.activeNodes:
3347 t = main.Thread( target=main.CLIs[i].setTestGet,
3348 name="setTestGet-" + str( i ),
3349 args=[ onosSetName ] )
3350 threads.append( t )
3351 t.start()
3352 for t in threads:
3353 t.join()
3354 getResponses.append( t.result )
3355
3356 getResults = main.TRUE
3357 for i in range( len( main.activeNodes ) ):
3358 node = str( main.activeNodes[i] + 1 )
3359 if isinstance( getResponses[ i ], list):
3360 current = set( getResponses[ i ] )
3361 if len( current ) == len( getResponses[ i ] ):
3362 # no repeats
3363 if onosSet != current:
3364 main.log.error( "ONOS" + node +
3365 " has incorrect view" +
3366 " of set " + onosSetName + ":\n" +
3367 str( getResponses[ i ] ) )
3368 main.log.debug( "Expected: " + str( onosSet ) )
3369 main.log.debug( "Actual: " + str( current ) )
3370 getResults = main.FALSE
3371 else:
3372 # error, set is not a set
3373 main.log.error( "ONOS" + node +
3374 " has repeat elements in" +
3375 " set " + onosSetName + ":\n" +
3376 str( getResponses[ i ] ) )
3377 getResults = main.FALSE
3378 elif getResponses[ i ] == main.ERROR:
3379 getResults = main.FALSE
3380 utilities.assert_equals( expect=main.TRUE,
3381 actual=getResults,
3382 onpass="Set elements are correct",
3383 onfail="Set elements are incorrect" )
3384
3385 main.step( "Distributed Set size" )
3386 sizeResponses = []
3387 threads = []
3388 for i in main.activeNodes:
3389 t = main.Thread( target=main.CLIs[i].setTestSize,
3390 name="setTestSize-" + str( i ),
3391 args=[ onosSetName ] )
3392 threads.append( t )
3393 t.start()
3394 for t in threads:
3395 t.join()
3396 sizeResponses.append( t.result )
3397
3398 sizeResults = main.TRUE
3399 for i in range( len( main.activeNodes ) ):
3400 node = str( main.activeNodes[i] + 1 )
3401 if size != sizeResponses[ i ]:
3402 sizeResults = main.FALSE
3403 main.log.error( "ONOS" + node +
3404 " expected a size of " + str( size ) +
3405 " for set " + onosSetName +
3406 " but got " + str( sizeResponses[ i ] ) )
3407 utilities.assert_equals( expect=main.TRUE,
3408 actual=sizeResults,
3409 onpass="Set sizes are correct",
3410 onfail="Set sizes are incorrect" )
3411
3412 main.step( "Distributed Set add()" )
3413 onosSet.add( addValue )
3414 addResponses = []
3415 threads = []
3416 for i in main.activeNodes:
3417 t = main.Thread( target=main.CLIs[i].setTestAdd,
3418 name="setTestAdd-" + str( i ),
3419 args=[ onosSetName, addValue ] )
3420 threads.append( t )
3421 t.start()
3422 for t in threads:
3423 t.join()
3424 addResponses.append( t.result )
3425
3426 # main.TRUE = successfully changed the set
3427 # main.FALSE = action resulted in no change in set
3428 # main.ERROR - Some error in executing the function
3429 addResults = main.TRUE
3430 for i in range( len( main.activeNodes ) ):
3431 if addResponses[ i ] == main.TRUE:
3432 # All is well
3433 pass
3434 elif addResponses[ i ] == main.FALSE:
3435 # Already in set, probably fine
3436 pass
3437 elif addResponses[ i ] == main.ERROR:
3438 # Error in execution
3439 addResults = main.FALSE
3440 else:
3441 # unexpected result
3442 addResults = main.FALSE
3443 if addResults != main.TRUE:
3444 main.log.error( "Error executing set add" )
3445
3446 # Check if set is still correct
3447 size = len( onosSet )
3448 getResponses = []
3449 threads = []
3450 for i in main.activeNodes:
3451 t = main.Thread( target=main.CLIs[i].setTestGet,
3452 name="setTestGet-" + str( i ),
3453 args=[ onosSetName ] )
3454 threads.append( t )
3455 t.start()
3456 for t in threads:
3457 t.join()
3458 getResponses.append( t.result )
3459 getResults = main.TRUE
3460 for i in range( len( main.activeNodes ) ):
3461 node = str( main.activeNodes[i] + 1 )
3462 if isinstance( getResponses[ i ], list):
3463 current = set( getResponses[ i ] )
3464 if len( current ) == len( getResponses[ i ] ):
3465 # no repeats
3466 if onosSet != current:
3467 main.log.error( "ONOS" + node + " has incorrect view" +
3468 " of set " + onosSetName + ":\n" +
3469 str( getResponses[ i ] ) )
3470 main.log.debug( "Expected: " + str( onosSet ) )
3471 main.log.debug( "Actual: " + str( current ) )
3472 getResults = main.FALSE
3473 else:
3474 # error, set is not a set
3475 main.log.error( "ONOS" + node + " has repeat elements in" +
3476 " set " + onosSetName + ":\n" +
3477 str( getResponses[ i ] ) )
3478 getResults = main.FALSE
3479 elif getResponses[ i ] == main.ERROR:
3480 getResults = main.FALSE
3481 sizeResponses = []
3482 threads = []
3483 for i in main.activeNodes:
3484 t = main.Thread( target=main.CLIs[i].setTestSize,
3485 name="setTestSize-" + str( i ),
3486 args=[ onosSetName ] )
3487 threads.append( t )
3488 t.start()
3489 for t in threads:
3490 t.join()
3491 sizeResponses.append( t.result )
3492 sizeResults = main.TRUE
3493 for i in range( len( main.activeNodes ) ):
3494 node = str( main.activeNodes[i] + 1 )
3495 if size != sizeResponses[ i ]:
3496 sizeResults = main.FALSE
3497 main.log.error( "ONOS" + node +
3498 " expected a size of " + str( size ) +
3499 " for set " + onosSetName +
3500 " but got " + str( sizeResponses[ i ] ) )
3501 addResults = addResults and getResults and sizeResults
3502 utilities.assert_equals( expect=main.TRUE,
3503 actual=addResults,
3504 onpass="Set add correct",
3505 onfail="Set add was incorrect" )
3506
3507 main.step( "Distributed Set addAll()" )
3508 onosSet.update( addAllValue.split() )
3509 addResponses = []
3510 threads = []
3511 for i in main.activeNodes:
3512 t = main.Thread( target=main.CLIs[i].setTestAdd,
3513 name="setTestAddAll-" + str( i ),
3514 args=[ onosSetName, addAllValue ] )
3515 threads.append( t )
3516 t.start()
3517 for t in threads:
3518 t.join()
3519 addResponses.append( t.result )
3520
3521 # main.TRUE = successfully changed the set
3522 # main.FALSE = action resulted in no change in set
3523 # main.ERROR - Some error in executing the function
3524 addAllResults = main.TRUE
3525 for i in range( len( main.activeNodes ) ):
3526 if addResponses[ i ] == main.TRUE:
3527 # All is well
3528 pass
3529 elif addResponses[ i ] == main.FALSE:
3530 # Already in set, probably fine
3531 pass
3532 elif addResponses[ i ] == main.ERROR:
3533 # Error in execution
3534 addAllResults = main.FALSE
3535 else:
3536 # unexpected result
3537 addAllResults = main.FALSE
3538 if addAllResults != main.TRUE:
3539 main.log.error( "Error executing set addAll" )
3540
3541 # Check if set is still correct
3542 size = len( onosSet )
3543 getResponses = []
3544 threads = []
3545 for i in main.activeNodes:
3546 t = main.Thread( target=main.CLIs[i].setTestGet,
3547 name="setTestGet-" + str( i ),
3548 args=[ onosSetName ] )
3549 threads.append( t )
3550 t.start()
3551 for t in threads:
3552 t.join()
3553 getResponses.append( t.result )
3554 getResults = main.TRUE
3555 for i in range( len( main.activeNodes ) ):
3556 node = str( main.activeNodes[i] + 1 )
3557 if isinstance( getResponses[ i ], list):
3558 current = set( getResponses[ i ] )
3559 if len( current ) == len( getResponses[ i ] ):
3560 # no repeats
3561 if onosSet != current:
3562 main.log.error( "ONOS" + node +
3563 " has incorrect view" +
3564 " of set " + onosSetName + ":\n" +
3565 str( getResponses[ i ] ) )
3566 main.log.debug( "Expected: " + str( onosSet ) )
3567 main.log.debug( "Actual: " + str( current ) )
3568 getResults = main.FALSE
3569 else:
3570 # error, set is not a set
3571 main.log.error( "ONOS" + node +
3572 " has repeat elements in" +
3573 " set " + onosSetName + ":\n" +
3574 str( getResponses[ i ] ) )
3575 getResults = main.FALSE
3576 elif getResponses[ i ] == main.ERROR:
3577 getResults = main.FALSE
3578 sizeResponses = []
3579 threads = []
3580 for i in main.activeNodes:
3581 t = main.Thread( target=main.CLIs[i].setTestSize,
3582 name="setTestSize-" + str( i ),
3583 args=[ onosSetName ] )
3584 threads.append( t )
3585 t.start()
3586 for t in threads:
3587 t.join()
3588 sizeResponses.append( t.result )
3589 sizeResults = main.TRUE
3590 for i in range( len( main.activeNodes ) ):
3591 node = str( main.activeNodes[i] + 1 )
3592 if size != sizeResponses[ i ]:
3593 sizeResults = main.FALSE
3594 main.log.error( "ONOS" + node +
3595 " expected a size of " + str( size ) +
3596 " for set " + onosSetName +
3597 " but got " + str( sizeResponses[ i ] ) )
3598 addAllResults = addAllResults and getResults and sizeResults
3599 utilities.assert_equals( expect=main.TRUE,
3600 actual=addAllResults,
3601 onpass="Set addAll correct",
3602 onfail="Set addAll was incorrect" )
3603
3604 main.step( "Distributed Set contains()" )
3605 containsResponses = []
3606 threads = []
3607 for i in main.activeNodes:
3608 t = main.Thread( target=main.CLIs[i].setTestGet,
3609 name="setContains-" + str( i ),
3610 args=[ onosSetName ],
3611 kwargs={ "values": addValue } )
3612 threads.append( t )
3613 t.start()
3614 for t in threads:
3615 t.join()
3616 # NOTE: This is the tuple
3617 containsResponses.append( t.result )
3618
3619 containsResults = main.TRUE
3620 for i in range( len( main.activeNodes ) ):
3621 if containsResponses[ i ] == main.ERROR:
3622 containsResults = main.FALSE
3623 else:
3624 containsResults = containsResults and\
3625 containsResponses[ i ][ 1 ]
3626 utilities.assert_equals( expect=main.TRUE,
3627 actual=containsResults,
3628 onpass="Set contains is functional",
3629 onfail="Set contains failed" )
3630
3631 main.step( "Distributed Set containsAll()" )
3632 containsAllResponses = []
3633 threads = []
3634 for i in main.activeNodes:
3635 t = main.Thread( target=main.CLIs[i].setTestGet,
3636 name="setContainsAll-" + str( i ),
3637 args=[ onosSetName ],
3638 kwargs={ "values": addAllValue } )
3639 threads.append( t )
3640 t.start()
3641 for t in threads:
3642 t.join()
3643 # NOTE: This is the tuple
3644 containsAllResponses.append( t.result )
3645
3646 containsAllResults = main.TRUE
3647 for i in range( len( main.activeNodes ) ):
3648 if containsResponses[ i ] == main.ERROR:
3649 containsResults = main.FALSE
3650 else:
3651 containsResults = containsResults and\
3652 containsResponses[ i ][ 1 ]
3653 utilities.assert_equals( expect=main.TRUE,
3654 actual=containsAllResults,
3655 onpass="Set containsAll is functional",
3656 onfail="Set containsAll failed" )
3657
        # DistributedSet remove(): remove a single value on the ONOS side,
        # mirror the change on the local reference set, then verify every
        # active node converged to the same contents and size.
        main.step( "Distributed Set remove()" )
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        # Fan the remove command out to every active node in parallel;
        # each thread's return value is collected from t.result after join.
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-indexed in the log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node against the local set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # The step passes only if the remove, the contents, and the sizes
        # were all correct
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3754
        # DistributedSet removeAll(): remove a whole collection of values in
        # one call (the same values addAll added) and verify convergence.
        main.step( "Distributed Set removeAll()" )
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            # Fan out to all active nodes in parallel
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-indexed in the log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node against the local set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3854
        # DistributedSet addAll(): add a collection of values in one call and
        # verify all active nodes converge to the same contents and size.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        # Fan out to all active nodes in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-indexed in the log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node against the local set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3951
        # DistributedSet clear(): empty the set via setTestRemove with the
        # "clear" flag and verify every node now sees an empty set.
        main.step( "Distributed Set clear()" )
        onosSet.clear()
        clearResponses = []
        threads = []
        # Fan out to all active nodes in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Value doesn't matter when clearing
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-indexed in the log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node against the local set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4049
        # DistributedSet addAll() again: re-populate the set after the
        # clear() above so the retain() step below has contents to work on.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        # Fan out to all active nodes in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-indexed in the log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node against the local set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4146
        # DistributedSet retain(): keep only the values in retainValue
        # (set intersection) via setTestRemove with the "retain" flag, then
        # verify convergence on all active nodes.
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        # Fan out to all active nodes in parallel
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node numbers are 1-indexed in the log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Cross-check the size reported by each node against the local set
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4243
4244 # Transactional maps
4245 main.step( "Partitioned Transactional maps put" )
4246 tMapValue = "Testing"
4247 numKeys = 100
4248 putResult = True
4249 node = main.activeNodes[0]
4250 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4251 if putResponses and len( putResponses ) == 100:
4252 for i in putResponses:
4253 if putResponses[ i ][ 'value' ] != tMapValue:
4254 putResult = False
4255 else:
4256 putResult = False
4257 if not putResult:
4258 main.log.debug( "Put response values: " + str( putResponses ) )
4259 utilities.assert_equals( expect=True,
4260 actual=putResult,
4261 onpass="Partitioned Transactional Map put successful",
4262 onfail="Partitioned Transactional Map put values are incorrect" )
4263
        # Read every key back from every active node and confirm all nodes
        # return the value written in the put step above.
        main.step( "Partitioned Transactional maps get" )
        # FIXME: is this sleep needed?
        time.sleep( 5 )

        getCheck = True
        # Keys are named "Key1" .. "Key<numKeys>" by transactionalMapPut
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            # Query this key on all active nodes in parallel
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # Every node must return the exact value that was put
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )