blob: 082f831fadfac1a83ac59941edf395835dec97fe [file] [log] [blame]
Jon Hall69b2b982016-05-11 12:04:59 -07001"""
2Description: This test is to determine if ONOS can handle
3 dynamic swapping of cluster nodes.
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE6: Swap nodes
13CASE7: Check state after control plane failure
14CASE8: Compare topo
15CASE9: Link s3-s28 down
16CASE10: Link s3-s28 up
17CASE11: Switch down
18CASE12: Switch up
19CASE13: Clean up
20CASE14: start election app on all onos nodes
21CASE15: Check that Leadership Election is still functional
22CASE16: Install Distributed Primitives app
23CASE17: Check for basic functionality with distributed primitives
24"""
25
26
27class HAswapNodes:
28
29 def __init__( self ):
30 self.default = ''
31
32 def CASE1( self, main ):
33 """
34 CASE1 is to compile ONOS and push it to the test machines
35
36 Startup sequence:
37 cell <name>
38 onos-verify-cell
39 NOTE: temporary - onos-remove-raft-logs
40 onos-uninstall
41 start mininet
42 git pull
43 mvn clean install
44 onos-package
45 onos-install -f
46 onos-wait-for-start
47 start cli sessions
48 start tcpdump
49 """
50 import time
51 import os
52 import re
53 main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
54 "initialization" )
55 main.case( "Setting up test environment" )
56 main.caseExplanation = "Setup the test environment including " +\
57 "installing ONOS, starting Mininet and ONOS" +\
58 "cli sessions."
59
60 # load some variables from the params file
61 PULLCODE = False
62 if main.params[ 'Git' ] == 'True':
63 PULLCODE = True
64 gitBranch = main.params[ 'branch' ]
65 cellName = main.params[ 'ENV' ][ 'cellName' ]
66
67 main.numCtrls = int( main.params[ 'num_controllers' ] )
68 if main.ONOSbench.maxNodes:
69 if main.ONOSbench.maxNodes < main.numCtrls:
70 main.numCtrls = int( main.ONOSbench.maxNodes )
71 # set global variables
72 # These are for csv plotting in jenkins
73 global labels
74 global data
75 labels = []
76 data = []
77
78 try:
79 from tests.HA.dependencies.HA import HA
80 main.HA = HA()
81 from tests.HA.HAswapNodes.dependencies.Server import Server
82 main.Server = Server()
83 except Exception as e:
84 main.log.exception( e )
85 main.cleanup()
86 main.exit()
87
88 main.CLIs = []
89 main.nodes = []
90 ipList = []
91 for i in range( 1, main.numCtrls + 1 ):
92 try:
93 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
94 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
95 ipList.append( main.nodes[ -1 ].ip_address )
96 except AttributeError:
97 break
98
99 main.step( "Create cell file" )
100 cellAppString = main.params[ 'ENV' ][ 'appString' ]
101 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
102 main.Mininet1.ip_address,
103 cellAppString, ipList )
104
105 main.step( "Applying cell variable to environment" )
106 cellResult = main.ONOSbench.setCell( cellName )
107 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
108 onpass="Set cell successfull",
109 onfail="Failled to set cell" )
110
111 main.step( "Verify connectivity to cell" )
112 verifyResult = main.ONOSbench.verifyCell()
113 utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
114 onpass="Verify cell passed",
115 onfail="Failled to verify cell" )
116
117 # FIXME:this is short term fix
118 main.log.info( "Removing raft logs" )
119 main.ONOSbench.onosRemoveRaftLogs()
120
121 main.log.info( "Uninstalling ONOS" )
122 for node in main.nodes:
123 main.ONOSbench.onosUninstall( node.ip_address )
124
125 # Make sure ONOS is DEAD
126 main.log.info( "Killing any ONOS processes" )
127 killResults = main.TRUE
128 for node in main.nodes:
129 killed = main.ONOSbench.onosKill( node.ip_address )
130 killResults = killResults and killed
131
132 main.step( "Setup server for cluster metadata file" )
Jon Hall8f6d4622016-05-23 15:27:18 -0700133 port = main.params['server']['port']
Jon Hall69b2b982016-05-11 12:04:59 -0700134 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
135 main.log.debug( "Root dir: {}".format( rootDir ) )
136 status = main.Server.start( main.ONOSbench,
137 rootDir,
138 port=port,
139 logDir=main.logdir + "/server.log" )
140 utilities.assert_equals( expect=main.TRUE, actual=status,
141 onpass="Server started",
142 onfail="Failled to start SimpleHTTPServer" )
143
144 main.step( "Generate initial metadata file" )
145 if main.numCtrls >= 5:
146 main.numCtrls -= 2
147 else:
148 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
149 genResult = main.Server.generateFile( main.numCtrls )
150 utilities.assert_equals( expect=main.TRUE, actual=genResult,
151 onpass="New cluster metadata file generated",
152 onfail="Failled to generate new metadata file" )
153
154 cleanInstallResult = main.TRUE
155 gitPullResult = main.TRUE
156
157 main.step( "Starting Mininet" )
158 # scp topo file to mininet
159 # TODO: move to params?
160 topoName = "obelisk.py"
161 filePath = main.ONOSbench.home + "/tools/test/topos/"
162 main.ONOSbench.scp( main.Mininet1,
163 filePath + topoName,
164 main.Mininet1.home,
165 direction="to" )
166 mnResult = main.Mininet1.startNet( )
167 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
168 onpass="Mininet Started",
169 onfail="Error starting Mininet" )
170
171 main.step( "Git checkout and pull " + gitBranch )
172 if PULLCODE:
173 main.ONOSbench.gitCheckout( gitBranch )
174 gitPullResult = main.ONOSbench.gitPull()
175 # values of 1 or 3 are good
176 utilities.assert_lesser( expect=0, actual=gitPullResult,
177 onpass="Git pull successful",
178 onfail="Git pull failed" )
179 main.ONOSbench.getVersion( report=True )
180
181 main.step( "Using mvn clean install" )
182 cleanInstallResult = main.TRUE
183 if PULLCODE and gitPullResult == main.TRUE:
184 cleanInstallResult = main.ONOSbench.cleanInstall()
185 else:
186 main.log.warn( "Did not pull new code so skipping mvn " +
187 "clean install" )
188 utilities.assert_equals( expect=main.TRUE,
189 actual=cleanInstallResult,
190 onpass="MCI successful",
191 onfail="MCI failed" )
192 # GRAPHS
193 # NOTE: important params here:
194 # job = name of Jenkins job
195 # Plot Name = Plot-HA, only can be used if multiple plots
196 # index = The number of the graph under plot name
197 job = "HAswapNodes"
198 plotName = "Plot-HA"
Jon Hall676e5432016-09-26 11:32:50 -0700199 index = "2"
Jon Hall69b2b982016-05-11 12:04:59 -0700200 graphs = '<ac:structured-macro ac:name="html">\n'
201 graphs += '<ac:plain-text-body><![CDATA[\n'
202 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
203 '/plot/' + plotName + '/getPlot?index=' + index +\
204 '&width=500&height=300"' +\
205 'noborder="0" width="500" height="300" scrolling="yes" ' +\
206 'seamless="seamless"></iframe>\n'
207 graphs += ']]></ac:plain-text-body>\n'
208 graphs += '</ac:structured-macro>\n'
209 main.log.wiki(graphs)
210
211 main.step( "Copying backup config files" )
212 path = "~/onos/tools/package/bin/onos-service"
213 cp = main.ONOSbench.scp( main.ONOSbench,
214 path,
215 path + ".backup",
216 direction="to" )
217
218 utilities.assert_equals( expect=main.TRUE,
219 actual=cp,
220 onpass="Copy backup config file succeeded",
221 onfail="Copy backup config file failed" )
222 # we need to modify the onos-service file to use remote metadata file
223 # url for cluster metadata file
Jon Hall8f6d4622016-05-23 15:27:18 -0700224 iface = main.params['server'].get( 'interface' )
225 ip = main.ONOSbench.getIpAddr( iface=iface )
Jon Hall69b2b982016-05-11 12:04:59 -0700226 metaFile = "cluster.json"
227 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
228 main.log.warn( javaArgs )
229 main.log.warn( repr( javaArgs ) )
230 handle = main.ONOSbench.handle
231 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
232 main.log.warn( sed )
233 main.log.warn( repr( sed ) )
234 handle.sendline( sed )
Jon Hallbd60ea02016-08-23 10:03:59 -0700235 handle.expect( metaFile )
236 output = handle.before
Jon Hall69b2b982016-05-11 12:04:59 -0700237 handle.expect( "\$" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700238 output += handle.before
239 main.log.debug( repr( output ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700240
241 main.step( "Creating ONOS package" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700242 packageResult = main.ONOSbench.buckBuild()
Jon Hall69b2b982016-05-11 12:04:59 -0700243 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
244 onpass="ONOS package successful",
245 onfail="ONOS package failed" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700246 if not packageResult:
247 main.cleanup()
248 main.exit()
Jon Hall69b2b982016-05-11 12:04:59 -0700249
250 main.step( "Installing ONOS package" )
251 onosInstallResult = main.TRUE
252 for i in range( main.ONOSbench.maxNodes ):
253 node = main.nodes[i]
254 options = "-f"
255 if i >= main.numCtrls:
256 options = "-nf" # Don't start more than the current scale
257 tmpResult = main.ONOSbench.onosInstall( options=options,
258 node=node.ip_address )
259 onosInstallResult = onosInstallResult and tmpResult
260 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
261 onpass="ONOS install successful",
262 onfail="ONOS install failed" )
263
264 # Cleanup custom onos-service file
265 main.ONOSbench.scp( main.ONOSbench,
266 path + ".backup",
267 path,
268 direction="to" )
269
You Wangf5de25b2017-01-06 15:13:01 -0800270 main.step( "Set up ONOS secure SSH" )
271 secureSshResult = main.TRUE
272 for node in main.nodes:
273 secureSshResult = secureSshResult and main.ONOSbench.onosSecureSSH( node=node.ip_address )
274 utilities.assert_equals( expect=main.TRUE, actual=secureSshResult,
275 onpass="Test step PASS",
276 onfail="Test step FAIL" )
277
Jon Hall69b2b982016-05-11 12:04:59 -0700278 main.step( "Checking if ONOS is up yet" )
279 for i in range( 2 ):
280 onosIsupResult = main.TRUE
281 for i in range( main.numCtrls ):
282 node = main.nodes[i]
283 started = main.ONOSbench.isup( node.ip_address )
284 if not started:
285 main.log.error( node.name + " hasn't started" )
286 onosIsupResult = onosIsupResult and started
287 if onosIsupResult == main.TRUE:
288 break
289 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
290 onpass="ONOS startup successful",
291 onfail="ONOS startup failed" )
292
Jon Hall6509dbf2016-06-21 17:01:17 -0700293 main.step( "Starting ONOS CLI sessions" )
Jon Hall69b2b982016-05-11 12:04:59 -0700294 cliResults = main.TRUE
295 threads = []
296 for i in range( main.numCtrls ):
297 t = main.Thread( target=main.CLIs[i].startOnosCli,
298 name="startOnosCli-" + str( i ),
299 args=[main.nodes[i].ip_address] )
300 threads.append( t )
301 t.start()
302
303 for t in threads:
304 t.join()
305 cliResults = cliResults and t.result
306 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
307 onpass="ONOS cli startup successful",
308 onfail="ONOS cli startup failed" )
309
310 # Create a list of active nodes for use when some nodes are stopped
311 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
312
313 if main.params[ 'tcpdump' ].lower() == "true":
314 main.step( "Start Packet Capture MN" )
315 main.Mininet2.startTcpdump(
316 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
317 + "-MN.pcap",
318 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
319 port=main.params[ 'MNtcpdump' ][ 'port' ] )
320
321 main.step( "Checking ONOS nodes" )
322 nodeResults = utilities.retry( main.HA.nodesCheck,
323 False,
324 args=[main.activeNodes],
325 attempts=5 )
326 utilities.assert_equals( expect=True, actual=nodeResults,
327 onpass="Nodes check successful",
328 onfail="Nodes check NOT successful" )
329
330 if not nodeResults:
331 for i in main.activeNodes:
332 cli = main.CLIs[i]
333 main.log.debug( "{} components not ACTIVE: \n{}".format(
334 cli.name,
335 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
336 main.log.error( "Failed to start ONOS, stopping test" )
337 main.cleanup()
338 main.exit()
339
340 main.step( "Activate apps defined in the params file" )
341 # get data from the params
342 apps = main.params.get( 'apps' )
343 if apps:
344 apps = apps.split(',')
345 main.log.warn( apps )
346 activateResult = True
347 for app in apps:
348 main.CLIs[ 0 ].app( app, "Activate" )
349 # TODO: check this worked
350 time.sleep( 10 ) # wait for apps to activate
351 for app in apps:
352 state = main.CLIs[ 0 ].appStatus( app )
353 if state == "ACTIVE":
354 activateResult = activateResult and True
355 else:
356 main.log.error( "{} is in {} state".format( app, state ) )
357 activateResult = False
358 utilities.assert_equals( expect=True,
359 actual=activateResult,
360 onpass="Successfully activated apps",
361 onfail="Failed to activate apps" )
362 else:
363 main.log.warn( "No apps were specified to be loaded after startup" )
364
365 main.step( "Set ONOS configurations" )
366 config = main.params.get( 'ONOS_Configuration' )
367 if config:
368 main.log.debug( config )
369 checkResult = main.TRUE
370 for component in config:
371 for setting in config[component]:
372 value = config[component][setting]
373 check = main.CLIs[ 0 ].setCfg( component, setting, value )
374 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
375 checkResult = check and checkResult
376 utilities.assert_equals( expect=main.TRUE,
377 actual=checkResult,
378 onpass="Successfully set config",
379 onfail="Failed to set config" )
380 else:
381 main.log.warn( "No configurations were specified to be changed after startup" )
382
383 main.step( "App Ids check" )
384 appCheck = main.TRUE
385 threads = []
386 for i in main.activeNodes:
387 t = main.Thread( target=main.CLIs[i].appToIDCheck,
388 name="appToIDCheck-" + str( i ),
389 args=[] )
390 threads.append( t )
391 t.start()
392
393 for t in threads:
394 t.join()
395 appCheck = appCheck and t.result
396 if appCheck != main.TRUE:
397 node = main.activeNodes[0]
398 main.log.warn( main.CLIs[node].apps() )
399 main.log.warn( main.CLIs[node].appIDs() )
400 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
401 onpass="App Ids seem to be correct",
402 onfail="Something is wrong with app Ids" )
403
404 def CASE2( self, main ):
405 """
406 Assign devices to controllers
407 """
408 import re
409 assert main.numCtrls, "main.numCtrls not defined"
410 assert main, "main not defined"
411 assert utilities.assert_equals, "utilities.assert_equals not defined"
412 assert main.CLIs, "main.CLIs not defined"
413 assert main.nodes, "main.nodes not defined"
414
415 main.case( "Assigning devices to controllers" )
416 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
417 "and check that an ONOS node becomes the " +\
418 "master of the device."
419 main.step( "Assign switches to controllers" )
420
421 ipList = []
422 for i in range( main.ONOSbench.maxNodes ):
423 ipList.append( main.nodes[ i ].ip_address )
424 swList = []
425 for i in range( 1, 29 ):
426 swList.append( "s" + str( i ) )
427 main.Mininet1.assignSwController( sw=swList, ip=ipList )
428
429 mastershipCheck = main.TRUE
430 for i in range( 1, 29 ):
431 response = main.Mininet1.getSwController( "s" + str( i ) )
432 try:
433 main.log.info( str( response ) )
434 except Exception:
435 main.log.info( repr( response ) )
436 for node in main.nodes:
437 if re.search( "tcp:" + node.ip_address, response ):
438 mastershipCheck = mastershipCheck and main.TRUE
439 else:
440 main.log.error( "Error, node " + node.ip_address + " is " +
441 "not in the list of controllers s" +
442 str( i ) + " is connecting to." )
443 mastershipCheck = main.FALSE
444 utilities.assert_equals(
445 expect=main.TRUE,
446 actual=mastershipCheck,
447 onpass="Switch mastership assigned correctly",
448 onfail="Switches not assigned correctly to controllers" )
449
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Uses the ONOS 'device-role' CLI command to pin each of the 28 obelisk
        switches to a chosen controller, then re-reads each device's role to
        confirm the requested node actually became master.
        NOTE(review): `main` and `utilities` are injected by the TestON
        framework at runtime; they are not regular imports.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI commands are issued through the first active node
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c is the controller index; taken modulo numCtrls so the
                # mapping still works when fewer than 7 nodes are running.
                # The argument to getDevice looks like the tail of the switch
                # dpid -- TODO confirm against the obelisk topology.
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    # dpid tail for s8..s17 is '3' + zero-padded switch number
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    # dpid tail for s18..s27 is '6' + zero-padded switch number
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # AttributeError: getDevice returned None; AssertionError: empty id
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        # Re-read every device's master and compare with what we requested
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
564
565 def CASE3( self, main ):
566 """
567 Assign intents
568 """
569 import time
570 import json
571 assert main.numCtrls, "main.numCtrls not defined"
572 assert main, "main not defined"
573 assert utilities.assert_equals, "utilities.assert_equals not defined"
574 assert main.CLIs, "main.CLIs not defined"
575 assert main.nodes, "main.nodes not defined"
576 try:
577 labels
578 except NameError:
579 main.log.error( "labels not defined, setting to []" )
580 labels = []
581 try:
582 data
583 except NameError:
584 main.log.error( "data not defined, setting to []" )
585 data = []
586 # NOTE: we must reinstall intents until we have a persistant intent
587 # datastore!
588 main.case( "Adding host Intents" )
589 main.caseExplanation = "Discover hosts by using pingall then " +\
590 "assign predetermined host-to-host intents." +\
591 " After installation, check that the intent" +\
592 " is distributed to all nodes and the state" +\
593 " is INSTALLED"
594
595 # install onos-app-fwd
596 main.step( "Install reactive forwarding app" )
597 onosCli = main.CLIs[ main.activeNodes[0] ]
598 installResults = onosCli.activateApp( "org.onosproject.fwd" )
599 utilities.assert_equals( expect=main.TRUE, actual=installResults,
600 onpass="Install fwd successful",
601 onfail="Install fwd failed" )
602
603 main.step( "Check app ids" )
604 appCheck = main.TRUE
605 threads = []
606 for i in main.activeNodes:
607 t = main.Thread( target=main.CLIs[i].appToIDCheck,
608 name="appToIDCheck-" + str( i ),
609 args=[] )
610 threads.append( t )
611 t.start()
612
613 for t in threads:
614 t.join()
615 appCheck = appCheck and t.result
616 if appCheck != main.TRUE:
617 main.log.warn( onosCli.apps() )
618 main.log.warn( onosCli.appIDs() )
619 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
620 onpass="App Ids seem to be correct",
621 onfail="Something is wrong with app Ids" )
622
623 main.step( "Discovering Hosts( Via pingall for now )" )
624 # FIXME: Once we have a host discovery mechanism, use that instead
625 # REACTIVE FWD test
626 pingResult = main.FALSE
627 passMsg = "Reactive Pingall test passed"
628 time1 = time.time()
629 pingResult = main.Mininet1.pingall()
630 time2 = time.time()
631 if not pingResult:
632 main.log.warn("First pingall failed. Trying again...")
633 pingResult = main.Mininet1.pingall()
634 passMsg += " on the second try"
635 utilities.assert_equals(
636 expect=main.TRUE,
637 actual=pingResult,
638 onpass= passMsg,
639 onfail="Reactive Pingall failed, " +
640 "one or more ping pairs failed" )
641 main.log.info( "Time for pingall: %2f seconds" %
642 ( time2 - time1 ) )
643 # timeout for fwd flows
644 time.sleep( 11 )
645 # uninstall onos-app-fwd
646 main.step( "Uninstall reactive forwarding app" )
647 node = main.activeNodes[0]
648 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
649 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
650 onpass="Uninstall fwd successful",
651 onfail="Uninstall fwd failed" )
652
653 main.step( "Check app ids" )
654 threads = []
655 appCheck2 = main.TRUE
656 for i in main.activeNodes:
657 t = main.Thread( target=main.CLIs[i].appToIDCheck,
658 name="appToIDCheck-" + str( i ),
659 args=[] )
660 threads.append( t )
661 t.start()
662
663 for t in threads:
664 t.join()
665 appCheck2 = appCheck2 and t.result
666 if appCheck2 != main.TRUE:
667 node = main.activeNodes[0]
668 main.log.warn( main.CLIs[node].apps() )
669 main.log.warn( main.CLIs[node].appIDs() )
670 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
671 onpass="App Ids seem to be correct",
672 onfail="Something is wrong with app Ids" )
673
674 main.step( "Add host intents via cli" )
675 intentIds = []
676 # TODO: move the host numbers to params
677 # Maybe look at all the paths we ping?
678 intentAddResult = True
679 hostResult = main.TRUE
680 for i in range( 8, 18 ):
681 main.log.info( "Adding host intent between h" + str( i ) +
682 " and h" + str( i + 10 ) )
683 host1 = "00:00:00:00:00:" + \
684 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
685 host2 = "00:00:00:00:00:" + \
686 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
687 # NOTE: getHost can return None
688 host1Dict = onosCli.getHost( host1 )
689 host2Dict = onosCli.getHost( host2 )
690 host1Id = None
691 host2Id = None
692 if host1Dict and host2Dict:
693 host1Id = host1Dict.get( 'id', None )
694 host2Id = host2Dict.get( 'id', None )
695 if host1Id and host2Id:
696 nodeNum = ( i % len( main.activeNodes ) )
697 node = main.activeNodes[nodeNum]
698 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
699 if tmpId:
700 main.log.info( "Added intent with id: " + tmpId )
701 intentIds.append( tmpId )
702 else:
703 main.log.error( "addHostIntent returned: " +
704 repr( tmpId ) )
705 else:
706 main.log.error( "Error, getHost() failed for h" + str( i ) +
707 " and/or h" + str( i + 10 ) )
708 node = main.activeNodes[0]
709 hosts = main.CLIs[node].hosts()
710 main.log.warn( "Hosts output: " )
711 try:
712 main.log.warn( json.dumps( json.loads( hosts ),
713 sort_keys=True,
714 indent=4,
715 separators=( ',', ': ' ) ) )
716 except ( ValueError, TypeError ):
717 main.log.warn( repr( hosts ) )
718 hostResult = main.FALSE
719 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
720 onpass="Found a host id for each host",
721 onfail="Error looking up host ids" )
722
723 intentStart = time.time()
724 onosIds = onosCli.getAllIntentsId()
725 main.log.info( "Submitted intents: " + str( intentIds ) )
726 main.log.info( "Intents in ONOS: " + str( onosIds ) )
727 for intent in intentIds:
728 if intent in onosIds:
729 pass # intent submitted is in onos
730 else:
731 intentAddResult = False
732 if intentAddResult:
733 intentStop = time.time()
734 else:
735 intentStop = None
736 # Print the intent states
737 intents = onosCli.intents()
738 intentStates = []
739 installedCheck = True
740 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
741 count = 0
742 try:
743 for intent in json.loads( intents ):
744 state = intent.get( 'state', None )
745 if "INSTALLED" not in state:
746 installedCheck = False
747 intentId = intent.get( 'id', None )
748 intentStates.append( ( intentId, state ) )
749 except ( ValueError, TypeError ):
750 main.log.exception( "Error parsing intents" )
751 # add submitted intents not in the store
752 tmplist = [ i for i, s in intentStates ]
753 missingIntents = False
754 for i in intentIds:
755 if i not in tmplist:
756 intentStates.append( ( i, " - " ) )
757 missingIntents = True
758 intentStates.sort()
759 for i, s in intentStates:
760 count += 1
761 main.log.info( "%-6s%-15s%-15s" %
762 ( str( count ), str( i ), str( s ) ) )
763 leaders = onosCli.leaders()
764 try:
765 missing = False
766 if leaders:
767 parsedLeaders = json.loads( leaders )
768 main.log.warn( json.dumps( parsedLeaders,
769 sort_keys=True,
770 indent=4,
771 separators=( ',', ': ' ) ) )
772 # check for all intent partitions
773 topics = []
774 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700775 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700776 main.log.debug( topics )
777 ONOStopics = [ j['topic'] for j in parsedLeaders ]
778 for topic in topics:
779 if topic not in ONOStopics:
780 main.log.error( "Error: " + topic +
781 " not in leaders" )
782 missing = True
783 else:
784 main.log.error( "leaders() returned None" )
785 except ( ValueError, TypeError ):
786 main.log.exception( "Error parsing leaders" )
787 main.log.error( repr( leaders ) )
788 # Check all nodes
789 if missing:
790 for i in main.activeNodes:
791 response = main.CLIs[i].leaders( jsonFormat=False)
792 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
793 str( response ) )
794
795 partitions = onosCli.partitions()
796 try:
797 if partitions :
798 parsedPartitions = json.loads( partitions )
799 main.log.warn( json.dumps( parsedPartitions,
800 sort_keys=True,
801 indent=4,
802 separators=( ',', ': ' ) ) )
803 # TODO check for a leader in all paritions
804 # TODO check for consistency among nodes
805 else:
806 main.log.error( "partitions() returned None" )
807 except ( ValueError, TypeError ):
808 main.log.exception( "Error parsing partitions" )
809 main.log.error( repr( partitions ) )
810 pendingMap = onosCli.pendingMap()
811 try:
812 if pendingMap :
813 parsedPending = json.loads( pendingMap )
814 main.log.warn( json.dumps( parsedPending,
815 sort_keys=True,
816 indent=4,
817 separators=( ',', ': ' ) ) )
818 # TODO check something here?
819 else:
820 main.log.error( "pendingMap() returned None" )
821 except ( ValueError, TypeError ):
822 main.log.exception( "Error parsing pending map" )
823 main.log.error( repr( pendingMap ) )
824
825 intentAddResult = bool( intentAddResult and not missingIntents and
826 installedCheck )
827 if not intentAddResult:
828 main.log.error( "Error in pushing host intents to ONOS" )
829
830 main.step( "Intent Anti-Entropy dispersion" )
831 for j in range(100):
832 correct = True
833 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
834 for i in main.activeNodes:
835 onosIds = []
836 ids = main.CLIs[i].getAllIntentsId()
837 onosIds.append( ids )
838 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
839 str( sorted( onosIds ) ) )
840 if sorted( ids ) != sorted( intentIds ):
841 main.log.warn( "Set of intent IDs doesn't match" )
842 correct = False
843 break
844 else:
845 intents = json.loads( main.CLIs[i].intents() )
846 for intent in intents:
847 if intent[ 'state' ] != "INSTALLED":
848 main.log.warn( "Intent " + intent[ 'id' ] +
849 " is " + intent[ 'state' ] )
850 correct = False
851 break
852 if correct:
853 break
854 else:
855 time.sleep(1)
856 if not intentStop:
857 intentStop = time.time()
858 global gossipTime
859 gossipTime = intentStop - intentStart
860 main.log.info( "It took about " + str( gossipTime ) +
861 " seconds for all intents to appear in each node" )
862 append = False
863 title = "Gossip Intents"
864 count = 1
865 while append is False:
866 curTitle = title + str( count )
867 if curTitle not in labels:
868 labels.append( curTitle )
869 data.append( str( gossipTime ) )
870 append = True
871 else:
872 count += 1
873 gossipPeriod = int( main.params['timers']['gossip'] )
874 maxGossipTime = gossipPeriod * len( main.activeNodes )
875 utilities.assert_greater_equals(
876 expect=maxGossipTime, actual=gossipTime,
877 onpass="ECM anti-entropy for intents worked within " +
878 "expected time",
879 onfail="Intent ECM anti-entropy took too long. " +
880 "Expected time:{}, Actual time:{}".format( maxGossipTime,
881 gossipTime ) )
882 if gossipTime <= maxGossipTime:
883 intentAddResult = True
884
885 if not intentAddResult or "key" in pendingMap:
886 import time
887 installedCheck = True
888 main.log.info( "Sleeping 60 seconds to see if intents are found" )
889 time.sleep( 60 )
890 onosIds = onosCli.getAllIntentsId()
891 main.log.info( "Submitted intents: " + str( intentIds ) )
892 main.log.info( "Intents in ONOS: " + str( onosIds ) )
893 # Print the intent states
894 intents = onosCli.intents()
895 intentStates = []
896 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
897 count = 0
898 try:
899 for intent in json.loads( intents ):
900 # Iter through intents of a node
901 state = intent.get( 'state', None )
902 if "INSTALLED" not in state:
903 installedCheck = False
904 intentId = intent.get( 'id', None )
905 intentStates.append( ( intentId, state ) )
906 except ( ValueError, TypeError ):
907 main.log.exception( "Error parsing intents" )
908 # add submitted intents not in the store
909 tmplist = [ i for i, s in intentStates ]
910 for i in intentIds:
911 if i not in tmplist:
912 intentStates.append( ( i, " - " ) )
913 intentStates.sort()
914 for i, s in intentStates:
915 count += 1
916 main.log.info( "%-6s%-15s%-15s" %
917 ( str( count ), str( i ), str( s ) ) )
918 leaders = onosCli.leaders()
919 try:
920 missing = False
921 if leaders:
922 parsedLeaders = json.loads( leaders )
923 main.log.warn( json.dumps( parsedLeaders,
924 sort_keys=True,
925 indent=4,
926 separators=( ',', ': ' ) ) )
927 # check for all intent partitions
928 # check for election
929 topics = []
930 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700931 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700932 # FIXME: this should only be after we start the app
933 topics.append( "org.onosproject.election" )
934 main.log.debug( topics )
935 ONOStopics = [ j['topic'] for j in parsedLeaders ]
936 for topic in topics:
937 if topic not in ONOStopics:
938 main.log.error( "Error: " + topic +
939 " not in leaders" )
940 missing = True
941 else:
942 main.log.error( "leaders() returned None" )
943 except ( ValueError, TypeError ):
944 main.log.exception( "Error parsing leaders" )
945 main.log.error( repr( leaders ) )
946 # Check all nodes
947 if missing:
948 for i in main.activeNodes:
949 node = main.CLIs[i]
950 response = node.leaders( jsonFormat=False)
951 main.log.warn( str( node.name ) + " leaders output: \n" +
952 str( response ) )
953
954 partitions = onosCli.partitions()
955 try:
956 if partitions :
957 parsedPartitions = json.loads( partitions )
958 main.log.warn( json.dumps( parsedPartitions,
959 sort_keys=True,
960 indent=4,
961 separators=( ',', ': ' ) ) )
962 # TODO check for a leader in all paritions
963 # TODO check for consistency among nodes
964 else:
965 main.log.error( "partitions() returned None" )
966 except ( ValueError, TypeError ):
967 main.log.exception( "Error parsing partitions" )
968 main.log.error( repr( partitions ) )
969 pendingMap = onosCli.pendingMap()
970 try:
971 if pendingMap :
972 parsedPending = json.loads( pendingMap )
973 main.log.warn( json.dumps( parsedPending,
974 sort_keys=True,
975 indent=4,
976 separators=( ',', ': ' ) ) )
977 # TODO check something here?
978 else:
979 main.log.error( "pendingMap() returned None" )
980 except ( ValueError, TypeError ):
981 main.log.exception( "Error parsing pending map" )
982 main.log.error( repr( pendingMap ) )
983
    def CASE4( self, main ):
        """
        Ping across added host intents

        Waits for every intent to reach the INSTALLED state (polling up to
        ~40 times, 1 second apart), pings each h<i> -> h<i+10> host pair
        added by CASE3, then dumps topic leadership, partitions and the
        pending map for debugging. If any intent never installed, waits
        another 60 seconds, re-dumps the state and retries the pings once.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All single-node queries go through the first active node's CLI
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until every intent reports INSTALLED, or give up after ~40s
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # CASE3 added intents connecting h8->h18 ... h17->h27
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        # NOTE(review): this dumps per-node leaders output when the topic
        # check PASSED, whereas the equivalent check in CASE3 dumps only
        # when topics are missing -- confirm which behavior is intended
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If intents never installed, give the cluster another minute,
        # re-dump the intent/leader/partition state, then ping once more
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1265
1266 def CASE5( self, main ):
1267 """
1268 Reading state of ONOS
1269 """
1270 import json
1271 import time
1272 assert main.numCtrls, "main.numCtrls not defined"
1273 assert main, "main not defined"
1274 assert utilities.assert_equals, "utilities.assert_equals not defined"
1275 assert main.CLIs, "main.CLIs not defined"
1276 assert main.nodes, "main.nodes not defined"
1277
1278 main.case( "Setting up and gathering data for current state" )
1279 # The general idea for this test case is to pull the state of
1280 # ( intents,flows, topology,... ) from each ONOS node
1281 # We can then compare them with each other and also with past states
1282
1283 main.step( "Check that each switch has a master" )
1284 global mastershipState
1285 mastershipState = '[]'
1286
1287 # Assert that each device has a master
1288 rolesNotNull = main.TRUE
1289 threads = []
1290 for i in main.activeNodes:
1291 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1292 name="rolesNotNull-" + str( i ),
1293 args=[] )
1294 threads.append( t )
1295 t.start()
1296
1297 for t in threads:
1298 t.join()
1299 rolesNotNull = rolesNotNull and t.result
1300 utilities.assert_equals(
1301 expect=main.TRUE,
1302 actual=rolesNotNull,
1303 onpass="Each device has a master",
1304 onfail="Some devices don't have a master assigned" )
1305
1306 main.step( "Get the Mastership of each switch from each controller" )
1307 ONOSMastership = []
1308 consistentMastership = True
1309 rolesResults = True
1310 threads = []
1311 for i in main.activeNodes:
1312 t = main.Thread( target=main.CLIs[i].roles,
1313 name="roles-" + str( i ),
1314 args=[] )
1315 threads.append( t )
1316 t.start()
1317
1318 for t in threads:
1319 t.join()
1320 ONOSMastership.append( t.result )
1321
1322 for i in range( len( ONOSMastership ) ):
1323 node = str( main.activeNodes[i] + 1 )
1324 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1325 main.log.error( "Error in getting ONOS" + node + " roles" )
1326 main.log.warn( "ONOS" + node + " mastership response: " +
1327 repr( ONOSMastership[i] ) )
1328 rolesResults = False
1329 utilities.assert_equals(
1330 expect=True,
1331 actual=rolesResults,
1332 onpass="No error in reading roles output",
1333 onfail="Error in reading roles from ONOS" )
1334
1335 main.step( "Check for consistency in roles from each controller" )
1336 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1337 main.log.info(
1338 "Switch roles are consistent across all ONOS nodes" )
1339 else:
1340 consistentMastership = False
1341 utilities.assert_equals(
1342 expect=True,
1343 actual=consistentMastership,
1344 onpass="Switch roles are consistent across all ONOS nodes",
1345 onfail="ONOS nodes have different views of switch roles" )
1346
1347 if rolesResults and not consistentMastership:
1348 for i in range( len( main.activeNodes ) ):
1349 node = str( main.activeNodes[i] + 1 )
1350 try:
1351 main.log.warn(
1352 "ONOS" + node + " roles: ",
1353 json.dumps(
1354 json.loads( ONOSMastership[ i ] ),
1355 sort_keys=True,
1356 indent=4,
1357 separators=( ',', ': ' ) ) )
1358 except ( ValueError, TypeError ):
1359 main.log.warn( repr( ONOSMastership[ i ] ) )
1360 elif rolesResults and consistentMastership:
1361 mastershipState = ONOSMastership[ 0 ]
1362
1363 main.step( "Get the intents from each controller" )
1364 global intentState
1365 intentState = []
1366 ONOSIntents = []
1367 consistentIntents = True # Are Intents consistent across nodes?
1368 intentsResults = True # Could we read Intents from ONOS?
1369 threads = []
1370 for i in main.activeNodes:
1371 t = main.Thread( target=main.CLIs[i].intents,
1372 name="intents-" + str( i ),
1373 args=[],
1374 kwargs={ 'jsonFormat': True } )
1375 threads.append( t )
1376 t.start()
1377
1378 for t in threads:
1379 t.join()
1380 ONOSIntents.append( t.result )
1381
1382 for i in range( len( ONOSIntents ) ):
1383 node = str( main.activeNodes[i] + 1 )
1384 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1385 main.log.error( "Error in getting ONOS" + node + " intents" )
1386 main.log.warn( "ONOS" + node + " intents response: " +
1387 repr( ONOSIntents[ i ] ) )
1388 intentsResults = False
1389 utilities.assert_equals(
1390 expect=True,
1391 actual=intentsResults,
1392 onpass="No error in reading intents output",
1393 onfail="Error in reading intents from ONOS" )
1394
1395 main.step( "Check for consistency in Intents from each controller" )
1396 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1397 main.log.info( "Intents are consistent across all ONOS " +
1398 "nodes" )
1399 else:
1400 consistentIntents = False
1401 main.log.error( "Intents not consistent" )
1402 utilities.assert_equals(
1403 expect=True,
1404 actual=consistentIntents,
1405 onpass="Intents are consistent across all ONOS nodes",
1406 onfail="ONOS nodes have different views of intents" )
1407
1408 if intentsResults:
1409 # Try to make it easy to figure out what is happening
1410 #
1411 # Intent ONOS1 ONOS2 ...
1412 # 0x01 INSTALLED INSTALLING
1413 # ... ... ...
1414 # ... ... ...
1415 title = " Id"
1416 for n in main.activeNodes:
1417 title += " " * 10 + "ONOS" + str( n + 1 )
1418 main.log.warn( title )
1419 # get all intent keys in the cluster
1420 keys = []
1421 try:
1422 # Get the set of all intent keys
1423 for nodeStr in ONOSIntents:
1424 node = json.loads( nodeStr )
1425 for intent in node:
1426 keys.append( intent.get( 'id' ) )
1427 keys = set( keys )
1428 # For each intent key, print the state on each node
1429 for key in keys:
1430 row = "%-13s" % key
1431 for nodeStr in ONOSIntents:
1432 node = json.loads( nodeStr )
1433 for intent in node:
1434 if intent.get( 'id', "Error" ) == key:
1435 row += "%-15s" % intent.get( 'state' )
1436 main.log.warn( row )
1437 # End of intent state table
1438 except ValueError as e:
1439 main.log.exception( e )
1440 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1441
1442 if intentsResults and not consistentIntents:
1443 # print the json objects
1444 n = str( main.activeNodes[-1] + 1 )
1445 main.log.debug( "ONOS" + n + " intents: " )
1446 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1447 sort_keys=True,
1448 indent=4,
1449 separators=( ',', ': ' ) ) )
1450 for i in range( len( ONOSIntents ) ):
1451 node = str( main.activeNodes[i] + 1 )
1452 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1453 main.log.debug( "ONOS" + node + " intents: " )
1454 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1455 sort_keys=True,
1456 indent=4,
1457 separators=( ',', ': ' ) ) )
1458 else:
1459 main.log.debug( "ONOS" + node + " intents match ONOS" +
1460 n + " intents" )
1461 elif intentsResults and consistentIntents:
1462 intentState = ONOSIntents[ 0 ]
1463
1464 main.step( "Get the flows from each controller" )
1465 global flowState
1466 flowState = []
1467 ONOSFlows = []
1468 ONOSFlowsJson = []
1469 flowCheck = main.FALSE
1470 consistentFlows = True
1471 flowsResults = True
1472 threads = []
1473 for i in main.activeNodes:
1474 t = main.Thread( target=main.CLIs[i].flows,
1475 name="flows-" + str( i ),
1476 args=[],
1477 kwargs={ 'jsonFormat': True } )
1478 threads.append( t )
1479 t.start()
1480
1481 # NOTE: Flows command can take some time to run
1482 time.sleep(30)
1483 for t in threads:
1484 t.join()
1485 result = t.result
1486 ONOSFlows.append( result )
1487
1488 for i in range( len( ONOSFlows ) ):
1489 num = str( main.activeNodes[i] + 1 )
1490 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1491 main.log.error( "Error in getting ONOS" + num + " flows" )
1492 main.log.warn( "ONOS" + num + " flows response: " +
1493 repr( ONOSFlows[ i ] ) )
1494 flowsResults = False
1495 ONOSFlowsJson.append( None )
1496 else:
1497 try:
1498 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1499 except ( ValueError, TypeError ):
1500 # FIXME: change this to log.error?
1501 main.log.exception( "Error in parsing ONOS" + num +
1502 " response as json." )
1503 main.log.error( repr( ONOSFlows[ i ] ) )
1504 ONOSFlowsJson.append( None )
1505 flowsResults = False
1506 utilities.assert_equals(
1507 expect=True,
1508 actual=flowsResults,
1509 onpass="No error in reading flows output",
1510 onfail="Error in reading flows from ONOS" )
1511
1512 main.step( "Check for consistency in Flows from each controller" )
1513 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1514 if all( tmp ):
1515 main.log.info( "Flow count is consistent across all ONOS nodes" )
1516 else:
1517 consistentFlows = False
1518 utilities.assert_equals(
1519 expect=True,
1520 actual=consistentFlows,
1521 onpass="The flow count is consistent across all ONOS nodes",
1522 onfail="ONOS nodes have different flow counts" )
1523
1524 if flowsResults and not consistentFlows:
1525 for i in range( len( ONOSFlows ) ):
1526 node = str( main.activeNodes[i] + 1 )
1527 try:
1528 main.log.warn(
1529 "ONOS" + node + " flows: " +
1530 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1531 indent=4, separators=( ',', ': ' ) ) )
1532 except ( ValueError, TypeError ):
1533 main.log.warn( "ONOS" + node + " flows: " +
1534 repr( ONOSFlows[ i ] ) )
1535 elif flowsResults and consistentFlows:
1536 flowCheck = main.TRUE
1537 flowState = ONOSFlows[ 0 ]
1538
1539 main.step( "Get the OF Table entries" )
1540 global flows
1541 flows = []
1542 for i in range( 1, 29 ):
1543 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1544 if flowCheck == main.FALSE:
1545 for table in flows:
1546 main.log.warn( table )
1547 # TODO: Compare switch flow tables with ONOS flow tables
1548
1549 main.step( "Start continuous pings" )
1550 main.Mininet2.pingLong(
1551 src=main.params[ 'PING' ][ 'source1' ],
1552 target=main.params[ 'PING' ][ 'target1' ],
1553 pingTime=500 )
1554 main.Mininet2.pingLong(
1555 src=main.params[ 'PING' ][ 'source2' ],
1556 target=main.params[ 'PING' ][ 'target2' ],
1557 pingTime=500 )
1558 main.Mininet2.pingLong(
1559 src=main.params[ 'PING' ][ 'source3' ],
1560 target=main.params[ 'PING' ][ 'target3' ],
1561 pingTime=500 )
1562 main.Mininet2.pingLong(
1563 src=main.params[ 'PING' ][ 'source4' ],
1564 target=main.params[ 'PING' ][ 'target4' ],
1565 pingTime=500 )
1566 main.Mininet2.pingLong(
1567 src=main.params[ 'PING' ][ 'source5' ],
1568 target=main.params[ 'PING' ][ 'target5' ],
1569 pingTime=500 )
1570 main.Mininet2.pingLong(
1571 src=main.params[ 'PING' ][ 'source6' ],
1572 target=main.params[ 'PING' ][ 'target6' ],
1573 pingTime=500 )
1574 main.Mininet2.pingLong(
1575 src=main.params[ 'PING' ][ 'source7' ],
1576 target=main.params[ 'PING' ][ 'target7' ],
1577 pingTime=500 )
1578 main.Mininet2.pingLong(
1579 src=main.params[ 'PING' ][ 'source8' ],
1580 target=main.params[ 'PING' ][ 'target8' ],
1581 pingTime=500 )
1582 main.Mininet2.pingLong(
1583 src=main.params[ 'PING' ][ 'source9' ],
1584 target=main.params[ 'PING' ][ 'target9' ],
1585 pingTime=500 )
1586 main.Mininet2.pingLong(
1587 src=main.params[ 'PING' ][ 'source10' ],
1588 target=main.params[ 'PING' ][ 'target10' ],
1589 pingTime=500 )
1590
1591 main.step( "Collecting topology information from ONOS" )
1592 devices = []
1593 threads = []
1594 for i in main.activeNodes:
1595 t = main.Thread( target=main.CLIs[i].devices,
1596 name="devices-" + str( i ),
1597 args=[ ] )
1598 threads.append( t )
1599 t.start()
1600
1601 for t in threads:
1602 t.join()
1603 devices.append( t.result )
1604 hosts = []
1605 threads = []
1606 for i in main.activeNodes:
1607 t = main.Thread( target=main.CLIs[i].hosts,
1608 name="hosts-" + str( i ),
1609 args=[ ] )
1610 threads.append( t )
1611 t.start()
1612
1613 for t in threads:
1614 t.join()
1615 try:
1616 hosts.append( json.loads( t.result ) )
1617 except ( ValueError, TypeError ):
1618 # FIXME: better handling of this, print which node
1619 # Maybe use thread name?
1620 main.log.exception( "Error parsing json output of hosts" )
1621 main.log.warn( repr( t.result ) )
1622 hosts.append( None )
1623
1624 ports = []
1625 threads = []
1626 for i in main.activeNodes:
1627 t = main.Thread( target=main.CLIs[i].ports,
1628 name="ports-" + str( i ),
1629 args=[ ] )
1630 threads.append( t )
1631 t.start()
1632
1633 for t in threads:
1634 t.join()
1635 ports.append( t.result )
1636 links = []
1637 threads = []
1638 for i in main.activeNodes:
1639 t = main.Thread( target=main.CLIs[i].links,
1640 name="links-" + str( i ),
1641 args=[ ] )
1642 threads.append( t )
1643 t.start()
1644
1645 for t in threads:
1646 t.join()
1647 links.append( t.result )
1648 clusters = []
1649 threads = []
1650 for i in main.activeNodes:
1651 t = main.Thread( target=main.CLIs[i].clusters,
1652 name="clusters-" + str( i ),
1653 args=[ ] )
1654 threads.append( t )
1655 t.start()
1656
1657 for t in threads:
1658 t.join()
1659 clusters.append( t.result )
1660 # Compare json objects for hosts and dataplane clusters
1661
1662 # hosts
1663 main.step( "Host view is consistent across ONOS nodes" )
1664 consistentHostsResult = main.TRUE
1665 for controller in range( len( hosts ) ):
1666 controllerStr = str( main.activeNodes[controller] + 1 )
1667 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1668 if hosts[ controller ] == hosts[ 0 ]:
1669 continue
1670 else: # hosts not consistent
1671 main.log.error( "hosts from ONOS" +
1672 controllerStr +
1673 " is inconsistent with ONOS1" )
1674 main.log.warn( repr( hosts[ controller ] ) )
1675 consistentHostsResult = main.FALSE
1676
1677 else:
1678 main.log.error( "Error in getting ONOS hosts from ONOS" +
1679 controllerStr )
1680 consistentHostsResult = main.FALSE
1681 main.log.warn( "ONOS" + controllerStr +
1682 " hosts response: " +
1683 repr( hosts[ controller ] ) )
1684 utilities.assert_equals(
1685 expect=main.TRUE,
1686 actual=consistentHostsResult,
1687 onpass="Hosts view is consistent across all ONOS nodes",
1688 onfail="ONOS nodes have different views of hosts" )
1689
1690 main.step( "Each host has an IP address" )
1691 ipResult = main.TRUE
1692 for controller in range( 0, len( hosts ) ):
1693 controllerStr = str( main.activeNodes[controller] + 1 )
1694 if hosts[ controller ]:
1695 for host in hosts[ controller ]:
1696 if not host.get( 'ipAddresses', [ ] ):
1697 main.log.error( "Error with host ips on controller" +
1698 controllerStr + ": " + str( host ) )
1699 ipResult = main.FALSE
1700 utilities.assert_equals(
1701 expect=main.TRUE,
1702 actual=ipResult,
1703 onpass="The ips of the hosts aren't empty",
1704 onfail="The ip of at least one host is missing" )
1705
1706 # Strongly connected clusters of devices
1707 main.step( "Cluster view is consistent across ONOS nodes" )
1708 consistentClustersResult = main.TRUE
1709 for controller in range( len( clusters ) ):
1710 controllerStr = str( main.activeNodes[controller] + 1 )
1711 if "Error" not in clusters[ controller ]:
1712 if clusters[ controller ] == clusters[ 0 ]:
1713 continue
1714 else: # clusters not consistent
1715 main.log.error( "clusters from ONOS" + controllerStr +
1716 " is inconsistent with ONOS1" )
1717 consistentClustersResult = main.FALSE
1718
1719 else:
1720 main.log.error( "Error in getting dataplane clusters " +
1721 "from ONOS" + controllerStr )
1722 consistentClustersResult = main.FALSE
1723 main.log.warn( "ONOS" + controllerStr +
1724 " clusters response: " +
1725 repr( clusters[ controller ] ) )
1726 utilities.assert_equals(
1727 expect=main.TRUE,
1728 actual=consistentClustersResult,
1729 onpass="Clusters view is consistent across all ONOS nodes",
1730 onfail="ONOS nodes have different views of clusters" )
1731 if not consistentClustersResult:
1732 main.log.debug( clusters )
1733
1734 # there should always only be one cluster
1735 main.step( "Cluster view correct across ONOS nodes" )
1736 try:
1737 numClusters = len( json.loads( clusters[ 0 ] ) )
1738 except ( ValueError, TypeError ):
1739 main.log.exception( "Error parsing clusters[0]: " +
1740 repr( clusters[ 0 ] ) )
1741 numClusters = "ERROR"
1742 utilities.assert_equals(
1743 expect=1,
1744 actual=numClusters,
1745 onpass="ONOS shows 1 SCC",
1746 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1747
1748 main.step( "Comparing ONOS topology to MN" )
1749 devicesResults = main.TRUE
1750 linksResults = main.TRUE
1751 hostsResults = main.TRUE
1752 mnSwitches = main.Mininet1.getSwitches()
1753 mnLinks = main.Mininet1.getLinks()
1754 mnHosts = main.Mininet1.getHosts()
1755 for controller in main.activeNodes:
1756 controllerStr = str( main.activeNodes[controller] + 1 )
1757 if devices[ controller ] and ports[ controller ] and\
1758 "Error" not in devices[ controller ] and\
1759 "Error" not in ports[ controller ]:
1760 currentDevicesResult = main.Mininet1.compareSwitches(
1761 mnSwitches,
1762 json.loads( devices[ controller ] ),
1763 json.loads( ports[ controller ] ) )
1764 else:
1765 currentDevicesResult = main.FALSE
1766 utilities.assert_equals( expect=main.TRUE,
1767 actual=currentDevicesResult,
1768 onpass="ONOS" + controllerStr +
1769 " Switches view is correct",
1770 onfail="ONOS" + controllerStr +
1771 " Switches view is incorrect" )
1772 if links[ controller ] and "Error" not in links[ controller ]:
1773 currentLinksResult = main.Mininet1.compareLinks(
1774 mnSwitches, mnLinks,
1775 json.loads( links[ controller ] ) )
1776 else:
1777 currentLinksResult = main.FALSE
1778 utilities.assert_equals( expect=main.TRUE,
1779 actual=currentLinksResult,
1780 onpass="ONOS" + controllerStr +
1781 " links view is correct",
1782 onfail="ONOS" + controllerStr +
1783 " links view is incorrect" )
1784
1785 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1786 currentHostsResult = main.Mininet1.compareHosts(
1787 mnHosts,
1788 hosts[ controller ] )
1789 else:
1790 currentHostsResult = main.FALSE
1791 utilities.assert_equals( expect=main.TRUE,
1792 actual=currentHostsResult,
1793 onpass="ONOS" + controllerStr +
1794 " hosts exist in Mininet",
1795 onfail="ONOS" + controllerStr +
1796 " hosts don't match Mininet" )
1797
1798 devicesResults = devicesResults and currentDevicesResult
1799 linksResults = linksResults and currentLinksResult
1800 hostsResults = hostsResults and currentHostsResult
1801
1802 main.step( "Device information is correct" )
1803 utilities.assert_equals(
1804 expect=main.TRUE,
1805 actual=devicesResults,
1806 onpass="Device information is correct",
1807 onfail="Device information is incorrect" )
1808
1809 main.step( "Links are correct" )
1810 utilities.assert_equals(
1811 expect=main.TRUE,
1812 actual=linksResults,
1813 onpass="Link are correct",
1814 onfail="Links are incorrect" )
1815
1816 main.step( "Hosts are correct" )
1817 utilities.assert_equals(
1818 expect=main.TRUE,
1819 actual=hostsResults,
1820 onpass="Hosts are correct",
1821 onfail="Hosts are incorrect" )
1822
1823 def CASE6( self, main ):
1824 """
1825 The Scaling case.
1826 """
1827 import time
1828 import re
1829 assert main.numCtrls, "main.numCtrls not defined"
1830 assert main, "main not defined"
1831 assert utilities.assert_equals, "utilities.assert_equals not defined"
1832 assert main.CLIs, "main.CLIs not defined"
1833 assert main.nodes, "main.nodes not defined"
1834 try:
1835 labels
1836 except NameError:
1837 main.log.error( "labels not defined, setting to []" )
1838 global labels
1839 labels = []
1840 try:
1841 data
1842 except NameError:
1843 main.log.error( "data not defined, setting to []" )
1844 global data
1845 data = []
1846
1847 main.case( "Swap some of the ONOS nodes" )
1848
1849 main.step( "Checking ONOS Logs for errors" )
1850 for i in main.activeNodes:
1851 node = main.nodes[i]
1852 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1853 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1854
1855 main.step( "Generate new metadata file" )
Jon Hallbd60ea02016-08-23 10:03:59 -07001856 old = [ main.activeNodes[1], main.activeNodes[-2] ]
Jon Hall69b2b982016-05-11 12:04:59 -07001857 new = range( main.ONOSbench.maxNodes )[-2:]
1858 assert len( old ) == len( new ), "Length of nodes to swap don't match"
1859 handle = main.ONOSbench.handle
1860 for x, y in zip( old, new ):
1861 handle.sendline( "export OC{}=$OC{}".format( x + 1, y + 1 ) )
1862 handle.expect( "\$" ) # from the variable
1863 ret = handle.before
1864 handle.expect( "\$" ) # From the prompt
1865 ret += handle.before
1866 main.log.debug( ret )
1867 main.activeNodes.remove( x )
1868 main.activeNodes.append( y )
1869
1870 genResult = main.Server.generateFile( main.numCtrls )
1871 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1872 onpass="New cluster metadata file generated",
1873 onfail="Failled to generate new metadata file" )
1874 time.sleep( 5 ) # Give time for nodes to read new file
1875
1876 main.step( "Start new nodes" ) # OR stop old nodes?
1877 started = main.TRUE
1878 for i in new:
1879 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1880 utilities.assert_equals( expect=main.TRUE, actual=started,
1881 onpass="ONOS started",
1882 onfail="ONOS start NOT successful" )
1883
1884 main.step( "Checking if ONOS is up yet" )
1885 for i in range( 2 ):
1886 onosIsupResult = main.TRUE
1887 for i in main.activeNodes:
1888 node = main.nodes[i]
1889 started = main.ONOSbench.isup( node.ip_address )
1890 if not started:
1891 main.log.error( node.name + " didn't start!" )
1892 onosIsupResult = onosIsupResult and started
1893 if onosIsupResult == main.TRUE:
1894 break
1895 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1896 onpass="ONOS started",
1897 onfail="ONOS start NOT successful" )
1898
Jon Hall6509dbf2016-06-21 17:01:17 -07001899 main.step( "Starting ONOS CLI sessions" )
Jon Hall69b2b982016-05-11 12:04:59 -07001900 cliResults = main.TRUE
1901 threads = []
1902 for i in main.activeNodes:
1903 t = main.Thread( target=main.CLIs[i].startOnosCli,
1904 name="startOnosCli-" + str( i ),
1905 args=[main.nodes[i].ip_address] )
1906 threads.append( t )
1907 t.start()
1908
1909 for t in threads:
1910 t.join()
1911 cliResults = cliResults and t.result
1912 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1913 onpass="ONOS cli started",
1914 onfail="ONOS clis did not start" )
1915
1916 main.step( "Checking ONOS nodes" )
1917 nodeResults = utilities.retry( main.HA.nodesCheck,
1918 False,
1919 args=[main.activeNodes],
1920 attempts=5 )
1921 utilities.assert_equals( expect=True, actual=nodeResults,
1922 onpass="Nodes check successful",
1923 onfail="Nodes check NOT successful" )
1924
1925 for i in range( 10 ):
1926 ready = True
1927 for i in main.activeNodes:
1928 cli = main.CLIs[i]
1929 output = cli.summary()
1930 if not output:
1931 ready = False
1932 if ready:
1933 break
1934 time.sleep( 30 )
1935 utilities.assert_equals( expect=True, actual=ready,
1936 onpass="ONOS summary command succeded",
1937 onfail="ONOS summary command failed" )
1938 if not ready:
1939 main.cleanup()
1940 main.exit()
1941
1942 # Rerun for election on new nodes
1943 runResults = main.TRUE
1944 for i in main.activeNodes:
1945 cli = main.CLIs[i]
1946 run = cli.electionTestRun()
1947 if run != main.TRUE:
1948 main.log.error( "Error running for election on " + cli.name )
1949 runResults = runResults and run
1950 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1951 onpass="Reran for election",
1952 onfail="Failed to rerun for election" )
1953
1954 for node in main.activeNodes:
1955 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1956 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1957 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1958 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1959 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1960
1961 main.step( "Reapplying cell variable to environment" )
1962 cellName = main.params[ 'ENV' ][ 'cellName' ]
1963 cellResult = main.ONOSbench.setCell( cellName )
1964 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
1965 onpass="Set cell successfull",
1966 onfail="Failled to set cell" )
1967
    def CASE7( self, main ):
        """
        Check state after ONOS scaling.

        Verifies, on every active node: each device has a master, mastership
        and intents are consistent across nodes, intents and OF flow tables
        match the state saved before the scaling, and leadership election
        still reports a single consistent leader.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Fan out one rolesNotNull query per active node, then AND the results.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Any falsy or error-bearing response marks the whole read as failed.
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes must report the exact same roles output as node 0.
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's view to help diagnose the mismatch.
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare each node's (sorted) intents JSON string against node 0's.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node.
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states.
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        # `intentState` is a global expected to be saved by an earlier case;
        # the NameError guard below handles the case where it was not.
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length but not byte-equal: compare parsed objects.
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=sameIntents,
                onpass="Intents are consistent with before scaling",
                onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        # `flows` is presumably a global list of per-switch flow tables saved
        # by an earlier case — TODO confirm against the case that sets it.
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []
        leaderResult = main.TRUE

        # Each node must report the same, non-empty election leader.
        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2276
2277 def CASE8( self, main ):
2278 """
2279 Compare topo
2280 """
2281 import json
2282 import time
2283 assert main.numCtrls, "main.numCtrls not defined"
2284 assert main, "main not defined"
2285 assert utilities.assert_equals, "utilities.assert_equals not defined"
2286 assert main.CLIs, "main.CLIs not defined"
2287 assert main.nodes, "main.nodes not defined"
2288
2289 main.case( "Compare ONOS Topology view to Mininet topology" )
2290 main.caseExplanation = "Compare topology objects between Mininet" +\
2291 " and ONOS"
2292 topoResult = main.FALSE
2293 topoFailMsg = "ONOS topology don't match Mininet"
2294 elapsed = 0
2295 count = 0
2296 main.step( "Comparing ONOS topology to MN topology" )
2297 startTime = time.time()
2298 # Give time for Gossip to work
2299 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2300 devicesResults = main.TRUE
2301 linksResults = main.TRUE
2302 hostsResults = main.TRUE
2303 hostAttachmentResults = True
2304 count += 1
2305 cliStart = time.time()
2306 devices = []
2307 threads = []
2308 for i in main.activeNodes:
2309 t = main.Thread( target=utilities.retry,
2310 name="devices-" + str( i ),
2311 args=[ main.CLIs[i].devices, [ None ] ],
2312 kwargs= { 'sleep': 5, 'attempts': 5,
2313 'randomTime': True } )
2314 threads.append( t )
2315 t.start()
2316
2317 for t in threads:
2318 t.join()
2319 devices.append( t.result )
2320 hosts = []
2321 ipResult = main.TRUE
2322 threads = []
2323 for i in main.activeNodes:
2324 t = main.Thread( target=utilities.retry,
2325 name="hosts-" + str( i ),
2326 args=[ main.CLIs[i].hosts, [ None ] ],
2327 kwargs= { 'sleep': 5, 'attempts': 5,
2328 'randomTime': True } )
2329 threads.append( t )
2330 t.start()
2331
2332 for t in threads:
2333 t.join()
2334 try:
2335 hosts.append( json.loads( t.result ) )
2336 except ( ValueError, TypeError ):
2337 main.log.exception( "Error parsing hosts results" )
2338 main.log.error( repr( t.result ) )
2339 hosts.append( None )
2340 for controller in range( 0, len( hosts ) ):
2341 controllerStr = str( main.activeNodes[controller] + 1 )
2342 if hosts[ controller ]:
2343 for host in hosts[ controller ]:
2344 if host is None or host.get( 'ipAddresses', [] ) == []:
2345 main.log.error(
2346 "Error with host ipAddresses on controller" +
2347 controllerStr + ": " + str( host ) )
2348 ipResult = main.FALSE
2349 ports = []
2350 threads = []
2351 for i in main.activeNodes:
2352 t = main.Thread( target=utilities.retry,
2353 name="ports-" + str( i ),
2354 args=[ main.CLIs[i].ports, [ None ] ],
2355 kwargs= { 'sleep': 5, 'attempts': 5,
2356 'randomTime': True } )
2357 threads.append( t )
2358 t.start()
2359
2360 for t in threads:
2361 t.join()
2362 ports.append( t.result )
2363 links = []
2364 threads = []
2365 for i in main.activeNodes:
2366 t = main.Thread( target=utilities.retry,
2367 name="links-" + str( i ),
2368 args=[ main.CLIs[i].links, [ None ] ],
2369 kwargs= { 'sleep': 5, 'attempts': 5,
2370 'randomTime': True } )
2371 threads.append( t )
2372 t.start()
2373
2374 for t in threads:
2375 t.join()
2376 links.append( t.result )
2377 clusters = []
2378 threads = []
2379 for i in main.activeNodes:
2380 t = main.Thread( target=utilities.retry,
2381 name="clusters-" + str( i ),
2382 args=[ main.CLIs[i].clusters, [ None ] ],
2383 kwargs= { 'sleep': 5, 'attempts': 5,
2384 'randomTime': True } )
2385 threads.append( t )
2386 t.start()
2387
2388 for t in threads:
2389 t.join()
2390 clusters.append( t.result )
2391
2392 elapsed = time.time() - startTime
2393 cliTime = time.time() - cliStart
2394 print "Elapsed time: " + str( elapsed )
2395 print "CLI time: " + str( cliTime )
2396
2397 if all( e is None for e in devices ) and\
2398 all( e is None for e in hosts ) and\
2399 all( e is None for e in ports ) and\
2400 all( e is None for e in links ) and\
2401 all( e is None for e in clusters ):
2402 topoFailMsg = "Could not get topology from ONOS"
2403 main.log.error( topoFailMsg )
2404 continue # Try again, No use trying to compare
2405
2406 mnSwitches = main.Mininet1.getSwitches()
2407 mnLinks = main.Mininet1.getLinks()
2408 mnHosts = main.Mininet1.getHosts()
2409 for controller in range( len( main.activeNodes ) ):
2410 controllerStr = str( main.activeNodes[controller] + 1 )
2411 if devices[ controller ] and ports[ controller ] and\
2412 "Error" not in devices[ controller ] and\
2413 "Error" not in ports[ controller ]:
2414
2415 try:
2416 currentDevicesResult = main.Mininet1.compareSwitches(
2417 mnSwitches,
2418 json.loads( devices[ controller ] ),
2419 json.loads( ports[ controller ] ) )
2420 except ( TypeError, ValueError ):
2421 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2422 devices[ controller ], ports[ controller ] ) )
2423 else:
2424 currentDevicesResult = main.FALSE
2425 utilities.assert_equals( expect=main.TRUE,
2426 actual=currentDevicesResult,
2427 onpass="ONOS" + controllerStr +
2428 " Switches view is correct",
2429 onfail="ONOS" + controllerStr +
2430 " Switches view is incorrect" )
2431
2432 if links[ controller ] and "Error" not in links[ controller ]:
2433 currentLinksResult = main.Mininet1.compareLinks(
2434 mnSwitches, mnLinks,
2435 json.loads( links[ controller ] ) )
2436 else:
2437 currentLinksResult = main.FALSE
2438 utilities.assert_equals( expect=main.TRUE,
2439 actual=currentLinksResult,
2440 onpass="ONOS" + controllerStr +
2441 " links view is correct",
2442 onfail="ONOS" + controllerStr +
2443 " links view is incorrect" )
2444 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2445 currentHostsResult = main.Mininet1.compareHosts(
2446 mnHosts,
2447 hosts[ controller ] )
2448 elif hosts[ controller ] == []:
2449 currentHostsResult = main.TRUE
2450 else:
2451 currentHostsResult = main.FALSE
2452 utilities.assert_equals( expect=main.TRUE,
2453 actual=currentHostsResult,
2454 onpass="ONOS" + controllerStr +
2455 " hosts exist in Mininet",
2456 onfail="ONOS" + controllerStr +
2457 " hosts don't match Mininet" )
2458 # CHECKING HOST ATTACHMENT POINTS
2459 hostAttachment = True
2460 zeroHosts = False
2461 # FIXME: topo-HA/obelisk specific mappings:
2462 # key is mac and value is dpid
2463 mappings = {}
2464 for i in range( 1, 29 ): # hosts 1 through 28
2465 # set up correct variables:
2466 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2467 if i == 1:
2468 deviceId = "1000".zfill(16)
2469 elif i == 2:
2470 deviceId = "2000".zfill(16)
2471 elif i == 3:
2472 deviceId = "3000".zfill(16)
2473 elif i == 4:
2474 deviceId = "3004".zfill(16)
2475 elif i == 5:
2476 deviceId = "5000".zfill(16)
2477 elif i == 6:
2478 deviceId = "6000".zfill(16)
2479 elif i == 7:
2480 deviceId = "6007".zfill(16)
2481 elif i >= 8 and i <= 17:
2482 dpid = '3' + str( i ).zfill( 3 )
2483 deviceId = dpid.zfill(16)
2484 elif i >= 18 and i <= 27:
2485 dpid = '6' + str( i ).zfill( 3 )
2486 deviceId = dpid.zfill(16)
2487 elif i == 28:
2488 deviceId = "2800".zfill(16)
2489 mappings[ macId ] = deviceId
2490 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2491 if hosts[ controller ] == []:
2492 main.log.warn( "There are no hosts discovered" )
2493 zeroHosts = True
2494 else:
2495 for host in hosts[ controller ]:
2496 mac = None
2497 location = None
2498 device = None
2499 port = None
2500 try:
2501 mac = host.get( 'mac' )
2502 assert mac, "mac field could not be found for this host object"
2503
2504 location = host.get( 'location' )
2505 assert location, "location field could not be found for this host object"
2506
2507 # Trim the protocol identifier off deviceId
2508 device = str( location.get( 'elementId' ) ).split(':')[1]
2509 assert device, "elementId field could not be found for this host location object"
2510
2511 port = location.get( 'port' )
2512 assert port, "port field could not be found for this host location object"
2513
2514 # Now check if this matches where they should be
2515 if mac and device and port:
2516 if str( port ) != "1":
2517 main.log.error( "The attachment port is incorrect for " +
2518 "host " + str( mac ) +
2519 ". Expected: 1 Actual: " + str( port) )
2520 hostAttachment = False
2521 if device != mappings[ str( mac ) ]:
2522 main.log.error( "The attachment device is incorrect for " +
2523 "host " + str( mac ) +
2524 ". Expected: " + mappings[ str( mac ) ] +
2525 " Actual: " + device )
2526 hostAttachment = False
2527 else:
2528 hostAttachment = False
2529 except AssertionError:
2530 main.log.exception( "Json object not as expected" )
2531 main.log.error( repr( host ) )
2532 hostAttachment = False
2533 else:
2534 main.log.error( "No hosts json output or \"Error\"" +
2535 " in output. hosts = " +
2536 repr( hosts[ controller ] ) )
2537 if zeroHosts is False:
2538 # TODO: Find a way to know if there should be hosts in a
2539 # given point of the test
2540 hostAttachment = True
2541
2542 # END CHECKING HOST ATTACHMENT POINTS
2543 devicesResults = devicesResults and currentDevicesResult
2544 linksResults = linksResults and currentLinksResult
2545 hostsResults = hostsResults and currentHostsResult
2546 hostAttachmentResults = hostAttachmentResults and\
2547 hostAttachment
2548 topoResult = ( devicesResults and linksResults
2549 and hostsResults and ipResult and
2550 hostAttachmentResults )
2551 utilities.assert_equals( expect=True,
2552 actual=topoResult,
2553 onpass="ONOS topology matches Mininet",
2554 onfail=topoFailMsg )
2555 # End of While loop to pull ONOS state
2556
2557 # Compare json objects for hosts and dataplane clusters
2558
2559 # hosts
2560 main.step( "Hosts view is consistent across all ONOS nodes" )
2561 consistentHostsResult = main.TRUE
2562 for controller in range( len( hosts ) ):
2563 controllerStr = str( main.activeNodes[controller] + 1 )
2564 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2565 if hosts[ controller ] == hosts[ 0 ]:
2566 continue
2567 else: # hosts not consistent
2568 main.log.error( "hosts from ONOS" + controllerStr +
2569 " is inconsistent with ONOS1" )
2570 main.log.warn( repr( hosts[ controller ] ) )
2571 consistentHostsResult = main.FALSE
2572
2573 else:
2574 main.log.error( "Error in getting ONOS hosts from ONOS" +
2575 controllerStr )
2576 consistentHostsResult = main.FALSE
2577 main.log.warn( "ONOS" + controllerStr +
2578 " hosts response: " +
2579 repr( hosts[ controller ] ) )
2580 utilities.assert_equals(
2581 expect=main.TRUE,
2582 actual=consistentHostsResult,
2583 onpass="Hosts view is consistent across all ONOS nodes",
2584 onfail="ONOS nodes have different views of hosts" )
2585
2586 main.step( "Hosts information is correct" )
2587 hostsResults = hostsResults and ipResult
2588 utilities.assert_equals(
2589 expect=main.TRUE,
2590 actual=hostsResults,
2591 onpass="Host information is correct",
2592 onfail="Host information is incorrect" )
2593
2594 main.step( "Host attachment points to the network" )
2595 utilities.assert_equals(
2596 expect=True,
2597 actual=hostAttachmentResults,
2598 onpass="Hosts are correctly attached to the network",
2599 onfail="ONOS did not correctly attach hosts to the network" )
2600
2601 # Strongly connected clusters of devices
2602 main.step( "Clusters view is consistent across all ONOS nodes" )
2603 consistentClustersResult = main.TRUE
2604 for controller in range( len( clusters ) ):
2605 controllerStr = str( main.activeNodes[controller] + 1 )
2606 if "Error" not in clusters[ controller ]:
2607 if clusters[ controller ] == clusters[ 0 ]:
2608 continue
2609 else: # clusters not consistent
2610 main.log.error( "clusters from ONOS" +
2611 controllerStr +
2612 " is inconsistent with ONOS1" )
2613 consistentClustersResult = main.FALSE
2614 else:
2615 main.log.error( "Error in getting dataplane clusters " +
2616 "from ONOS" + controllerStr )
2617 consistentClustersResult = main.FALSE
2618 main.log.warn( "ONOS" + controllerStr +
2619 " clusters response: " +
2620 repr( clusters[ controller ] ) )
2621 utilities.assert_equals(
2622 expect=main.TRUE,
2623 actual=consistentClustersResult,
2624 onpass="Clusters view is consistent across all ONOS nodes",
2625 onfail="ONOS nodes have different views of clusters" )
2626 if not consistentClustersResult:
2627 main.log.debug( clusters )
2628 for x in links:
2629 main.log.warn( "{}: {}".format( len( x ), x ) )
2630
2631
2632 main.step( "There is only one SCC" )
2633 # there should always only be one cluster
2634 try:
2635 numClusters = len( json.loads( clusters[ 0 ] ) )
2636 except ( ValueError, TypeError ):
2637 main.log.exception( "Error parsing clusters[0]: " +
2638 repr( clusters[0] ) )
2639 numClusters = "ERROR"
2640 clusterResults = main.FALSE
2641 if numClusters == 1:
2642 clusterResults = main.TRUE
2643 utilities.assert_equals(
2644 expect=1,
2645 actual=numClusters,
2646 onpass="ONOS shows 1 SCC",
2647 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2648
2649 topoResult = ( devicesResults and linksResults
2650 and hostsResults and consistentHostsResult
2651 and consistentClustersResult and clusterResults
2652 and ipResult and hostAttachmentResults )
2653
2654 topoResult = topoResult and int( count <= 2 )
2655 note = "note it takes about " + str( int( cliTime ) ) + \
2656 " seconds for the test to make all the cli calls to fetch " +\
2657 "the topology from each ONOS instance"
2658 main.log.info(
2659 "Very crass estimate for topology discovery/convergence( " +
2660 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2661 str( count ) + " tries" )
2662
2663 main.step( "Device information is correct" )
2664 utilities.assert_equals(
2665 expect=main.TRUE,
2666 actual=devicesResults,
2667 onpass="Device information is correct",
2668 onfail="Device information is incorrect" )
2669
2670 main.step( "Links are correct" )
2671 utilities.assert_equals(
2672 expect=main.TRUE,
2673 actual=linksResults,
2674 onpass="Link are correct",
2675 onfail="Links are incorrect" )
2676
2677 main.step( "Hosts are correct" )
2678 utilities.assert_equals(
2679 expect=main.TRUE,
2680 actual=hostsResults,
2681 onpass="Hosts are correct",
2682 onfail="Hosts are incorrect" )
2683
2684 # FIXME: move this to an ONOS state case
2685 main.step( "Checking ONOS nodes" )
2686 nodeResults = utilities.retry( main.HA.nodesCheck,
2687 False,
2688 args=[main.activeNodes],
2689 attempts=5 )
2690 utilities.assert_equals( expect=True, actual=nodeResults,
2691 onpass="Nodes check successful",
2692 onfail="Nodes check NOT successful" )
2693 if not nodeResults:
2694 for i in main.activeNodes:
2695 main.log.debug( "{} components not ACTIVE: \n{}".format(
2696 main.CLIs[i].name,
2697 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
2698
Jon Halld2871c22016-07-26 11:01:14 -07002699 if not topoResult:
2700 main.cleanup()
2701 main.exit()
2702
Jon Hall69b2b982016-05-11 12:04:59 -07002703 def CASE9( self, main ):
2704 """
2705 Link s3-s28 down
2706 """
2707 import time
2708 assert main.numCtrls, "main.numCtrls not defined"
2709 assert main, "main not defined"
2710 assert utilities.assert_equals, "utilities.assert_equals not defined"
2711 assert main.CLIs, "main.CLIs not defined"
2712 assert main.nodes, "main.nodes not defined"
2713 # NOTE: You should probably run a topology check after this
2714
2715 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2716
2717 description = "Turn off a link to ensure that Link Discovery " +\
2718 "is working properly"
2719 main.case( description )
2720
2721 main.step( "Kill Link between s3 and s28" )
2722 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2723 main.log.info( "Waiting " + str( linkSleep ) +
2724 " seconds for link down to be discovered" )
2725 time.sleep( linkSleep )
2726 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2727 onpass="Link down successful",
2728 onfail="Failed to bring link down" )
2729 # TODO do some sort of check here
2730
2731 def CASE10( self, main ):
2732 """
2733 Link s3-s28 up
2734 """
2735 import time
2736 assert main.numCtrls, "main.numCtrls not defined"
2737 assert main, "main not defined"
2738 assert utilities.assert_equals, "utilities.assert_equals not defined"
2739 assert main.CLIs, "main.CLIs not defined"
2740 assert main.nodes, "main.nodes not defined"
2741 # NOTE: You should probably run a topology check after this
2742
2743 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2744
2745 description = "Restore a link to ensure that Link Discovery is " + \
2746 "working properly"
2747 main.case( description )
2748
2749 main.step( "Bring link between s3 and s28 back up" )
2750 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2751 main.log.info( "Waiting " + str( linkSleep ) +
2752 " seconds for link up to be discovered" )
2753 time.sleep( linkSleep )
2754 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2755 onpass="Link up successful",
2756 onfail="Failed to bring link up" )
2757 # TODO do some sort of check here
2758
2759 def CASE11( self, main ):
2760 """
2761 Switch Down
2762 """
2763 # NOTE: You should probably run a topology check after this
2764 import time
2765 assert main.numCtrls, "main.numCtrls not defined"
2766 assert main, "main not defined"
2767 assert utilities.assert_equals, "utilities.assert_equals not defined"
2768 assert main.CLIs, "main.CLIs not defined"
2769 assert main.nodes, "main.nodes not defined"
2770
2771 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2772
2773 description = "Killing a switch to ensure it is discovered correctly"
2774 onosCli = main.CLIs[ main.activeNodes[0] ]
2775 main.case( description )
2776 switch = main.params[ 'kill' ][ 'switch' ]
2777 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2778
2779 # TODO: Make this switch parameterizable
2780 main.step( "Kill " + switch )
2781 main.log.info( "Deleting " + switch )
2782 main.Mininet1.delSwitch( switch )
2783 main.log.info( "Waiting " + str( switchSleep ) +
2784 " seconds for switch down to be discovered" )
2785 time.sleep( switchSleep )
2786 device = onosCli.getDevice( dpid=switchDPID )
2787 # Peek at the deleted switch
2788 main.log.warn( str( device ) )
2789 result = main.FALSE
2790 if device and device[ 'available' ] is False:
2791 result = main.TRUE
2792 utilities.assert_equals( expect=main.TRUE, actual=result,
2793 onpass="Kill switch successful",
2794 onfail="Failed to kill switch?" )
2795
2796 def CASE12( self, main ):
2797 """
2798 Switch Up
2799 """
2800 # NOTE: You should probably run a topology check after this
2801 import time
2802 assert main.numCtrls, "main.numCtrls not defined"
2803 assert main, "main not defined"
2804 assert utilities.assert_equals, "utilities.assert_equals not defined"
2805 assert main.CLIs, "main.CLIs not defined"
2806 assert main.nodes, "main.nodes not defined"
2807
2808 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2809 switch = main.params[ 'kill' ][ 'switch' ]
2810 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2811 links = main.params[ 'kill' ][ 'links' ].split()
2812 onosCli = main.CLIs[ main.activeNodes[0] ]
2813 description = "Adding a switch to ensure it is discovered correctly"
2814 main.case( description )
2815
2816 main.step( "Add back " + switch )
2817 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2818 for peer in links:
2819 main.Mininet1.addLink( switch, peer )
2820 ipList = [ node.ip_address for node in main.nodes ]
2821 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2822 main.log.info( "Waiting " + str( switchSleep ) +
2823 " seconds for switch up to be discovered" )
2824 time.sleep( switchSleep )
2825 device = onosCli.getDevice( dpid=switchDPID )
2826 # Peek at the deleted switch
2827 main.log.warn( str( device ) )
2828 result = main.FALSE
2829 if device and device[ 'available' ]:
2830 result = main.TRUE
2831 utilities.assert_equals( expect=main.TRUE, actual=result,
2832 onpass="add switch successful",
2833 onfail="Failed to add switch?" )
2834
2835 def CASE13( self, main ):
2836 """
2837 Clean up
2838 """
2839 assert main.numCtrls, "main.numCtrls not defined"
2840 assert main, "main not defined"
2841 assert utilities.assert_equals, "utilities.assert_equals not defined"
2842 assert main.CLIs, "main.CLIs not defined"
2843 assert main.nodes, "main.nodes not defined"
2844
2845 main.case( "Test Cleanup" )
2846 main.step( "Killing tcpdumps" )
2847 main.Mininet2.stopTcpdump()
2848
2849 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2850 main.step( "Copying MN pcap and ONOS log files to test station" )
2851 # NOTE: MN Pcap file is being saved to logdir.
2852 # We scp this file as MN and TestON aren't necessarily the same vm
2853
2854 # FIXME: To be replaced with a Jenkin's post script
2855 # TODO: Load these from params
2856 # NOTE: must end in /
2857 logFolder = "/opt/onos/log/"
2858 logFiles = [ "karaf.log", "karaf.log.1" ]
2859 # NOTE: must end in /
2860 for f in logFiles:
2861 for node in main.nodes:
2862 dstName = main.logdir + "/" + node.name + "-" + f
2863 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2864 logFolder + f, dstName )
2865 # std*.log's
2866 # NOTE: must end in /
2867 logFolder = "/opt/onos/var/"
2868 logFiles = [ "stderr.log", "stdout.log" ]
2869 # NOTE: must end in /
2870 for f in logFiles:
2871 for node in main.nodes:
2872 dstName = main.logdir + "/" + node.name + "-" + f
2873 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2874 logFolder + f, dstName )
2875 else:
2876 main.log.debug( "skipping saving log files" )
2877
2878 main.step( "Stopping Mininet" )
2879 mnResult = main.Mininet1.stopNet()
2880 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2881 onpass="Mininet stopped",
2882 onfail="MN cleanup NOT successful" )
2883
2884 main.step( "Checking ONOS Logs for errors" )
2885 for node in main.nodes:
2886 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2887 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2888
2889 try:
2890 timerLog = open( main.logdir + "/Timers.csv", 'w')
2891 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2892 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2893 timerLog.close()
2894 except NameError, e:
2895 main.log.exception(e)
2896
2897 main.step( "Stopping webserver" )
2898 status = main.Server.stop( )
2899 utilities.assert_equals( expect=main.TRUE, actual=status,
2900 onpass="Stop Server",
2901 onfail="Failled to stop SimpleHTTPServer" )
2902 del main.Server
2903
2904 def CASE14( self, main ):
2905 """
2906 start election app on all onos nodes
2907 """
2908 import time
2909 assert main.numCtrls, "main.numCtrls not defined"
2910 assert main, "main not defined"
2911 assert utilities.assert_equals, "utilities.assert_equals not defined"
2912 assert main.CLIs, "main.CLIs not defined"
2913 assert main.nodes, "main.nodes not defined"
2914
2915 main.case("Start Leadership Election app")
2916 main.step( "Install leadership election app" )
2917 onosCli = main.CLIs[ main.activeNodes[0] ]
2918 appResult = onosCli.activateApp( "org.onosproject.election" )
2919 utilities.assert_equals(
2920 expect=main.TRUE,
2921 actual=appResult,
2922 onpass="Election app installed",
2923 onfail="Something went wrong with installing Leadership election" )
2924
2925 main.step( "Run for election on each node" )
2926 for i in main.activeNodes:
2927 main.CLIs[i].electionTestRun()
2928 time.sleep(5)
2929 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2930 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2931 utilities.assert_equals(
2932 expect=True,
2933 actual=sameResult,
2934 onpass="All nodes see the same leaderboards",
2935 onfail="Inconsistent leaderboards" )
2936
2937 if sameResult:
2938 leader = leaders[ 0 ][ 0 ]
2939 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2940 correctLeader = True
2941 else:
2942 correctLeader = False
2943 main.step( "First node was elected leader" )
2944 utilities.assert_equals(
2945 expect=True,
2946 actual=correctLeader,
2947 onpass="Correct leader was elected",
2948 onfail="Incorrect leader" )
2949
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal, and later before withdrawal vs after re-election
        """
        import time
        # Sanity-check that the framework state this case relies on exists
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each node's candidates before
        newLeaders = []  # list of lists of each node's candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            # With a single controller, withdrawing leaves nobody to lead
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app loaded the rest of this case is moot
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # The leader is the first entry of the (agreed-upon) leaderboard
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader by matching its IP to a node's address
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE: runs only when the loop never hit 'break'
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means no leader was elected; only acceptable when we
            # expected the withdrawal to empty the race (single controller)
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        # (index 0 was the leader, index 1 appears to be a status/term field
        # in the leaderboard rows -- confirm against consistentLeaderboards)
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Not enough entries on the old leaderboard to know who was next
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this wait
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate list,
        # since it re-entered the race after everyone else
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3123
3124 def CASE16( self, main ):
3125 """
3126 Install Distributed Primitives app
3127 """
3128 import time
3129 assert main.numCtrls, "main.numCtrls not defined"
3130 assert main, "main not defined"
3131 assert utilities.assert_equals, "utilities.assert_equals not defined"
3132 assert main.CLIs, "main.CLIs not defined"
3133 assert main.nodes, "main.nodes not defined"
3134
3135 # Variables for the distributed primitives tests
3136 global pCounterName
3137 global pCounterValue
3138 global onosSet
3139 global onosSetName
3140 pCounterName = "TestON-Partitions"
3141 pCounterValue = 0
3142 onosSet = set([])
3143 onosSetName = "TestON-set"
3144
3145 description = "Install Primitives app"
3146 main.case( description )
3147 main.step( "Install Primitives app" )
3148 appName = "org.onosproject.distributedprimitives"
3149 node = main.activeNodes[0]
3150 appResults = main.CLIs[node].activateApp( appName )
3151 utilities.assert_equals( expect=main.TRUE,
3152 actual=appResults,
3153 onpass="Primitives app activated",
3154 onfail="Primitives app not activated" )
3155 time.sleep( 5 ) # To allow all nodes to activate
3156
3157 def CASE17( self, main ):
3158 """
3159 Check for basic functionality with distributed primitives
3160 """
3161 # Make sure variables are defined/set
3162 assert main.numCtrls, "main.numCtrls not defined"
3163 assert main, "main not defined"
3164 assert utilities.assert_equals, "utilities.assert_equals not defined"
3165 assert main.CLIs, "main.CLIs not defined"
3166 assert main.nodes, "main.nodes not defined"
3167 assert pCounterName, "pCounterName not defined"
3168 assert onosSetName, "onosSetName not defined"
3169 # NOTE: assert fails if value is 0/None/Empty/False
3170 try:
3171 pCounterValue
3172 except NameError:
3173 main.log.error( "pCounterValue not defined, setting to 0" )
3174 pCounterValue = 0
3175 try:
3176 onosSet
3177 except NameError:
3178 main.log.error( "onosSet not defined, setting to empty Set" )
3179 onosSet = set([])
3180 # Variables for the distributed primitives tests. These are local only
3181 addValue = "a"
3182 addAllValue = "a b c d e f"
3183 retainValue = "c d e f"
3184
3185 description = "Check for basic functionality with distributed " +\
3186 "primitives"
3187 main.case( description )
3188 main.caseExplanation = "Test the methods of the distributed " +\
3189 "primitives (counters and sets) throught the cli"
3190 # DISTRIBUTED ATOMIC COUNTERS
3191 # Partitioned counters
3192 main.step( "Increment then get a default counter on each node" )
3193 pCounters = []
3194 threads = []
3195 addedPValues = []
3196 for i in main.activeNodes:
3197 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3198 name="counterAddAndGet-" + str( i ),
3199 args=[ pCounterName ] )
3200 pCounterValue += 1
3201 addedPValues.append( pCounterValue )
3202 threads.append( t )
3203 t.start()
3204
3205 for t in threads:
3206 t.join()
3207 pCounters.append( t.result )
3208 # Check that counter incremented numController times
3209 pCounterResults = True
3210 for i in addedPValues:
3211 tmpResult = i in pCounters
3212 pCounterResults = pCounterResults and tmpResult
3213 if not tmpResult:
3214 main.log.error( str( i ) + " is not in partitioned "
3215 "counter incremented results" )
3216 utilities.assert_equals( expect=True,
3217 actual=pCounterResults,
3218 onpass="Default counter incremented",
3219 onfail="Error incrementing default" +
3220 " counter" )
3221
3222 main.step( "Get then Increment a default counter on each node" )
3223 pCounters = []
3224 threads = []
3225 addedPValues = []
3226 for i in main.activeNodes:
3227 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3228 name="counterGetAndAdd-" + str( i ),
3229 args=[ pCounterName ] )
3230 addedPValues.append( pCounterValue )
3231 pCounterValue += 1
3232 threads.append( t )
3233 t.start()
3234
3235 for t in threads:
3236 t.join()
3237 pCounters.append( t.result )
3238 # Check that counter incremented numController times
3239 pCounterResults = True
3240 for i in addedPValues:
3241 tmpResult = i in pCounters
3242 pCounterResults = pCounterResults and tmpResult
3243 if not tmpResult:
3244 main.log.error( str( i ) + " is not in partitioned "
3245 "counter incremented results" )
3246 utilities.assert_equals( expect=True,
3247 actual=pCounterResults,
3248 onpass="Default counter incremented",
3249 onfail="Error incrementing default" +
3250 " counter" )
3251
3252 main.step( "Counters we added have the correct values" )
3253 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3254 utilities.assert_equals( expect=main.TRUE,
3255 actual=incrementCheck,
3256 onpass="Added counters are correct",
3257 onfail="Added counters are incorrect" )
3258
3259 main.step( "Add -8 to then get a default counter on each node" )
3260 pCounters = []
3261 threads = []
3262 addedPValues = []
3263 for i in main.activeNodes:
3264 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3265 name="counterIncrement-" + str( i ),
3266 args=[ pCounterName ],
3267 kwargs={ "delta": -8 } )
3268 pCounterValue += -8
3269 addedPValues.append( pCounterValue )
3270 threads.append( t )
3271 t.start()
3272
3273 for t in threads:
3274 t.join()
3275 pCounters.append( t.result )
3276 # Check that counter incremented numController times
3277 pCounterResults = True
3278 for i in addedPValues:
3279 tmpResult = i in pCounters
3280 pCounterResults = pCounterResults and tmpResult
3281 if not tmpResult:
3282 main.log.error( str( i ) + " is not in partitioned "
3283 "counter incremented results" )
3284 utilities.assert_equals( expect=True,
3285 actual=pCounterResults,
3286 onpass="Default counter incremented",
3287 onfail="Error incrementing default" +
3288 " counter" )
3289
3290 main.step( "Add 5 to then get a default counter on each node" )
3291 pCounters = []
3292 threads = []
3293 addedPValues = []
3294 for i in main.activeNodes:
3295 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3296 name="counterIncrement-" + str( i ),
3297 args=[ pCounterName ],
3298 kwargs={ "delta": 5 } )
3299 pCounterValue += 5
3300 addedPValues.append( pCounterValue )
3301 threads.append( t )
3302 t.start()
3303
3304 for t in threads:
3305 t.join()
3306 pCounters.append( t.result )
3307 # Check that counter incremented numController times
3308 pCounterResults = True
3309 for i in addedPValues:
3310 tmpResult = i in pCounters
3311 pCounterResults = pCounterResults and tmpResult
3312 if not tmpResult:
3313 main.log.error( str( i ) + " is not in partitioned "
3314 "counter incremented results" )
3315 utilities.assert_equals( expect=True,
3316 actual=pCounterResults,
3317 onpass="Default counter incremented",
3318 onfail="Error incrementing default" +
3319 " counter" )
3320
3321 main.step( "Get then add 5 to a default counter on each node" )
3322 pCounters = []
3323 threads = []
3324 addedPValues = []
3325 for i in main.activeNodes:
3326 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3327 name="counterIncrement-" + str( i ),
3328 args=[ pCounterName ],
3329 kwargs={ "delta": 5 } )
3330 addedPValues.append( pCounterValue )
3331 pCounterValue += 5
3332 threads.append( t )
3333 t.start()
3334
3335 for t in threads:
3336 t.join()
3337 pCounters.append( t.result )
3338 # Check that counter incremented numController times
3339 pCounterResults = True
3340 for i in addedPValues:
3341 tmpResult = i in pCounters
3342 pCounterResults = pCounterResults and tmpResult
3343 if not tmpResult:
3344 main.log.error( str( i ) + " is not in partitioned "
3345 "counter incremented results" )
3346 utilities.assert_equals( expect=True,
3347 actual=pCounterResults,
3348 onpass="Default counter incremented",
3349 onfail="Error incrementing default" +
3350 " counter" )
3351
3352 main.step( "Counters we added have the correct values" )
3353 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3354 utilities.assert_equals( expect=main.TRUE,
3355 actual=incrementCheck,
3356 onpass="Added counters are correct",
3357 onfail="Added counters are incorrect" )
3358
3359 # DISTRIBUTED SETS
3360 main.step( "Distributed Set get" )
3361 size = len( onosSet )
3362 getResponses = []
3363 threads = []
3364 for i in main.activeNodes:
3365 t = main.Thread( target=main.CLIs[i].setTestGet,
3366 name="setTestGet-" + str( i ),
3367 args=[ onosSetName ] )
3368 threads.append( t )
3369 t.start()
3370 for t in threads:
3371 t.join()
3372 getResponses.append( t.result )
3373
3374 getResults = main.TRUE
3375 for i in range( len( main.activeNodes ) ):
3376 node = str( main.activeNodes[i] + 1 )
3377 if isinstance( getResponses[ i ], list):
3378 current = set( getResponses[ i ] )
3379 if len( current ) == len( getResponses[ i ] ):
3380 # no repeats
3381 if onosSet != current:
3382 main.log.error( "ONOS" + node +
3383 " has incorrect view" +
3384 " of set " + onosSetName + ":\n" +
3385 str( getResponses[ i ] ) )
3386 main.log.debug( "Expected: " + str( onosSet ) )
3387 main.log.debug( "Actual: " + str( current ) )
3388 getResults = main.FALSE
3389 else:
3390 # error, set is not a set
3391 main.log.error( "ONOS" + node +
3392 " has repeat elements in" +
3393 " set " + onosSetName + ":\n" +
3394 str( getResponses[ i ] ) )
3395 getResults = main.FALSE
3396 elif getResponses[ i ] == main.ERROR:
3397 getResults = main.FALSE
3398 utilities.assert_equals( expect=main.TRUE,
3399 actual=getResults,
3400 onpass="Set elements are correct",
3401 onfail="Set elements are incorrect" )
3402
3403 main.step( "Distributed Set size" )
3404 sizeResponses = []
3405 threads = []
3406 for i in main.activeNodes:
3407 t = main.Thread( target=main.CLIs[i].setTestSize,
3408 name="setTestSize-" + str( i ),
3409 args=[ onosSetName ] )
3410 threads.append( t )
3411 t.start()
3412 for t in threads:
3413 t.join()
3414 sizeResponses.append( t.result )
3415
3416 sizeResults = main.TRUE
3417 for i in range( len( main.activeNodes ) ):
3418 node = str( main.activeNodes[i] + 1 )
3419 if size != sizeResponses[ i ]:
3420 sizeResults = main.FALSE
3421 main.log.error( "ONOS" + node +
3422 " expected a size of " + str( size ) +
3423 " for set " + onosSetName +
3424 " but got " + str( sizeResponses[ i ] ) )
3425 utilities.assert_equals( expect=main.TRUE,
3426 actual=sizeResults,
3427 onpass="Set sizes are correct",
3428 onfail="Set sizes are incorrect" )
3429
3430 main.step( "Distributed Set add()" )
3431 onosSet.add( addValue )
3432 addResponses = []
3433 threads = []
3434 for i in main.activeNodes:
3435 t = main.Thread( target=main.CLIs[i].setTestAdd,
3436 name="setTestAdd-" + str( i ),
3437 args=[ onosSetName, addValue ] )
3438 threads.append( t )
3439 t.start()
3440 for t in threads:
3441 t.join()
3442 addResponses.append( t.result )
3443
3444 # main.TRUE = successfully changed the set
3445 # main.FALSE = action resulted in no change in set
3446 # main.ERROR - Some error in executing the function
3447 addResults = main.TRUE
3448 for i in range( len( main.activeNodes ) ):
3449 if addResponses[ i ] == main.TRUE:
3450 # All is well
3451 pass
3452 elif addResponses[ i ] == main.FALSE:
3453 # Already in set, probably fine
3454 pass
3455 elif addResponses[ i ] == main.ERROR:
3456 # Error in execution
3457 addResults = main.FALSE
3458 else:
3459 # unexpected result
3460 addResults = main.FALSE
3461 if addResults != main.TRUE:
3462 main.log.error( "Error executing set add" )
3463
3464 # Check if set is still correct
3465 size = len( onosSet )
3466 getResponses = []
3467 threads = []
3468 for i in main.activeNodes:
3469 t = main.Thread( target=main.CLIs[i].setTestGet,
3470 name="setTestGet-" + str( i ),
3471 args=[ onosSetName ] )
3472 threads.append( t )
3473 t.start()
3474 for t in threads:
3475 t.join()
3476 getResponses.append( t.result )
3477 getResults = main.TRUE
3478 for i in range( len( main.activeNodes ) ):
3479 node = str( main.activeNodes[i] + 1 )
3480 if isinstance( getResponses[ i ], list):
3481 current = set( getResponses[ i ] )
3482 if len( current ) == len( getResponses[ i ] ):
3483 # no repeats
3484 if onosSet != current:
3485 main.log.error( "ONOS" + node + " has incorrect view" +
3486 " of set " + onosSetName + ":\n" +
3487 str( getResponses[ i ] ) )
3488 main.log.debug( "Expected: " + str( onosSet ) )
3489 main.log.debug( "Actual: " + str( current ) )
3490 getResults = main.FALSE
3491 else:
3492 # error, set is not a set
3493 main.log.error( "ONOS" + node + " has repeat elements in" +
3494 " set " + onosSetName + ":\n" +
3495 str( getResponses[ i ] ) )
3496 getResults = main.FALSE
3497 elif getResponses[ i ] == main.ERROR:
3498 getResults = main.FALSE
3499 sizeResponses = []
3500 threads = []
3501 for i in main.activeNodes:
3502 t = main.Thread( target=main.CLIs[i].setTestSize,
3503 name="setTestSize-" + str( i ),
3504 args=[ onosSetName ] )
3505 threads.append( t )
3506 t.start()
3507 for t in threads:
3508 t.join()
3509 sizeResponses.append( t.result )
3510 sizeResults = main.TRUE
3511 for i in range( len( main.activeNodes ) ):
3512 node = str( main.activeNodes[i] + 1 )
3513 if size != sizeResponses[ i ]:
3514 sizeResults = main.FALSE
3515 main.log.error( "ONOS" + node +
3516 " expected a size of " + str( size ) +
3517 " for set " + onosSetName +
3518 " but got " + str( sizeResponses[ i ] ) )
3519 addResults = addResults and getResults and sizeResults
3520 utilities.assert_equals( expect=main.TRUE,
3521 actual=addResults,
3522 onpass="Set add correct",
3523 onfail="Set add was incorrect" )
3524
3525 main.step( "Distributed Set addAll()" )
3526 onosSet.update( addAllValue.split() )
3527 addResponses = []
3528 threads = []
3529 for i in main.activeNodes:
3530 t = main.Thread( target=main.CLIs[i].setTestAdd,
3531 name="setTestAddAll-" + str( i ),
3532 args=[ onosSetName, addAllValue ] )
3533 threads.append( t )
3534 t.start()
3535 for t in threads:
3536 t.join()
3537 addResponses.append( t.result )
3538
3539 # main.TRUE = successfully changed the set
3540 # main.FALSE = action resulted in no change in set
3541 # main.ERROR - Some error in executing the function
3542 addAllResults = main.TRUE
3543 for i in range( len( main.activeNodes ) ):
3544 if addResponses[ i ] == main.TRUE:
3545 # All is well
3546 pass
3547 elif addResponses[ i ] == main.FALSE:
3548 # Already in set, probably fine
3549 pass
3550 elif addResponses[ i ] == main.ERROR:
3551 # Error in execution
3552 addAllResults = main.FALSE
3553 else:
3554 # unexpected result
3555 addAllResults = main.FALSE
3556 if addAllResults != main.TRUE:
3557 main.log.error( "Error executing set addAll" )
3558
3559 # Check if set is still correct
3560 size = len( onosSet )
3561 getResponses = []
3562 threads = []
3563 for i in main.activeNodes:
3564 t = main.Thread( target=main.CLIs[i].setTestGet,
3565 name="setTestGet-" + str( i ),
3566 args=[ onosSetName ] )
3567 threads.append( t )
3568 t.start()
3569 for t in threads:
3570 t.join()
3571 getResponses.append( t.result )
3572 getResults = main.TRUE
3573 for i in range( len( main.activeNodes ) ):
3574 node = str( main.activeNodes[i] + 1 )
3575 if isinstance( getResponses[ i ], list):
3576 current = set( getResponses[ i ] )
3577 if len( current ) == len( getResponses[ i ] ):
3578 # no repeats
3579 if onosSet != current:
3580 main.log.error( "ONOS" + node +
3581 " has incorrect view" +
3582 " of set " + onosSetName + ":\n" +
3583 str( getResponses[ i ] ) )
3584 main.log.debug( "Expected: " + str( onosSet ) )
3585 main.log.debug( "Actual: " + str( current ) )
3586 getResults = main.FALSE
3587 else:
3588 # error, set is not a set
3589 main.log.error( "ONOS" + node +
3590 " has repeat elements in" +
3591 " set " + onosSetName + ":\n" +
3592 str( getResponses[ i ] ) )
3593 getResults = main.FALSE
3594 elif getResponses[ i ] == main.ERROR:
3595 getResults = main.FALSE
3596 sizeResponses = []
3597 threads = []
3598 for i in main.activeNodes:
3599 t = main.Thread( target=main.CLIs[i].setTestSize,
3600 name="setTestSize-" + str( i ),
3601 args=[ onosSetName ] )
3602 threads.append( t )
3603 t.start()
3604 for t in threads:
3605 t.join()
3606 sizeResponses.append( t.result )
3607 sizeResults = main.TRUE
3608 for i in range( len( main.activeNodes ) ):
3609 node = str( main.activeNodes[i] + 1 )
3610 if size != sizeResponses[ i ]:
3611 sizeResults = main.FALSE
3612 main.log.error( "ONOS" + node +
3613 " expected a size of " + str( size ) +
3614 " for set " + onosSetName +
3615 " but got " + str( sizeResponses[ i ] ) )
3616 addAllResults = addAllResults and getResults and sizeResults
3617 utilities.assert_equals( expect=main.TRUE,
3618 actual=addAllResults,
3619 onpass="Set addAll correct",
3620 onfail="Set addAll was incorrect" )
3621
3622 main.step( "Distributed Set contains()" )
3623 containsResponses = []
3624 threads = []
3625 for i in main.activeNodes:
3626 t = main.Thread( target=main.CLIs[i].setTestGet,
3627 name="setContains-" + str( i ),
3628 args=[ onosSetName ],
3629 kwargs={ "values": addValue } )
3630 threads.append( t )
3631 t.start()
3632 for t in threads:
3633 t.join()
3634 # NOTE: This is the tuple
3635 containsResponses.append( t.result )
3636
3637 containsResults = main.TRUE
3638 for i in range( len( main.activeNodes ) ):
3639 if containsResponses[ i ] == main.ERROR:
3640 containsResults = main.FALSE
3641 else:
3642 containsResults = containsResults and\
3643 containsResponses[ i ][ 1 ]
3644 utilities.assert_equals( expect=main.TRUE,
3645 actual=containsResults,
3646 onpass="Set contains is functional",
3647 onfail="Set contains failed" )
3648
3649 main.step( "Distributed Set containsAll()" )
3650 containsAllResponses = []
3651 threads = []
3652 for i in main.activeNodes:
3653 t = main.Thread( target=main.CLIs[i].setTestGet,
3654 name="setContainsAll-" + str( i ),
3655 args=[ onosSetName ],
3656 kwargs={ "values": addAllValue } )
3657 threads.append( t )
3658 t.start()
3659 for t in threads:
3660 t.join()
3661 # NOTE: This is the tuple
3662 containsAllResponses.append( t.result )
3663
3664 containsAllResults = main.TRUE
3665 for i in range( len( main.activeNodes ) ):
3666 if containsResponses[ i ] == main.ERROR:
3667 containsResults = main.FALSE
3668 else:
3669 containsResults = containsResults and\
3670 containsResponses[ i ][ 1 ]
3671 utilities.assert_equals( expect=main.TRUE,
3672 actual=containsAllResults,
3673 onpass="Set containsAll is functional",
3674 onfail="Set containsAll failed" )
3675
        # Remove a single value: update the local reference copy (onosSet),
        # issue the remove on every node in parallel, then verify every
        # node's view of the set and its reported size against the reference.
        main.step( "Distributed Set remove()" )
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        # Expected size comes from the local reference copy
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3772
        # Remove several values at once: update the local reference copy,
        # issue the multi-value remove on every node in parallel, then verify
        # every node's view and reported size against the reference.
        main.step( "Distributed Set removeAll()" )
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        # NOTE(review): if a thread raises, removeAllResponses can end up
        # shorter than main.activeNodes and the check loop below would
        # IndexError - confirm whether that is acceptable here
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        # Expected size comes from the local reference copy
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3872
        # Re-add the batch of values (restoring what removeAll just removed):
        # update the local reference copy, add on every node in parallel,
        # then verify every node's view and reported size.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        # Expected size comes from the local reference copy
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3969
        # Clear the set: empty the local reference copy, then issue a clear
        # (setTestRemove with clear=True) on every node in parallel, and
        # verify every node's view and reported size (expected 0).
        main.step( "Distributed Set clear()" )
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        # Expected size comes from the local reference copy
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4067
        # Repopulate the set after clear(): update the local reference copy,
        # add the batch on every node in parallel, then verify every node's
        # view and reported size.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        # Expected size comes from the local reference copy
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4164
        # Retain (set intersection): keep only the values in retainValue.
        # Update the local reference copy, issue setTestRemove with
        # retain=True on every node in parallel, then verify views and size.
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        # Expected size comes from the local reference copy
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4261
        # Transactional maps
        # Put numKeys entries (via one node's CLI) into the partitioned
        # transactional map, all with the same value, and verify the CLI
        # echoed back exactly 100 entries each carrying that value.
        main.step( "Partitioned Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        # Only the first active node issues the puts
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        if putResponses and len( putResponses ) == 100:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )
4281
        # For each key written above ("Key1".."Key100"), read it from every
        # active node in parallel and check that all nodes return the value
        # that was put (tMapValue).
        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # here "node" is a per-node response value, not a node index
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )