blob: 24daf1d18939f51f795878327ad46c2440d4d799 [file] [log] [blame]
Jon Hall69b2b982016-05-11 12:04:59 -07001"""
2Description: This test is to determine if ONOS can handle
3 dynamic swapping of cluster nodes.
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE6: Swap nodes
13CASE7: Check state after control plane failure
14CASE8: Compare topo
15CASE9: Link s3-s28 down
16CASE10: Link s3-s28 up
17CASE11: Switch down
18CASE12: Switch up
19CASE13: Clean up
20CASE14: start election app on all onos nodes
21CASE15: Check that Leadership Election is still functional
22CASE16: Install Distributed Primitives app
23CASE17: Check for basic functionality with distributed primitives
24"""
25
26
27class HAswapNodes:
28
29 def __init__( self ):
30 self.default = ''
31
32 def CASE1( self, main ):
33 """
34 CASE1 is to compile ONOS and push it to the test machines
35
36 Startup sequence:
37 cell <name>
38 onos-verify-cell
39 NOTE: temporary - onos-remove-raft-logs
40 onos-uninstall
41 start mininet
42 git pull
43 mvn clean install
44 onos-package
45 onos-install -f
46 onos-wait-for-start
47 start cli sessions
48 start tcpdump
49 """
50 import time
51 import os
52 import re
53 main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
54 "initialization" )
55 main.case( "Setting up test environment" )
56 main.caseExplanation = "Setup the test environment including " +\
57 "installing ONOS, starting Mininet and ONOS" +\
58 "cli sessions."
59
60 # load some variables from the params file
61 PULLCODE = False
62 if main.params[ 'Git' ] == 'True':
63 PULLCODE = True
64 gitBranch = main.params[ 'branch' ]
65 cellName = main.params[ 'ENV' ][ 'cellName' ]
66
67 main.numCtrls = int( main.params[ 'num_controllers' ] )
68 if main.ONOSbench.maxNodes:
69 if main.ONOSbench.maxNodes < main.numCtrls:
70 main.numCtrls = int( main.ONOSbench.maxNodes )
71 # set global variables
72 # These are for csv plotting in jenkins
73 global labels
74 global data
75 labels = []
76 data = []
77
78 try:
79 from tests.HA.dependencies.HA import HA
80 main.HA = HA()
81 from tests.HA.HAswapNodes.dependencies.Server import Server
82 main.Server = Server()
83 except Exception as e:
84 main.log.exception( e )
85 main.cleanup()
86 main.exit()
87
88 main.CLIs = []
89 main.nodes = []
90 ipList = []
91 for i in range( 1, main.numCtrls + 1 ):
92 try:
93 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
94 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
95 ipList.append( main.nodes[ -1 ].ip_address )
96 except AttributeError:
97 break
98
99 main.step( "Create cell file" )
100 cellAppString = main.params[ 'ENV' ][ 'appString' ]
101 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
102 main.Mininet1.ip_address,
103 cellAppString, ipList )
104
105 main.step( "Applying cell variable to environment" )
106 cellResult = main.ONOSbench.setCell( cellName )
107 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
108 onpass="Set cell successfull",
109 onfail="Failled to set cell" )
110
111 main.step( "Verify connectivity to cell" )
112 verifyResult = main.ONOSbench.verifyCell()
113 utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
114 onpass="Verify cell passed",
115 onfail="Failled to verify cell" )
116
117 # FIXME:this is short term fix
118 main.log.info( "Removing raft logs" )
119 main.ONOSbench.onosRemoveRaftLogs()
120
121 main.log.info( "Uninstalling ONOS" )
122 for node in main.nodes:
123 main.ONOSbench.onosUninstall( node.ip_address )
124
125 # Make sure ONOS is DEAD
126 main.log.info( "Killing any ONOS processes" )
127 killResults = main.TRUE
128 for node in main.nodes:
129 killed = main.ONOSbench.onosKill( node.ip_address )
130 killResults = killResults and killed
131
132 main.step( "Setup server for cluster metadata file" )
Jon Hall8f6d4622016-05-23 15:27:18 -0700133 port = main.params['server']['port']
Jon Hall69b2b982016-05-11 12:04:59 -0700134 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
135 main.log.debug( "Root dir: {}".format( rootDir ) )
136 status = main.Server.start( main.ONOSbench,
137 rootDir,
138 port=port,
139 logDir=main.logdir + "/server.log" )
140 utilities.assert_equals( expect=main.TRUE, actual=status,
141 onpass="Server started",
142 onfail="Failled to start SimpleHTTPServer" )
143
144 main.step( "Generate initial metadata file" )
145 if main.numCtrls >= 5:
146 main.numCtrls -= 2
147 else:
148 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
149 genResult = main.Server.generateFile( main.numCtrls )
150 utilities.assert_equals( expect=main.TRUE, actual=genResult,
151 onpass="New cluster metadata file generated",
152 onfail="Failled to generate new metadata file" )
153
154 cleanInstallResult = main.TRUE
155 gitPullResult = main.TRUE
156
157 main.step( "Starting Mininet" )
158 # scp topo file to mininet
159 # TODO: move to params?
160 topoName = "obelisk.py"
161 filePath = main.ONOSbench.home + "/tools/test/topos/"
162 main.ONOSbench.scp( main.Mininet1,
163 filePath + topoName,
164 main.Mininet1.home,
165 direction="to" )
166 mnResult = main.Mininet1.startNet( )
167 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
168 onpass="Mininet Started",
169 onfail="Error starting Mininet" )
170
171 main.step( "Git checkout and pull " + gitBranch )
172 if PULLCODE:
173 main.ONOSbench.gitCheckout( gitBranch )
174 gitPullResult = main.ONOSbench.gitPull()
175 # values of 1 or 3 are good
176 utilities.assert_lesser( expect=0, actual=gitPullResult,
177 onpass="Git pull successful",
178 onfail="Git pull failed" )
179 main.ONOSbench.getVersion( report=True )
180
181 main.step( "Using mvn clean install" )
182 cleanInstallResult = main.TRUE
183 if PULLCODE and gitPullResult == main.TRUE:
184 cleanInstallResult = main.ONOSbench.cleanInstall()
185 else:
186 main.log.warn( "Did not pull new code so skipping mvn " +
187 "clean install" )
188 utilities.assert_equals( expect=main.TRUE,
189 actual=cleanInstallResult,
190 onpass="MCI successful",
191 onfail="MCI failed" )
192 # GRAPHS
193 # NOTE: important params here:
194 # job = name of Jenkins job
195 # Plot Name = Plot-HA, only can be used if multiple plots
196 # index = The number of the graph under plot name
197 job = "HAswapNodes"
198 plotName = "Plot-HA"
Jon Hall676e5432016-09-26 11:32:50 -0700199 index = "2"
Jon Hall69b2b982016-05-11 12:04:59 -0700200 graphs = '<ac:structured-macro ac:name="html">\n'
201 graphs += '<ac:plain-text-body><![CDATA[\n'
202 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
203 '/plot/' + plotName + '/getPlot?index=' + index +\
204 '&width=500&height=300"' +\
205 'noborder="0" width="500" height="300" scrolling="yes" ' +\
206 'seamless="seamless"></iframe>\n'
207 graphs += ']]></ac:plain-text-body>\n'
208 graphs += '</ac:structured-macro>\n'
209 main.log.wiki(graphs)
210
211 main.step( "Copying backup config files" )
212 path = "~/onos/tools/package/bin/onos-service"
213 cp = main.ONOSbench.scp( main.ONOSbench,
214 path,
215 path + ".backup",
216 direction="to" )
217
218 utilities.assert_equals( expect=main.TRUE,
219 actual=cp,
220 onpass="Copy backup config file succeeded",
221 onfail="Copy backup config file failed" )
222 # we need to modify the onos-service file to use remote metadata file
223 # url for cluster metadata file
Jon Hall8f6d4622016-05-23 15:27:18 -0700224 iface = main.params['server'].get( 'interface' )
225 ip = main.ONOSbench.getIpAddr( iface=iface )
Jon Hall69b2b982016-05-11 12:04:59 -0700226 metaFile = "cluster.json"
227 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
228 main.log.warn( javaArgs )
229 main.log.warn( repr( javaArgs ) )
230 handle = main.ONOSbench.handle
231 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
232 main.log.warn( sed )
233 main.log.warn( repr( sed ) )
234 handle.sendline( sed )
Jon Hallbd60ea02016-08-23 10:03:59 -0700235 handle.expect( metaFile )
236 output = handle.before
Jon Hall69b2b982016-05-11 12:04:59 -0700237 handle.expect( "\$" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700238 output += handle.before
239 main.log.debug( repr( output ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700240
241 main.step( "Creating ONOS package" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700242 packageResult = main.ONOSbench.buckBuild()
Jon Hall69b2b982016-05-11 12:04:59 -0700243 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
244 onpass="ONOS package successful",
245 onfail="ONOS package failed" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700246 if not packageResult:
247 main.cleanup()
248 main.exit()
Jon Hall69b2b982016-05-11 12:04:59 -0700249
250 main.step( "Installing ONOS package" )
251 onosInstallResult = main.TRUE
252 for i in range( main.ONOSbench.maxNodes ):
253 node = main.nodes[i]
254 options = "-f"
255 if i >= main.numCtrls:
256 options = "-nf" # Don't start more than the current scale
257 tmpResult = main.ONOSbench.onosInstall( options=options,
258 node=node.ip_address )
259 onosInstallResult = onosInstallResult and tmpResult
260 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
261 onpass="ONOS install successful",
262 onfail="ONOS install failed" )
263
264 # Cleanup custom onos-service file
265 main.ONOSbench.scp( main.ONOSbench,
266 path + ".backup",
267 path,
268 direction="to" )
269
270 main.step( "Checking if ONOS is up yet" )
271 for i in range( 2 ):
272 onosIsupResult = main.TRUE
273 for i in range( main.numCtrls ):
274 node = main.nodes[i]
275 started = main.ONOSbench.isup( node.ip_address )
276 if not started:
277 main.log.error( node.name + " hasn't started" )
278 onosIsupResult = onosIsupResult and started
279 if onosIsupResult == main.TRUE:
280 break
281 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
282 onpass="ONOS startup successful",
283 onfail="ONOS startup failed" )
284
Jon Hall6509dbf2016-06-21 17:01:17 -0700285 main.step( "Starting ONOS CLI sessions" )
Jon Hall69b2b982016-05-11 12:04:59 -0700286 cliResults = main.TRUE
287 threads = []
288 for i in range( main.numCtrls ):
289 t = main.Thread( target=main.CLIs[i].startOnosCli,
290 name="startOnosCli-" + str( i ),
291 args=[main.nodes[i].ip_address] )
292 threads.append( t )
293 t.start()
294
295 for t in threads:
296 t.join()
297 cliResults = cliResults and t.result
298 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
299 onpass="ONOS cli startup successful",
300 onfail="ONOS cli startup failed" )
301
302 # Create a list of active nodes for use when some nodes are stopped
303 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
304
305 if main.params[ 'tcpdump' ].lower() == "true":
306 main.step( "Start Packet Capture MN" )
307 main.Mininet2.startTcpdump(
308 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
309 + "-MN.pcap",
310 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
311 port=main.params[ 'MNtcpdump' ][ 'port' ] )
312
313 main.step( "Checking ONOS nodes" )
314 nodeResults = utilities.retry( main.HA.nodesCheck,
315 False,
316 args=[main.activeNodes],
317 attempts=5 )
318 utilities.assert_equals( expect=True, actual=nodeResults,
319 onpass="Nodes check successful",
320 onfail="Nodes check NOT successful" )
321
322 if not nodeResults:
323 for i in main.activeNodes:
324 cli = main.CLIs[i]
325 main.log.debug( "{} components not ACTIVE: \n{}".format(
326 cli.name,
327 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
328 main.log.error( "Failed to start ONOS, stopping test" )
329 main.cleanup()
330 main.exit()
331
332 main.step( "Activate apps defined in the params file" )
333 # get data from the params
334 apps = main.params.get( 'apps' )
335 if apps:
336 apps = apps.split(',')
337 main.log.warn( apps )
338 activateResult = True
339 for app in apps:
340 main.CLIs[ 0 ].app( app, "Activate" )
341 # TODO: check this worked
342 time.sleep( 10 ) # wait for apps to activate
343 for app in apps:
344 state = main.CLIs[ 0 ].appStatus( app )
345 if state == "ACTIVE":
346 activateResult = activateResult and True
347 else:
348 main.log.error( "{} is in {} state".format( app, state ) )
349 activateResult = False
350 utilities.assert_equals( expect=True,
351 actual=activateResult,
352 onpass="Successfully activated apps",
353 onfail="Failed to activate apps" )
354 else:
355 main.log.warn( "No apps were specified to be loaded after startup" )
356
357 main.step( "Set ONOS configurations" )
358 config = main.params.get( 'ONOS_Configuration' )
359 if config:
360 main.log.debug( config )
361 checkResult = main.TRUE
362 for component in config:
363 for setting in config[component]:
364 value = config[component][setting]
365 check = main.CLIs[ 0 ].setCfg( component, setting, value )
366 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
367 checkResult = check and checkResult
368 utilities.assert_equals( expect=main.TRUE,
369 actual=checkResult,
370 onpass="Successfully set config",
371 onfail="Failed to set config" )
372 else:
373 main.log.warn( "No configurations were specified to be changed after startup" )
374
375 main.step( "App Ids check" )
376 appCheck = main.TRUE
377 threads = []
378 for i in main.activeNodes:
379 t = main.Thread( target=main.CLIs[i].appToIDCheck,
380 name="appToIDCheck-" + str( i ),
381 args=[] )
382 threads.append( t )
383 t.start()
384
385 for t in threads:
386 t.join()
387 appCheck = appCheck and t.result
388 if appCheck != main.TRUE:
389 node = main.activeNodes[0]
390 main.log.warn( main.CLIs[node].apps() )
391 main.log.warn( main.CLIs[node].appIDs() )
392 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
393 onpass="App Ids seem to be correct",
394 onfail="Something is wrong with app Ids" )
395
396 def CASE2( self, main ):
397 """
398 Assign devices to controllers
399 """
400 import re
401 assert main.numCtrls, "main.numCtrls not defined"
402 assert main, "main not defined"
403 assert utilities.assert_equals, "utilities.assert_equals not defined"
404 assert main.CLIs, "main.CLIs not defined"
405 assert main.nodes, "main.nodes not defined"
406
407 main.case( "Assigning devices to controllers" )
408 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
409 "and check that an ONOS node becomes the " +\
410 "master of the device."
411 main.step( "Assign switches to controllers" )
412
413 ipList = []
414 for i in range( main.ONOSbench.maxNodes ):
415 ipList.append( main.nodes[ i ].ip_address )
416 swList = []
417 for i in range( 1, 29 ):
418 swList.append( "s" + str( i ) )
419 main.Mininet1.assignSwController( sw=swList, ip=ipList )
420
421 mastershipCheck = main.TRUE
422 for i in range( 1, 29 ):
423 response = main.Mininet1.getSwController( "s" + str( i ) )
424 try:
425 main.log.info( str( response ) )
426 except Exception:
427 main.log.info( repr( response ) )
428 for node in main.nodes:
429 if re.search( "tcp:" + node.ip_address, response ):
430 mastershipCheck = mastershipCheck and main.TRUE
431 else:
432 main.log.error( "Error, node " + node.ip_address + " is " +
433 "not in the list of controllers s" +
434 str( i ) + " is connecting to." )
435 mastershipCheck = main.FALSE
436 utilities.assert_equals(
437 expect=main.TRUE,
438 actual=mastershipCheck,
439 onpass="Switch mastership assigned correctly",
440 onfail="Switches not assigned correctly to controllers" )
441
442 def CASE21( self, main ):
443 """
444 Assign mastership to controllers
445 """
446 import time
447 assert main.numCtrls, "main.numCtrls not defined"
448 assert main, "main not defined"
449 assert utilities.assert_equals, "utilities.assert_equals not defined"
450 assert main.CLIs, "main.CLIs not defined"
451 assert main.nodes, "main.nodes not defined"
452
453 main.case( "Assigning Controller roles for switches" )
454 main.caseExplanation = "Check that ONOS is connected to each " +\
455 "device. Then manually assign" +\
456 " mastership to specific ONOS nodes using" +\
457 " 'device-role'"
458 main.step( "Assign mastership of switches to specific controllers" )
459 # Manually assign mastership to the controller we want
460 roleCall = main.TRUE
461
462 ipList = [ ]
463 deviceList = []
464 onosCli = main.CLIs[ main.activeNodes[0] ]
465 try:
466 # Assign mastership to specific controllers. This assignment was
467 # determined for a 7 node cluser, but will work with any sized
468 # cluster
469 for i in range( 1, 29 ): # switches 1 through 28
470 # set up correct variables:
471 if i == 1:
472 c = 0
473 ip = main.nodes[ c ].ip_address # ONOS1
474 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
475 elif i == 2:
476 c = 1 % main.numCtrls
477 ip = main.nodes[ c ].ip_address # ONOS2
478 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
479 elif i == 3:
480 c = 1 % main.numCtrls
481 ip = main.nodes[ c ].ip_address # ONOS2
482 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
483 elif i == 4:
484 c = 3 % main.numCtrls
485 ip = main.nodes[ c ].ip_address # ONOS4
486 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
487 elif i == 5:
488 c = 2 % main.numCtrls
489 ip = main.nodes[ c ].ip_address # ONOS3
490 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
491 elif i == 6:
492 c = 2 % main.numCtrls
493 ip = main.nodes[ c ].ip_address # ONOS3
494 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
495 elif i == 7:
496 c = 5 % main.numCtrls
497 ip = main.nodes[ c ].ip_address # ONOS6
498 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
499 elif i >= 8 and i <= 17:
500 c = 4 % main.numCtrls
501 ip = main.nodes[ c ].ip_address # ONOS5
502 dpid = '3' + str( i ).zfill( 3 )
503 deviceId = onosCli.getDevice( dpid ).get( 'id' )
504 elif i >= 18 and i <= 27:
505 c = 6 % main.numCtrls
506 ip = main.nodes[ c ].ip_address # ONOS7
507 dpid = '6' + str( i ).zfill( 3 )
508 deviceId = onosCli.getDevice( dpid ).get( 'id' )
509 elif i == 28:
510 c = 0
511 ip = main.nodes[ c ].ip_address # ONOS1
512 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
513 else:
514 main.log.error( "You didn't write an else statement for " +
515 "switch s" + str( i ) )
516 roleCall = main.FALSE
517 # Assign switch
518 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
519 # TODO: make this controller dynamic
520 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
521 ipList.append( ip )
522 deviceList.append( deviceId )
523 except ( AttributeError, AssertionError ):
524 main.log.exception( "Something is wrong with ONOS device view" )
525 main.log.info( onosCli.devices() )
526 utilities.assert_equals(
527 expect=main.TRUE,
528 actual=roleCall,
529 onpass="Re-assigned switch mastership to designated controller",
530 onfail="Something wrong with deviceRole calls" )
531
532 main.step( "Check mastership was correctly assigned" )
533 roleCheck = main.TRUE
534 # NOTE: This is due to the fact that device mastership change is not
535 # atomic and is actually a multi step process
536 time.sleep( 5 )
537 for i in range( len( ipList ) ):
538 ip = ipList[i]
539 deviceId = deviceList[i]
540 # Check assignment
541 master = onosCli.getRole( deviceId ).get( 'master' )
542 if ip in master:
543 roleCheck = roleCheck and main.TRUE
544 else:
545 roleCheck = roleCheck and main.FALSE
546 main.log.error( "Error, controller " + ip + " is not" +
547 " master " + "of device " +
548 str( deviceId ) + ". Master is " +
549 repr( master ) + "." )
550 utilities.assert_equals(
551 expect=main.TRUE,
552 actual=roleCheck,
553 onpass="Switches were successfully reassigned to designated " +
554 "controller",
555 onfail="Switches were not successfully reassigned" )
556
557 def CASE3( self, main ):
558 """
559 Assign intents
560 """
561 import time
562 import json
563 assert main.numCtrls, "main.numCtrls not defined"
564 assert main, "main not defined"
565 assert utilities.assert_equals, "utilities.assert_equals not defined"
566 assert main.CLIs, "main.CLIs not defined"
567 assert main.nodes, "main.nodes not defined"
568 try:
569 labels
570 except NameError:
571 main.log.error( "labels not defined, setting to []" )
572 labels = []
573 try:
574 data
575 except NameError:
576 main.log.error( "data not defined, setting to []" )
577 data = []
578 # NOTE: we must reinstall intents until we have a persistant intent
579 # datastore!
580 main.case( "Adding host Intents" )
581 main.caseExplanation = "Discover hosts by using pingall then " +\
582 "assign predetermined host-to-host intents." +\
583 " After installation, check that the intent" +\
584 " is distributed to all nodes and the state" +\
585 " is INSTALLED"
586
587 # install onos-app-fwd
588 main.step( "Install reactive forwarding app" )
589 onosCli = main.CLIs[ main.activeNodes[0] ]
590 installResults = onosCli.activateApp( "org.onosproject.fwd" )
591 utilities.assert_equals( expect=main.TRUE, actual=installResults,
592 onpass="Install fwd successful",
593 onfail="Install fwd failed" )
594
595 main.step( "Check app ids" )
596 appCheck = main.TRUE
597 threads = []
598 for i in main.activeNodes:
599 t = main.Thread( target=main.CLIs[i].appToIDCheck,
600 name="appToIDCheck-" + str( i ),
601 args=[] )
602 threads.append( t )
603 t.start()
604
605 for t in threads:
606 t.join()
607 appCheck = appCheck and t.result
608 if appCheck != main.TRUE:
609 main.log.warn( onosCli.apps() )
610 main.log.warn( onosCli.appIDs() )
611 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
612 onpass="App Ids seem to be correct",
613 onfail="Something is wrong with app Ids" )
614
615 main.step( "Discovering Hosts( Via pingall for now )" )
616 # FIXME: Once we have a host discovery mechanism, use that instead
617 # REACTIVE FWD test
618 pingResult = main.FALSE
619 passMsg = "Reactive Pingall test passed"
620 time1 = time.time()
621 pingResult = main.Mininet1.pingall()
622 time2 = time.time()
623 if not pingResult:
624 main.log.warn("First pingall failed. Trying again...")
625 pingResult = main.Mininet1.pingall()
626 passMsg += " on the second try"
627 utilities.assert_equals(
628 expect=main.TRUE,
629 actual=pingResult,
630 onpass= passMsg,
631 onfail="Reactive Pingall failed, " +
632 "one or more ping pairs failed" )
633 main.log.info( "Time for pingall: %2f seconds" %
634 ( time2 - time1 ) )
635 # timeout for fwd flows
636 time.sleep( 11 )
637 # uninstall onos-app-fwd
638 main.step( "Uninstall reactive forwarding app" )
639 node = main.activeNodes[0]
640 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
641 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
642 onpass="Uninstall fwd successful",
643 onfail="Uninstall fwd failed" )
644
645 main.step( "Check app ids" )
646 threads = []
647 appCheck2 = main.TRUE
648 for i in main.activeNodes:
649 t = main.Thread( target=main.CLIs[i].appToIDCheck,
650 name="appToIDCheck-" + str( i ),
651 args=[] )
652 threads.append( t )
653 t.start()
654
655 for t in threads:
656 t.join()
657 appCheck2 = appCheck2 and t.result
658 if appCheck2 != main.TRUE:
659 node = main.activeNodes[0]
660 main.log.warn( main.CLIs[node].apps() )
661 main.log.warn( main.CLIs[node].appIDs() )
662 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
663 onpass="App Ids seem to be correct",
664 onfail="Something is wrong with app Ids" )
665
666 main.step( "Add host intents via cli" )
667 intentIds = []
668 # TODO: move the host numbers to params
669 # Maybe look at all the paths we ping?
670 intentAddResult = True
671 hostResult = main.TRUE
672 for i in range( 8, 18 ):
673 main.log.info( "Adding host intent between h" + str( i ) +
674 " and h" + str( i + 10 ) )
675 host1 = "00:00:00:00:00:" + \
676 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
677 host2 = "00:00:00:00:00:" + \
678 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
679 # NOTE: getHost can return None
680 host1Dict = onosCli.getHost( host1 )
681 host2Dict = onosCli.getHost( host2 )
682 host1Id = None
683 host2Id = None
684 if host1Dict and host2Dict:
685 host1Id = host1Dict.get( 'id', None )
686 host2Id = host2Dict.get( 'id', None )
687 if host1Id and host2Id:
688 nodeNum = ( i % len( main.activeNodes ) )
689 node = main.activeNodes[nodeNum]
690 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
691 if tmpId:
692 main.log.info( "Added intent with id: " + tmpId )
693 intentIds.append( tmpId )
694 else:
695 main.log.error( "addHostIntent returned: " +
696 repr( tmpId ) )
697 else:
698 main.log.error( "Error, getHost() failed for h" + str( i ) +
699 " and/or h" + str( i + 10 ) )
700 node = main.activeNodes[0]
701 hosts = main.CLIs[node].hosts()
702 main.log.warn( "Hosts output: " )
703 try:
704 main.log.warn( json.dumps( json.loads( hosts ),
705 sort_keys=True,
706 indent=4,
707 separators=( ',', ': ' ) ) )
708 except ( ValueError, TypeError ):
709 main.log.warn( repr( hosts ) )
710 hostResult = main.FALSE
711 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
712 onpass="Found a host id for each host",
713 onfail="Error looking up host ids" )
714
715 intentStart = time.time()
716 onosIds = onosCli.getAllIntentsId()
717 main.log.info( "Submitted intents: " + str( intentIds ) )
718 main.log.info( "Intents in ONOS: " + str( onosIds ) )
719 for intent in intentIds:
720 if intent in onosIds:
721 pass # intent submitted is in onos
722 else:
723 intentAddResult = False
724 if intentAddResult:
725 intentStop = time.time()
726 else:
727 intentStop = None
728 # Print the intent states
729 intents = onosCli.intents()
730 intentStates = []
731 installedCheck = True
732 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
733 count = 0
734 try:
735 for intent in json.loads( intents ):
736 state = intent.get( 'state', None )
737 if "INSTALLED" not in state:
738 installedCheck = False
739 intentId = intent.get( 'id', None )
740 intentStates.append( ( intentId, state ) )
741 except ( ValueError, TypeError ):
742 main.log.exception( "Error parsing intents" )
743 # add submitted intents not in the store
744 tmplist = [ i for i, s in intentStates ]
745 missingIntents = False
746 for i in intentIds:
747 if i not in tmplist:
748 intentStates.append( ( i, " - " ) )
749 missingIntents = True
750 intentStates.sort()
751 for i, s in intentStates:
752 count += 1
753 main.log.info( "%-6s%-15s%-15s" %
754 ( str( count ), str( i ), str( s ) ) )
755 leaders = onosCli.leaders()
756 try:
757 missing = False
758 if leaders:
759 parsedLeaders = json.loads( leaders )
760 main.log.warn( json.dumps( parsedLeaders,
761 sort_keys=True,
762 indent=4,
763 separators=( ',', ': ' ) ) )
764 # check for all intent partitions
765 topics = []
766 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700767 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700768 main.log.debug( topics )
769 ONOStopics = [ j['topic'] for j in parsedLeaders ]
770 for topic in topics:
771 if topic not in ONOStopics:
772 main.log.error( "Error: " + topic +
773 " not in leaders" )
774 missing = True
775 else:
776 main.log.error( "leaders() returned None" )
777 except ( ValueError, TypeError ):
778 main.log.exception( "Error parsing leaders" )
779 main.log.error( repr( leaders ) )
780 # Check all nodes
781 if missing:
782 for i in main.activeNodes:
783 response = main.CLIs[i].leaders( jsonFormat=False)
784 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
785 str( response ) )
786
787 partitions = onosCli.partitions()
788 try:
789 if partitions :
790 parsedPartitions = json.loads( partitions )
791 main.log.warn( json.dumps( parsedPartitions,
792 sort_keys=True,
793 indent=4,
794 separators=( ',', ': ' ) ) )
795 # TODO check for a leader in all paritions
796 # TODO check for consistency among nodes
797 else:
798 main.log.error( "partitions() returned None" )
799 except ( ValueError, TypeError ):
800 main.log.exception( "Error parsing partitions" )
801 main.log.error( repr( partitions ) )
802 pendingMap = onosCli.pendingMap()
803 try:
804 if pendingMap :
805 parsedPending = json.loads( pendingMap )
806 main.log.warn( json.dumps( parsedPending,
807 sort_keys=True,
808 indent=4,
809 separators=( ',', ': ' ) ) )
810 # TODO check something here?
811 else:
812 main.log.error( "pendingMap() returned None" )
813 except ( ValueError, TypeError ):
814 main.log.exception( "Error parsing pending map" )
815 main.log.error( repr( pendingMap ) )
816
817 intentAddResult = bool( intentAddResult and not missingIntents and
818 installedCheck )
819 if not intentAddResult:
820 main.log.error( "Error in pushing host intents to ONOS" )
821
822 main.step( "Intent Anti-Entropy dispersion" )
823 for j in range(100):
824 correct = True
825 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
826 for i in main.activeNodes:
827 onosIds = []
828 ids = main.CLIs[i].getAllIntentsId()
829 onosIds.append( ids )
830 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
831 str( sorted( onosIds ) ) )
832 if sorted( ids ) != sorted( intentIds ):
833 main.log.warn( "Set of intent IDs doesn't match" )
834 correct = False
835 break
836 else:
837 intents = json.loads( main.CLIs[i].intents() )
838 for intent in intents:
839 if intent[ 'state' ] != "INSTALLED":
840 main.log.warn( "Intent " + intent[ 'id' ] +
841 " is " + intent[ 'state' ] )
842 correct = False
843 break
844 if correct:
845 break
846 else:
847 time.sleep(1)
848 if not intentStop:
849 intentStop = time.time()
850 global gossipTime
851 gossipTime = intentStop - intentStart
852 main.log.info( "It took about " + str( gossipTime ) +
853 " seconds for all intents to appear in each node" )
854 append = False
855 title = "Gossip Intents"
856 count = 1
857 while append is False:
858 curTitle = title + str( count )
859 if curTitle not in labels:
860 labels.append( curTitle )
861 data.append( str( gossipTime ) )
862 append = True
863 else:
864 count += 1
865 gossipPeriod = int( main.params['timers']['gossip'] )
866 maxGossipTime = gossipPeriod * len( main.activeNodes )
867 utilities.assert_greater_equals(
868 expect=maxGossipTime, actual=gossipTime,
869 onpass="ECM anti-entropy for intents worked within " +
870 "expected time",
871 onfail="Intent ECM anti-entropy took too long. " +
872 "Expected time:{}, Actual time:{}".format( maxGossipTime,
873 gossipTime ) )
874 if gossipTime <= maxGossipTime:
875 intentAddResult = True
876
877 if not intentAddResult or "key" in pendingMap:
878 import time
879 installedCheck = True
880 main.log.info( "Sleeping 60 seconds to see if intents are found" )
881 time.sleep( 60 )
882 onosIds = onosCli.getAllIntentsId()
883 main.log.info( "Submitted intents: " + str( intentIds ) )
884 main.log.info( "Intents in ONOS: " + str( onosIds ) )
885 # Print the intent states
886 intents = onosCli.intents()
887 intentStates = []
888 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
889 count = 0
890 try:
891 for intent in json.loads( intents ):
892 # Iter through intents of a node
893 state = intent.get( 'state', None )
894 if "INSTALLED" not in state:
895 installedCheck = False
896 intentId = intent.get( 'id', None )
897 intentStates.append( ( intentId, state ) )
898 except ( ValueError, TypeError ):
899 main.log.exception( "Error parsing intents" )
900 # add submitted intents not in the store
901 tmplist = [ i for i, s in intentStates ]
902 for i in intentIds:
903 if i not in tmplist:
904 intentStates.append( ( i, " - " ) )
905 intentStates.sort()
906 for i, s in intentStates:
907 count += 1
908 main.log.info( "%-6s%-15s%-15s" %
909 ( str( count ), str( i ), str( s ) ) )
910 leaders = onosCli.leaders()
911 try:
912 missing = False
913 if leaders:
914 parsedLeaders = json.loads( leaders )
915 main.log.warn( json.dumps( parsedLeaders,
916 sort_keys=True,
917 indent=4,
918 separators=( ',', ': ' ) ) )
919 # check for all intent partitions
920 # check for election
921 topics = []
922 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700923 topics.append( "work-partition-" + str( i ) )
Jon Hall69b2b982016-05-11 12:04:59 -0700924 # FIXME: this should only be after we start the app
925 topics.append( "org.onosproject.election" )
926 main.log.debug( topics )
927 ONOStopics = [ j['topic'] for j in parsedLeaders ]
928 for topic in topics:
929 if topic not in ONOStopics:
930 main.log.error( "Error: " + topic +
931 " not in leaders" )
932 missing = True
933 else:
934 main.log.error( "leaders() returned None" )
935 except ( ValueError, TypeError ):
936 main.log.exception( "Error parsing leaders" )
937 main.log.error( repr( leaders ) )
938 # Check all nodes
939 if missing:
940 for i in main.activeNodes:
941 node = main.CLIs[i]
942 response = node.leaders( jsonFormat=False)
943 main.log.warn( str( node.name ) + " leaders output: \n" +
944 str( response ) )
945
946 partitions = onosCli.partitions()
947 try:
948 if partitions :
949 parsedPartitions = json.loads( partitions )
950 main.log.warn( json.dumps( parsedPartitions,
951 sort_keys=True,
952 indent=4,
953 separators=( ',', ': ' ) ) )
954 # TODO check for a leader in all paritions
955 # TODO check for consistency among nodes
956 else:
957 main.log.error( "partitions() returned None" )
958 except ( ValueError, TypeError ):
959 main.log.exception( "Error parsing partitions" )
960 main.log.error( repr( partitions ) )
961 pendingMap = onosCli.pendingMap()
962 try:
963 if pendingMap :
964 parsedPending = json.loads( pendingMap )
965 main.log.warn( json.dumps( parsedPending,
966 sort_keys=True,
967 indent=4,
968 separators=( ',', ': ' ) ) )
969 # TODO check something here?
970 else:
971 main.log.error( "pendingMap() returned None" )
972 except ( ValueError, TypeError ):
973 main.log.exception( "Error parsing pending map" )
974 main.log.error( repr( pendingMap ) )
975
    def CASE4( self, main ):
        """
        Ping across added host intents

        Verifies the host intents added in CASE3 by pinging between host
        pairs h8->h18 ... h17->h27, after first polling until all intents
        report INSTALLED.  On failure, dumps intent/leader/partition/
        pending-map state from the active nodes, waits 60 seconds, and
        retries the pings once.
        """
        import json
        import time
        # Sanity-check that the TestON framework objects this case relies on exist
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All queries in this case go through the first active node's CLI
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll up to 40 times, one second apart, until every intent is INSTALLED
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host pairs are hN <-> hN+10 for N in 8..17
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                # Expect one "work-partition-N" leadership topic per partition
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If intents never reached INSTALLED, wait a minute, dump state again,
        # and retry the pings once before the final verdict
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1257
1258 def CASE5( self, main ):
1259 """
1260 Reading state of ONOS
1261 """
1262 import json
1263 import time
1264 assert main.numCtrls, "main.numCtrls not defined"
1265 assert main, "main not defined"
1266 assert utilities.assert_equals, "utilities.assert_equals not defined"
1267 assert main.CLIs, "main.CLIs not defined"
1268 assert main.nodes, "main.nodes not defined"
1269
1270 main.case( "Setting up and gathering data for current state" )
1271 # The general idea for this test case is to pull the state of
1272 # ( intents,flows, topology,... ) from each ONOS node
1273 # We can then compare them with each other and also with past states
1274
1275 main.step( "Check that each switch has a master" )
1276 global mastershipState
1277 mastershipState = '[]'
1278
1279 # Assert that each device has a master
1280 rolesNotNull = main.TRUE
1281 threads = []
1282 for i in main.activeNodes:
1283 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1284 name="rolesNotNull-" + str( i ),
1285 args=[] )
1286 threads.append( t )
1287 t.start()
1288
1289 for t in threads:
1290 t.join()
1291 rolesNotNull = rolesNotNull and t.result
1292 utilities.assert_equals(
1293 expect=main.TRUE,
1294 actual=rolesNotNull,
1295 onpass="Each device has a master",
1296 onfail="Some devices don't have a master assigned" )
1297
1298 main.step( "Get the Mastership of each switch from each controller" )
1299 ONOSMastership = []
1300 consistentMastership = True
1301 rolesResults = True
1302 threads = []
1303 for i in main.activeNodes:
1304 t = main.Thread( target=main.CLIs[i].roles,
1305 name="roles-" + str( i ),
1306 args=[] )
1307 threads.append( t )
1308 t.start()
1309
1310 for t in threads:
1311 t.join()
1312 ONOSMastership.append( t.result )
1313
1314 for i in range( len( ONOSMastership ) ):
1315 node = str( main.activeNodes[i] + 1 )
1316 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1317 main.log.error( "Error in getting ONOS" + node + " roles" )
1318 main.log.warn( "ONOS" + node + " mastership response: " +
1319 repr( ONOSMastership[i] ) )
1320 rolesResults = False
1321 utilities.assert_equals(
1322 expect=True,
1323 actual=rolesResults,
1324 onpass="No error in reading roles output",
1325 onfail="Error in reading roles from ONOS" )
1326
1327 main.step( "Check for consistency in roles from each controller" )
1328 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1329 main.log.info(
1330 "Switch roles are consistent across all ONOS nodes" )
1331 else:
1332 consistentMastership = False
1333 utilities.assert_equals(
1334 expect=True,
1335 actual=consistentMastership,
1336 onpass="Switch roles are consistent across all ONOS nodes",
1337 onfail="ONOS nodes have different views of switch roles" )
1338
1339 if rolesResults and not consistentMastership:
1340 for i in range( len( main.activeNodes ) ):
1341 node = str( main.activeNodes[i] + 1 )
1342 try:
1343 main.log.warn(
1344 "ONOS" + node + " roles: ",
1345 json.dumps(
1346 json.loads( ONOSMastership[ i ] ),
1347 sort_keys=True,
1348 indent=4,
1349 separators=( ',', ': ' ) ) )
1350 except ( ValueError, TypeError ):
1351 main.log.warn( repr( ONOSMastership[ i ] ) )
1352 elif rolesResults and consistentMastership:
1353 mastershipState = ONOSMastership[ 0 ]
1354
1355 main.step( "Get the intents from each controller" )
1356 global intentState
1357 intentState = []
1358 ONOSIntents = []
1359 consistentIntents = True # Are Intents consistent across nodes?
1360 intentsResults = True # Could we read Intents from ONOS?
1361 threads = []
1362 for i in main.activeNodes:
1363 t = main.Thread( target=main.CLIs[i].intents,
1364 name="intents-" + str( i ),
1365 args=[],
1366 kwargs={ 'jsonFormat': True } )
1367 threads.append( t )
1368 t.start()
1369
1370 for t in threads:
1371 t.join()
1372 ONOSIntents.append( t.result )
1373
1374 for i in range( len( ONOSIntents ) ):
1375 node = str( main.activeNodes[i] + 1 )
1376 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1377 main.log.error( "Error in getting ONOS" + node + " intents" )
1378 main.log.warn( "ONOS" + node + " intents response: " +
1379 repr( ONOSIntents[ i ] ) )
1380 intentsResults = False
1381 utilities.assert_equals(
1382 expect=True,
1383 actual=intentsResults,
1384 onpass="No error in reading intents output",
1385 onfail="Error in reading intents from ONOS" )
1386
1387 main.step( "Check for consistency in Intents from each controller" )
1388 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1389 main.log.info( "Intents are consistent across all ONOS " +
1390 "nodes" )
1391 else:
1392 consistentIntents = False
1393 main.log.error( "Intents not consistent" )
1394 utilities.assert_equals(
1395 expect=True,
1396 actual=consistentIntents,
1397 onpass="Intents are consistent across all ONOS nodes",
1398 onfail="ONOS nodes have different views of intents" )
1399
1400 if intentsResults:
1401 # Try to make it easy to figure out what is happening
1402 #
1403 # Intent ONOS1 ONOS2 ...
1404 # 0x01 INSTALLED INSTALLING
1405 # ... ... ...
1406 # ... ... ...
1407 title = " Id"
1408 for n in main.activeNodes:
1409 title += " " * 10 + "ONOS" + str( n + 1 )
1410 main.log.warn( title )
1411 # get all intent keys in the cluster
1412 keys = []
1413 try:
1414 # Get the set of all intent keys
1415 for nodeStr in ONOSIntents:
1416 node = json.loads( nodeStr )
1417 for intent in node:
1418 keys.append( intent.get( 'id' ) )
1419 keys = set( keys )
1420 # For each intent key, print the state on each node
1421 for key in keys:
1422 row = "%-13s" % key
1423 for nodeStr in ONOSIntents:
1424 node = json.loads( nodeStr )
1425 for intent in node:
1426 if intent.get( 'id', "Error" ) == key:
1427 row += "%-15s" % intent.get( 'state' )
1428 main.log.warn( row )
1429 # End of intent state table
1430 except ValueError as e:
1431 main.log.exception( e )
1432 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1433
1434 if intentsResults and not consistentIntents:
1435 # print the json objects
1436 n = str( main.activeNodes[-1] + 1 )
1437 main.log.debug( "ONOS" + n + " intents: " )
1438 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1439 sort_keys=True,
1440 indent=4,
1441 separators=( ',', ': ' ) ) )
1442 for i in range( len( ONOSIntents ) ):
1443 node = str( main.activeNodes[i] + 1 )
1444 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1445 main.log.debug( "ONOS" + node + " intents: " )
1446 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1447 sort_keys=True,
1448 indent=4,
1449 separators=( ',', ': ' ) ) )
1450 else:
1451 main.log.debug( "ONOS" + node + " intents match ONOS" +
1452 n + " intents" )
1453 elif intentsResults and consistentIntents:
1454 intentState = ONOSIntents[ 0 ]
1455
1456 main.step( "Get the flows from each controller" )
1457 global flowState
1458 flowState = []
1459 ONOSFlows = []
1460 ONOSFlowsJson = []
1461 flowCheck = main.FALSE
1462 consistentFlows = True
1463 flowsResults = True
1464 threads = []
1465 for i in main.activeNodes:
1466 t = main.Thread( target=main.CLIs[i].flows,
1467 name="flows-" + str( i ),
1468 args=[],
1469 kwargs={ 'jsonFormat': True } )
1470 threads.append( t )
1471 t.start()
1472
1473 # NOTE: Flows command can take some time to run
1474 time.sleep(30)
1475 for t in threads:
1476 t.join()
1477 result = t.result
1478 ONOSFlows.append( result )
1479
1480 for i in range( len( ONOSFlows ) ):
1481 num = str( main.activeNodes[i] + 1 )
1482 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1483 main.log.error( "Error in getting ONOS" + num + " flows" )
1484 main.log.warn( "ONOS" + num + " flows response: " +
1485 repr( ONOSFlows[ i ] ) )
1486 flowsResults = False
1487 ONOSFlowsJson.append( None )
1488 else:
1489 try:
1490 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1491 except ( ValueError, TypeError ):
1492 # FIXME: change this to log.error?
1493 main.log.exception( "Error in parsing ONOS" + num +
1494 " response as json." )
1495 main.log.error( repr( ONOSFlows[ i ] ) )
1496 ONOSFlowsJson.append( None )
1497 flowsResults = False
1498 utilities.assert_equals(
1499 expect=True,
1500 actual=flowsResults,
1501 onpass="No error in reading flows output",
1502 onfail="Error in reading flows from ONOS" )
1503
1504 main.step( "Check for consistency in Flows from each controller" )
1505 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1506 if all( tmp ):
1507 main.log.info( "Flow count is consistent across all ONOS nodes" )
1508 else:
1509 consistentFlows = False
1510 utilities.assert_equals(
1511 expect=True,
1512 actual=consistentFlows,
1513 onpass="The flow count is consistent across all ONOS nodes",
1514 onfail="ONOS nodes have different flow counts" )
1515
1516 if flowsResults and not consistentFlows:
1517 for i in range( len( ONOSFlows ) ):
1518 node = str( main.activeNodes[i] + 1 )
1519 try:
1520 main.log.warn(
1521 "ONOS" + node + " flows: " +
1522 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1523 indent=4, separators=( ',', ': ' ) ) )
1524 except ( ValueError, TypeError ):
1525 main.log.warn( "ONOS" + node + " flows: " +
1526 repr( ONOSFlows[ i ] ) )
1527 elif flowsResults and consistentFlows:
1528 flowCheck = main.TRUE
1529 flowState = ONOSFlows[ 0 ]
1530
1531 main.step( "Get the OF Table entries" )
1532 global flows
1533 flows = []
1534 for i in range( 1, 29 ):
1535 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1536 if flowCheck == main.FALSE:
1537 for table in flows:
1538 main.log.warn( table )
1539 # TODO: Compare switch flow tables with ONOS flow tables
1540
1541 main.step( "Start continuous pings" )
1542 main.Mininet2.pingLong(
1543 src=main.params[ 'PING' ][ 'source1' ],
1544 target=main.params[ 'PING' ][ 'target1' ],
1545 pingTime=500 )
1546 main.Mininet2.pingLong(
1547 src=main.params[ 'PING' ][ 'source2' ],
1548 target=main.params[ 'PING' ][ 'target2' ],
1549 pingTime=500 )
1550 main.Mininet2.pingLong(
1551 src=main.params[ 'PING' ][ 'source3' ],
1552 target=main.params[ 'PING' ][ 'target3' ],
1553 pingTime=500 )
1554 main.Mininet2.pingLong(
1555 src=main.params[ 'PING' ][ 'source4' ],
1556 target=main.params[ 'PING' ][ 'target4' ],
1557 pingTime=500 )
1558 main.Mininet2.pingLong(
1559 src=main.params[ 'PING' ][ 'source5' ],
1560 target=main.params[ 'PING' ][ 'target5' ],
1561 pingTime=500 )
1562 main.Mininet2.pingLong(
1563 src=main.params[ 'PING' ][ 'source6' ],
1564 target=main.params[ 'PING' ][ 'target6' ],
1565 pingTime=500 )
1566 main.Mininet2.pingLong(
1567 src=main.params[ 'PING' ][ 'source7' ],
1568 target=main.params[ 'PING' ][ 'target7' ],
1569 pingTime=500 )
1570 main.Mininet2.pingLong(
1571 src=main.params[ 'PING' ][ 'source8' ],
1572 target=main.params[ 'PING' ][ 'target8' ],
1573 pingTime=500 )
1574 main.Mininet2.pingLong(
1575 src=main.params[ 'PING' ][ 'source9' ],
1576 target=main.params[ 'PING' ][ 'target9' ],
1577 pingTime=500 )
1578 main.Mininet2.pingLong(
1579 src=main.params[ 'PING' ][ 'source10' ],
1580 target=main.params[ 'PING' ][ 'target10' ],
1581 pingTime=500 )
1582
1583 main.step( "Collecting topology information from ONOS" )
1584 devices = []
1585 threads = []
1586 for i in main.activeNodes:
1587 t = main.Thread( target=main.CLIs[i].devices,
1588 name="devices-" + str( i ),
1589 args=[ ] )
1590 threads.append( t )
1591 t.start()
1592
1593 for t in threads:
1594 t.join()
1595 devices.append( t.result )
1596 hosts = []
1597 threads = []
1598 for i in main.activeNodes:
1599 t = main.Thread( target=main.CLIs[i].hosts,
1600 name="hosts-" + str( i ),
1601 args=[ ] )
1602 threads.append( t )
1603 t.start()
1604
1605 for t in threads:
1606 t.join()
1607 try:
1608 hosts.append( json.loads( t.result ) )
1609 except ( ValueError, TypeError ):
1610 # FIXME: better handling of this, print which node
1611 # Maybe use thread name?
1612 main.log.exception( "Error parsing json output of hosts" )
1613 main.log.warn( repr( t.result ) )
1614 hosts.append( None )
1615
1616 ports = []
1617 threads = []
1618 for i in main.activeNodes:
1619 t = main.Thread( target=main.CLIs[i].ports,
1620 name="ports-" + str( i ),
1621 args=[ ] )
1622 threads.append( t )
1623 t.start()
1624
1625 for t in threads:
1626 t.join()
1627 ports.append( t.result )
1628 links = []
1629 threads = []
1630 for i in main.activeNodes:
1631 t = main.Thread( target=main.CLIs[i].links,
1632 name="links-" + str( i ),
1633 args=[ ] )
1634 threads.append( t )
1635 t.start()
1636
1637 for t in threads:
1638 t.join()
1639 links.append( t.result )
1640 clusters = []
1641 threads = []
1642 for i in main.activeNodes:
1643 t = main.Thread( target=main.CLIs[i].clusters,
1644 name="clusters-" + str( i ),
1645 args=[ ] )
1646 threads.append( t )
1647 t.start()
1648
1649 for t in threads:
1650 t.join()
1651 clusters.append( t.result )
1652 # Compare json objects for hosts and dataplane clusters
1653
1654 # hosts
1655 main.step( "Host view is consistent across ONOS nodes" )
1656 consistentHostsResult = main.TRUE
1657 for controller in range( len( hosts ) ):
1658 controllerStr = str( main.activeNodes[controller] + 1 )
1659 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1660 if hosts[ controller ] == hosts[ 0 ]:
1661 continue
1662 else: # hosts not consistent
1663 main.log.error( "hosts from ONOS" +
1664 controllerStr +
1665 " is inconsistent with ONOS1" )
1666 main.log.warn( repr( hosts[ controller ] ) )
1667 consistentHostsResult = main.FALSE
1668
1669 else:
1670 main.log.error( "Error in getting ONOS hosts from ONOS" +
1671 controllerStr )
1672 consistentHostsResult = main.FALSE
1673 main.log.warn( "ONOS" + controllerStr +
1674 " hosts response: " +
1675 repr( hosts[ controller ] ) )
1676 utilities.assert_equals(
1677 expect=main.TRUE,
1678 actual=consistentHostsResult,
1679 onpass="Hosts view is consistent across all ONOS nodes",
1680 onfail="ONOS nodes have different views of hosts" )
1681
1682 main.step( "Each host has an IP address" )
1683 ipResult = main.TRUE
1684 for controller in range( 0, len( hosts ) ):
1685 controllerStr = str( main.activeNodes[controller] + 1 )
1686 if hosts[ controller ]:
1687 for host in hosts[ controller ]:
1688 if not host.get( 'ipAddresses', [ ] ):
1689 main.log.error( "Error with host ips on controller" +
1690 controllerStr + ": " + str( host ) )
1691 ipResult = main.FALSE
1692 utilities.assert_equals(
1693 expect=main.TRUE,
1694 actual=ipResult,
1695 onpass="The ips of the hosts aren't empty",
1696 onfail="The ip of at least one host is missing" )
1697
1698 # Strongly connected clusters of devices
1699 main.step( "Cluster view is consistent across ONOS nodes" )
1700 consistentClustersResult = main.TRUE
1701 for controller in range( len( clusters ) ):
1702 controllerStr = str( main.activeNodes[controller] + 1 )
1703 if "Error" not in clusters[ controller ]:
1704 if clusters[ controller ] == clusters[ 0 ]:
1705 continue
1706 else: # clusters not consistent
1707 main.log.error( "clusters from ONOS" + controllerStr +
1708 " is inconsistent with ONOS1" )
1709 consistentClustersResult = main.FALSE
1710
1711 else:
1712 main.log.error( "Error in getting dataplane clusters " +
1713 "from ONOS" + controllerStr )
1714 consistentClustersResult = main.FALSE
1715 main.log.warn( "ONOS" + controllerStr +
1716 " clusters response: " +
1717 repr( clusters[ controller ] ) )
1718 utilities.assert_equals(
1719 expect=main.TRUE,
1720 actual=consistentClustersResult,
1721 onpass="Clusters view is consistent across all ONOS nodes",
1722 onfail="ONOS nodes have different views of clusters" )
1723 if not consistentClustersResult:
1724 main.log.debug( clusters )
1725
1726 # there should always only be one cluster
1727 main.step( "Cluster view correct across ONOS nodes" )
1728 try:
1729 numClusters = len( json.loads( clusters[ 0 ] ) )
1730 except ( ValueError, TypeError ):
1731 main.log.exception( "Error parsing clusters[0]: " +
1732 repr( clusters[ 0 ] ) )
1733 numClusters = "ERROR"
1734 utilities.assert_equals(
1735 expect=1,
1736 actual=numClusters,
1737 onpass="ONOS shows 1 SCC",
1738 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1739
1740 main.step( "Comparing ONOS topology to MN" )
1741 devicesResults = main.TRUE
1742 linksResults = main.TRUE
1743 hostsResults = main.TRUE
1744 mnSwitches = main.Mininet1.getSwitches()
1745 mnLinks = main.Mininet1.getLinks()
1746 mnHosts = main.Mininet1.getHosts()
1747 for controller in main.activeNodes:
1748 controllerStr = str( main.activeNodes[controller] + 1 )
1749 if devices[ controller ] and ports[ controller ] and\
1750 "Error" not in devices[ controller ] and\
1751 "Error" not in ports[ controller ]:
1752 currentDevicesResult = main.Mininet1.compareSwitches(
1753 mnSwitches,
1754 json.loads( devices[ controller ] ),
1755 json.loads( ports[ controller ] ) )
1756 else:
1757 currentDevicesResult = main.FALSE
1758 utilities.assert_equals( expect=main.TRUE,
1759 actual=currentDevicesResult,
1760 onpass="ONOS" + controllerStr +
1761 " Switches view is correct",
1762 onfail="ONOS" + controllerStr +
1763 " Switches view is incorrect" )
1764 if links[ controller ] and "Error" not in links[ controller ]:
1765 currentLinksResult = main.Mininet1.compareLinks(
1766 mnSwitches, mnLinks,
1767 json.loads( links[ controller ] ) )
1768 else:
1769 currentLinksResult = main.FALSE
1770 utilities.assert_equals( expect=main.TRUE,
1771 actual=currentLinksResult,
1772 onpass="ONOS" + controllerStr +
1773 " links view is correct",
1774 onfail="ONOS" + controllerStr +
1775 " links view is incorrect" )
1776
1777 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1778 currentHostsResult = main.Mininet1.compareHosts(
1779 mnHosts,
1780 hosts[ controller ] )
1781 else:
1782 currentHostsResult = main.FALSE
1783 utilities.assert_equals( expect=main.TRUE,
1784 actual=currentHostsResult,
1785 onpass="ONOS" + controllerStr +
1786 " hosts exist in Mininet",
1787 onfail="ONOS" + controllerStr +
1788 " hosts don't match Mininet" )
1789
1790 devicesResults = devicesResults and currentDevicesResult
1791 linksResults = linksResults and currentLinksResult
1792 hostsResults = hostsResults and currentHostsResult
1793
1794 main.step( "Device information is correct" )
1795 utilities.assert_equals(
1796 expect=main.TRUE,
1797 actual=devicesResults,
1798 onpass="Device information is correct",
1799 onfail="Device information is incorrect" )
1800
1801 main.step( "Links are correct" )
1802 utilities.assert_equals(
1803 expect=main.TRUE,
1804 actual=linksResults,
1805 onpass="Link are correct",
1806 onfail="Links are incorrect" )
1807
1808 main.step( "Hosts are correct" )
1809 utilities.assert_equals(
1810 expect=main.TRUE,
1811 actual=hostsResults,
1812 onpass="Hosts are correct",
1813 onfail="Hosts are incorrect" )
1814
1815 def CASE6( self, main ):
1816 """
1817 The Scaling case.
1818 """
1819 import time
1820 import re
1821 assert main.numCtrls, "main.numCtrls not defined"
1822 assert main, "main not defined"
1823 assert utilities.assert_equals, "utilities.assert_equals not defined"
1824 assert main.CLIs, "main.CLIs not defined"
1825 assert main.nodes, "main.nodes not defined"
1826 try:
1827 labels
1828 except NameError:
1829 main.log.error( "labels not defined, setting to []" )
1830 global labels
1831 labels = []
1832 try:
1833 data
1834 except NameError:
1835 main.log.error( "data not defined, setting to []" )
1836 global data
1837 data = []
1838
1839 main.case( "Swap some of the ONOS nodes" )
1840
1841 main.step( "Checking ONOS Logs for errors" )
1842 for i in main.activeNodes:
1843 node = main.nodes[i]
1844 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1845 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1846
1847 main.step( "Generate new metadata file" )
Jon Hallbd60ea02016-08-23 10:03:59 -07001848 old = [ main.activeNodes[1], main.activeNodes[-2] ]
Jon Hall69b2b982016-05-11 12:04:59 -07001849 new = range( main.ONOSbench.maxNodes )[-2:]
1850 assert len( old ) == len( new ), "Length of nodes to swap don't match"
1851 handle = main.ONOSbench.handle
1852 for x, y in zip( old, new ):
1853 handle.sendline( "export OC{}=$OC{}".format( x + 1, y + 1 ) )
1854 handle.expect( "\$" ) # from the variable
1855 ret = handle.before
1856 handle.expect( "\$" ) # From the prompt
1857 ret += handle.before
1858 main.log.debug( ret )
1859 main.activeNodes.remove( x )
1860 main.activeNodes.append( y )
1861
1862 genResult = main.Server.generateFile( main.numCtrls )
1863 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1864 onpass="New cluster metadata file generated",
1865 onfail="Failled to generate new metadata file" )
1866 time.sleep( 5 ) # Give time for nodes to read new file
1867
1868 main.step( "Start new nodes" ) # OR stop old nodes?
1869 started = main.TRUE
1870 for i in new:
1871 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1872 utilities.assert_equals( expect=main.TRUE, actual=started,
1873 onpass="ONOS started",
1874 onfail="ONOS start NOT successful" )
1875
1876 main.step( "Checking if ONOS is up yet" )
1877 for i in range( 2 ):
1878 onosIsupResult = main.TRUE
1879 for i in main.activeNodes:
1880 node = main.nodes[i]
1881 started = main.ONOSbench.isup( node.ip_address )
1882 if not started:
1883 main.log.error( node.name + " didn't start!" )
1884 onosIsupResult = onosIsupResult and started
1885 if onosIsupResult == main.TRUE:
1886 break
1887 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1888 onpass="ONOS started",
1889 onfail="ONOS start NOT successful" )
1890
Jon Hall6509dbf2016-06-21 17:01:17 -07001891 main.step( "Starting ONOS CLI sessions" )
Jon Hall69b2b982016-05-11 12:04:59 -07001892 cliResults = main.TRUE
1893 threads = []
1894 for i in main.activeNodes:
1895 t = main.Thread( target=main.CLIs[i].startOnosCli,
1896 name="startOnosCli-" + str( i ),
1897 args=[main.nodes[i].ip_address] )
1898 threads.append( t )
1899 t.start()
1900
1901 for t in threads:
1902 t.join()
1903 cliResults = cliResults and t.result
1904 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1905 onpass="ONOS cli started",
1906 onfail="ONOS clis did not start" )
1907
1908 main.step( "Checking ONOS nodes" )
1909 nodeResults = utilities.retry( main.HA.nodesCheck,
1910 False,
1911 args=[main.activeNodes],
1912 attempts=5 )
1913 utilities.assert_equals( expect=True, actual=nodeResults,
1914 onpass="Nodes check successful",
1915 onfail="Nodes check NOT successful" )
1916
1917 for i in range( 10 ):
1918 ready = True
1919 for i in main.activeNodes:
1920 cli = main.CLIs[i]
1921 output = cli.summary()
1922 if not output:
1923 ready = False
1924 if ready:
1925 break
1926 time.sleep( 30 )
1927 utilities.assert_equals( expect=True, actual=ready,
1928 onpass="ONOS summary command succeded",
1929 onfail="ONOS summary command failed" )
1930 if not ready:
1931 main.cleanup()
1932 main.exit()
1933
1934 # Rerun for election on new nodes
1935 runResults = main.TRUE
1936 for i in main.activeNodes:
1937 cli = main.CLIs[i]
1938 run = cli.electionTestRun()
1939 if run != main.TRUE:
1940 main.log.error( "Error running for election on " + cli.name )
1941 runResults = runResults and run
1942 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1943 onpass="Reran for election",
1944 onfail="Failed to rerun for election" )
1945
1946 for node in main.activeNodes:
1947 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1948 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1949 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1950 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1951 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1952
1953 main.step( "Reapplying cell variable to environment" )
1954 cellName = main.params[ 'ENV' ][ 'cellName' ]
1955 cellResult = main.ONOSbench.setCell( cellName )
1956 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
1957 onpass="Set cell successfull",
1958 onfail="Failled to set cell" )
1959
    def CASE7( self, main ):
        """
        Check state after ONOS scaling.

        Verifies that each device still has a master, that mastership and
        intents are consistent across all active nodes, that intents and
        OF flow tables match the state saved before the scaling (CASE5),
        and that the leadership-election app is still functional.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # Query every active node in parallel, one thread per node
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect each node's view of device mastership (one entry per node)
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Every node's roles output must match node 0's
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's full mastership view for debugging
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a histogram of intent states per node
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        try:
            # intentState is a global saved by an earlier case
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same number of intents; compare them individually
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before scaling",
            onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        FlowTables = main.TRUE
        # `flows` is a global saved by an earlier case (before scaling)
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        # All nodes must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2268
    def CASE8( self, main ):
        """
        Compare topo.

        Repeatedly pulls devices, hosts, ports, links, and clusters from
        every active ONOS node (in parallel) and compares them against the
        Mininet topology until they match or the retry budget is exhausted.
        Then checks cross-node consistency of hosts and clusters, host
        attachment points, SCC count, and finally the ONOS nodes' states.
        Exits the test on an unrecoverable topology mismatch.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Pull devices from every node in parallel, with retries
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            # Pull hosts from every node in parallel, with retries
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host should have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            # Pull ports from every node in parallel, with retries
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            # Pull links from every node in parallel, with retries
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            # Pull clusters (SCCs) from every node in parallel, with retries
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query failed on every node, retry the whole loop
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            # Compare each node's view against the Mininet topology
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                   "Error" not in devices[ controller ] and\
                   "Error" not in ports[ controller ]:

                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ):
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        # Each host should be attached to port 1 of the
                        # device its MAC maps to
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                    # NOTE(review): zeroHosts is always False on this branch,
                    # so a missing/errored hosts response never fails the
                    # attachment check — confirm this forgiveness is intended
                    if zeroHosts is False:
                        # TODO: Find a way to know if there should be hosts in a
                        #       given point of the test
                        hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
            topoResult = ( devicesResults and linksResults
                           and hostsResults and ipResult and
                           hostAttachmentResults )
            utilities.assert_equals( expect=True,
                                     actual=topoResult,
                                     onpass="ONOS topology matches Mininet",
                                     onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )
            for x in links:
                main.log.warn( "{}: {}".format( len( x ), x ) )


        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # Also require that the topology converged within two tries
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
            " seconds for the test to make all the cli calls to fetch " +\
            "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )

        # A topology mismatch at this point is unrecoverable; stop the test
        if not topoResult:
            main.cleanup()
            main.exit()
2694
Jon Hall69b2b982016-05-11 12:04:59 -07002695 def CASE9( self, main ):
2696 """
2697 Link s3-s28 down
2698 """
2699 import time
2700 assert main.numCtrls, "main.numCtrls not defined"
2701 assert main, "main not defined"
2702 assert utilities.assert_equals, "utilities.assert_equals not defined"
2703 assert main.CLIs, "main.CLIs not defined"
2704 assert main.nodes, "main.nodes not defined"
2705 # NOTE: You should probably run a topology check after this
2706
2707 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2708
2709 description = "Turn off a link to ensure that Link Discovery " +\
2710 "is working properly"
2711 main.case( description )
2712
2713 main.step( "Kill Link between s3 and s28" )
2714 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2715 main.log.info( "Waiting " + str( linkSleep ) +
2716 " seconds for link down to be discovered" )
2717 time.sleep( linkSleep )
2718 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2719 onpass="Link down successful",
2720 onfail="Failed to bring link down" )
2721 # TODO do some sort of check here
2722
2723 def CASE10( self, main ):
2724 """
2725 Link s3-s28 up
2726 """
2727 import time
2728 assert main.numCtrls, "main.numCtrls not defined"
2729 assert main, "main not defined"
2730 assert utilities.assert_equals, "utilities.assert_equals not defined"
2731 assert main.CLIs, "main.CLIs not defined"
2732 assert main.nodes, "main.nodes not defined"
2733 # NOTE: You should probably run a topology check after this
2734
2735 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2736
2737 description = "Restore a link to ensure that Link Discovery is " + \
2738 "working properly"
2739 main.case( description )
2740
2741 main.step( "Bring link between s3 and s28 back up" )
2742 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2743 main.log.info( "Waiting " + str( linkSleep ) +
2744 " seconds for link up to be discovered" )
2745 time.sleep( linkSleep )
2746 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2747 onpass="Link up successful",
2748 onfail="Failed to bring link up" )
2749 # TODO do some sort of check here
2750
2751 def CASE11( self, main ):
2752 """
2753 Switch Down
2754 """
2755 # NOTE: You should probably run a topology check after this
2756 import time
2757 assert main.numCtrls, "main.numCtrls not defined"
2758 assert main, "main not defined"
2759 assert utilities.assert_equals, "utilities.assert_equals not defined"
2760 assert main.CLIs, "main.CLIs not defined"
2761 assert main.nodes, "main.nodes not defined"
2762
2763 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2764
2765 description = "Killing a switch to ensure it is discovered correctly"
2766 onosCli = main.CLIs[ main.activeNodes[0] ]
2767 main.case( description )
2768 switch = main.params[ 'kill' ][ 'switch' ]
2769 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2770
2771 # TODO: Make this switch parameterizable
2772 main.step( "Kill " + switch )
2773 main.log.info( "Deleting " + switch )
2774 main.Mininet1.delSwitch( switch )
2775 main.log.info( "Waiting " + str( switchSleep ) +
2776 " seconds for switch down to be discovered" )
2777 time.sleep( switchSleep )
2778 device = onosCli.getDevice( dpid=switchDPID )
2779 # Peek at the deleted switch
2780 main.log.warn( str( device ) )
2781 result = main.FALSE
2782 if device and device[ 'available' ] is False:
2783 result = main.TRUE
2784 utilities.assert_equals( expect=main.TRUE, actual=result,
2785 onpass="Kill switch successful",
2786 onfail="Failed to kill switch?" )
2787
2788 def CASE12( self, main ):
2789 """
2790 Switch Up
2791 """
2792 # NOTE: You should probably run a topology check after this
2793 import time
2794 assert main.numCtrls, "main.numCtrls not defined"
2795 assert main, "main not defined"
2796 assert utilities.assert_equals, "utilities.assert_equals not defined"
2797 assert main.CLIs, "main.CLIs not defined"
2798 assert main.nodes, "main.nodes not defined"
2799
2800 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2801 switch = main.params[ 'kill' ][ 'switch' ]
2802 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2803 links = main.params[ 'kill' ][ 'links' ].split()
2804 onosCli = main.CLIs[ main.activeNodes[0] ]
2805 description = "Adding a switch to ensure it is discovered correctly"
2806 main.case( description )
2807
2808 main.step( "Add back " + switch )
2809 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2810 for peer in links:
2811 main.Mininet1.addLink( switch, peer )
2812 ipList = [ node.ip_address for node in main.nodes ]
2813 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2814 main.log.info( "Waiting " + str( switchSleep ) +
2815 " seconds for switch up to be discovered" )
2816 time.sleep( switchSleep )
2817 device = onosCli.getDevice( dpid=switchDPID )
2818 # Peek at the deleted switch
2819 main.log.warn( str( device ) )
2820 result = main.FALSE
2821 if device and device[ 'available' ]:
2822 result = main.TRUE
2823 utilities.assert_equals( expect=main.TRUE, actual=result,
2824 onpass="add switch successful",
2825 onfail="Failed to add switch?" )
2826
2827 def CASE13( self, main ):
2828 """
2829 Clean up
2830 """
2831 assert main.numCtrls, "main.numCtrls not defined"
2832 assert main, "main not defined"
2833 assert utilities.assert_equals, "utilities.assert_equals not defined"
2834 assert main.CLIs, "main.CLIs not defined"
2835 assert main.nodes, "main.nodes not defined"
2836
2837 main.case( "Test Cleanup" )
2838 main.step( "Killing tcpdumps" )
2839 main.Mininet2.stopTcpdump()
2840
2841 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2842 main.step( "Copying MN pcap and ONOS log files to test station" )
2843 # NOTE: MN Pcap file is being saved to logdir.
2844 # We scp this file as MN and TestON aren't necessarily the same vm
2845
2846 # FIXME: To be replaced with a Jenkin's post script
2847 # TODO: Load these from params
2848 # NOTE: must end in /
2849 logFolder = "/opt/onos/log/"
2850 logFiles = [ "karaf.log", "karaf.log.1" ]
2851 # NOTE: must end in /
2852 for f in logFiles:
2853 for node in main.nodes:
2854 dstName = main.logdir + "/" + node.name + "-" + f
2855 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2856 logFolder + f, dstName )
2857 # std*.log's
2858 # NOTE: must end in /
2859 logFolder = "/opt/onos/var/"
2860 logFiles = [ "stderr.log", "stdout.log" ]
2861 # NOTE: must end in /
2862 for f in logFiles:
2863 for node in main.nodes:
2864 dstName = main.logdir + "/" + node.name + "-" + f
2865 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2866 logFolder + f, dstName )
2867 else:
2868 main.log.debug( "skipping saving log files" )
2869
2870 main.step( "Stopping Mininet" )
2871 mnResult = main.Mininet1.stopNet()
2872 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2873 onpass="Mininet stopped",
2874 onfail="MN cleanup NOT successful" )
2875
2876 main.step( "Checking ONOS Logs for errors" )
2877 for node in main.nodes:
2878 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2879 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2880
2881 try:
2882 timerLog = open( main.logdir + "/Timers.csv", 'w')
2883 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2884 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2885 timerLog.close()
2886 except NameError, e:
2887 main.log.exception(e)
2888
2889 main.step( "Stopping webserver" )
2890 status = main.Server.stop( )
2891 utilities.assert_equals( expect=main.TRUE, actual=status,
2892 onpass="Stop Server",
2893 onfail="Failled to stop SimpleHTTPServer" )
2894 del main.Server
2895
2896 def CASE14( self, main ):
2897 """
2898 start election app on all onos nodes
2899 """
2900 import time
2901 assert main.numCtrls, "main.numCtrls not defined"
2902 assert main, "main not defined"
2903 assert utilities.assert_equals, "utilities.assert_equals not defined"
2904 assert main.CLIs, "main.CLIs not defined"
2905 assert main.nodes, "main.nodes not defined"
2906
2907 main.case("Start Leadership Election app")
2908 main.step( "Install leadership election app" )
2909 onosCli = main.CLIs[ main.activeNodes[0] ]
2910 appResult = onosCli.activateApp( "org.onosproject.election" )
2911 utilities.assert_equals(
2912 expect=main.TRUE,
2913 actual=appResult,
2914 onpass="Election app installed",
2915 onfail="Something went wrong with installing Leadership election" )
2916
2917 main.step( "Run for election on each node" )
2918 for i in main.activeNodes:
2919 main.CLIs[i].electionTestRun()
2920 time.sleep(5)
2921 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2922 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2923 utilities.assert_equals(
2924 expect=True,
2925 actual=sameResult,
2926 onpass="All nodes see the same leaderboards",
2927 onfail="Inconsistent leaderboards" )
2928
2929 if sameResult:
2930 leader = leaders[ 0 ][ 0 ]
2931 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2932 correctLeader = True
2933 else:
2934 correctLeader = False
2935 main.step( "First node was elected leader" )
2936 utilities.assert_equals(
2937 expect=True,
2938 actual=correctLeader,
2939 onpass="Correct leader was elected",
2940 onfail="Incorrect leader" )
2941
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional.

        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        "old" and "new" variable prefixes refer to data from before vs after
        withdrawal, and later to before withdrawal vs after re-election.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent
        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app there is nothing left to test here
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # Boards agree; first entry of the first board is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: runs only if no break, i.e. leader not found
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means no node holds leadership; only OK when a single
            # controller just withdrew (expectNoLeader)
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        # (index 0 was the leader, index 1 appears to be skipped -- TODO confirm
        # leaderboard layout against consistentLeaderboards)
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Not enough candidates recorded to know who should have won
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this sleep
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                     str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3115
3116 def CASE16( self, main ):
3117 """
3118 Install Distributed Primitives app
3119 """
3120 import time
3121 assert main.numCtrls, "main.numCtrls not defined"
3122 assert main, "main not defined"
3123 assert utilities.assert_equals, "utilities.assert_equals not defined"
3124 assert main.CLIs, "main.CLIs not defined"
3125 assert main.nodes, "main.nodes not defined"
3126
3127 # Variables for the distributed primitives tests
3128 global pCounterName
3129 global pCounterValue
3130 global onosSet
3131 global onosSetName
3132 pCounterName = "TestON-Partitions"
3133 pCounterValue = 0
3134 onosSet = set([])
3135 onosSetName = "TestON-set"
3136
3137 description = "Install Primitives app"
3138 main.case( description )
3139 main.step( "Install Primitives app" )
3140 appName = "org.onosproject.distributedprimitives"
3141 node = main.activeNodes[0]
3142 appResults = main.CLIs[node].activateApp( appName )
3143 utilities.assert_equals( expect=main.TRUE,
3144 actual=appResults,
3145 onpass="Primitives app activated",
3146 onfail="Primitives app not activated" )
3147 time.sleep( 5 ) # To allow all nodes to activate
3148
3149 def CASE17( self, main ):
3150 """
3151 Check for basic functionality with distributed primitives
3152 """
3153 # Make sure variables are defined/set
3154 assert main.numCtrls, "main.numCtrls not defined"
3155 assert main, "main not defined"
3156 assert utilities.assert_equals, "utilities.assert_equals not defined"
3157 assert main.CLIs, "main.CLIs not defined"
3158 assert main.nodes, "main.nodes not defined"
3159 assert pCounterName, "pCounterName not defined"
3160 assert onosSetName, "onosSetName not defined"
3161 # NOTE: assert fails if value is 0/None/Empty/False
3162 try:
3163 pCounterValue
3164 except NameError:
3165 main.log.error( "pCounterValue not defined, setting to 0" )
3166 pCounterValue = 0
3167 try:
3168 onosSet
3169 except NameError:
3170 main.log.error( "onosSet not defined, setting to empty Set" )
3171 onosSet = set([])
3172 # Variables for the distributed primitives tests. These are local only
3173 addValue = "a"
3174 addAllValue = "a b c d e f"
3175 retainValue = "c d e f"
3176
3177 description = "Check for basic functionality with distributed " +\
3178 "primitives"
3179 main.case( description )
3180 main.caseExplanation = "Test the methods of the distributed " +\
3181 "primitives (counters and sets) throught the cli"
3182 # DISTRIBUTED ATOMIC COUNTERS
3183 # Partitioned counters
3184 main.step( "Increment then get a default counter on each node" )
3185 pCounters = []
3186 threads = []
3187 addedPValues = []
3188 for i in main.activeNodes:
3189 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3190 name="counterAddAndGet-" + str( i ),
3191 args=[ pCounterName ] )
3192 pCounterValue += 1
3193 addedPValues.append( pCounterValue )
3194 threads.append( t )
3195 t.start()
3196
3197 for t in threads:
3198 t.join()
3199 pCounters.append( t.result )
3200 # Check that counter incremented numController times
3201 pCounterResults = True
3202 for i in addedPValues:
3203 tmpResult = i in pCounters
3204 pCounterResults = pCounterResults and tmpResult
3205 if not tmpResult:
3206 main.log.error( str( i ) + " is not in partitioned "
3207 "counter incremented results" )
3208 utilities.assert_equals( expect=True,
3209 actual=pCounterResults,
3210 onpass="Default counter incremented",
3211 onfail="Error incrementing default" +
3212 " counter" )
3213
3214 main.step( "Get then Increment a default counter on each node" )
3215 pCounters = []
3216 threads = []
3217 addedPValues = []
3218 for i in main.activeNodes:
3219 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3220 name="counterGetAndAdd-" + str( i ),
3221 args=[ pCounterName ] )
3222 addedPValues.append( pCounterValue )
3223 pCounterValue += 1
3224 threads.append( t )
3225 t.start()
3226
3227 for t in threads:
3228 t.join()
3229 pCounters.append( t.result )
3230 # Check that counter incremented numController times
3231 pCounterResults = True
3232 for i in addedPValues:
3233 tmpResult = i in pCounters
3234 pCounterResults = pCounterResults and tmpResult
3235 if not tmpResult:
3236 main.log.error( str( i ) + " is not in partitioned "
3237 "counter incremented results" )
3238 utilities.assert_equals( expect=True,
3239 actual=pCounterResults,
3240 onpass="Default counter incremented",
3241 onfail="Error incrementing default" +
3242 " counter" )
3243
3244 main.step( "Counters we added have the correct values" )
3245 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3246 utilities.assert_equals( expect=main.TRUE,
3247 actual=incrementCheck,
3248 onpass="Added counters are correct",
3249 onfail="Added counters are incorrect" )
3250
3251 main.step( "Add -8 to then get a default counter on each node" )
3252 pCounters = []
3253 threads = []
3254 addedPValues = []
3255 for i in main.activeNodes:
3256 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3257 name="counterIncrement-" + str( i ),
3258 args=[ pCounterName ],
3259 kwargs={ "delta": -8 } )
3260 pCounterValue += -8
3261 addedPValues.append( pCounterValue )
3262 threads.append( t )
3263 t.start()
3264
3265 for t in threads:
3266 t.join()
3267 pCounters.append( t.result )
3268 # Check that counter incremented numController times
3269 pCounterResults = True
3270 for i in addedPValues:
3271 tmpResult = i in pCounters
3272 pCounterResults = pCounterResults and tmpResult
3273 if not tmpResult:
3274 main.log.error( str( i ) + " is not in partitioned "
3275 "counter incremented results" )
3276 utilities.assert_equals( expect=True,
3277 actual=pCounterResults,
3278 onpass="Default counter incremented",
3279 onfail="Error incrementing default" +
3280 " counter" )
3281
3282 main.step( "Add 5 to then get a default counter on each node" )
3283 pCounters = []
3284 threads = []
3285 addedPValues = []
3286 for i in main.activeNodes:
3287 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3288 name="counterIncrement-" + str( i ),
3289 args=[ pCounterName ],
3290 kwargs={ "delta": 5 } )
3291 pCounterValue += 5
3292 addedPValues.append( pCounterValue )
3293 threads.append( t )
3294 t.start()
3295
3296 for t in threads:
3297 t.join()
3298 pCounters.append( t.result )
3299 # Check that counter incremented numController times
3300 pCounterResults = True
3301 for i in addedPValues:
3302 tmpResult = i in pCounters
3303 pCounterResults = pCounterResults and tmpResult
3304 if not tmpResult:
3305 main.log.error( str( i ) + " is not in partitioned "
3306 "counter incremented results" )
3307 utilities.assert_equals( expect=True,
3308 actual=pCounterResults,
3309 onpass="Default counter incremented",
3310 onfail="Error incrementing default" +
3311 " counter" )
3312
3313 main.step( "Get then add 5 to a default counter on each node" )
3314 pCounters = []
3315 threads = []
3316 addedPValues = []
3317 for i in main.activeNodes:
3318 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3319 name="counterIncrement-" + str( i ),
3320 args=[ pCounterName ],
3321 kwargs={ "delta": 5 } )
3322 addedPValues.append( pCounterValue )
3323 pCounterValue += 5
3324 threads.append( t )
3325 t.start()
3326
3327 for t in threads:
3328 t.join()
3329 pCounters.append( t.result )
3330 # Check that counter incremented numController times
3331 pCounterResults = True
3332 for i in addedPValues:
3333 tmpResult = i in pCounters
3334 pCounterResults = pCounterResults and tmpResult
3335 if not tmpResult:
3336 main.log.error( str( i ) + " is not in partitioned "
3337 "counter incremented results" )
3338 utilities.assert_equals( expect=True,
3339 actual=pCounterResults,
3340 onpass="Default counter incremented",
3341 onfail="Error incrementing default" +
3342 " counter" )
3343
3344 main.step( "Counters we added have the correct values" )
3345 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3346 utilities.assert_equals( expect=main.TRUE,
3347 actual=incrementCheck,
3348 onpass="Added counters are correct",
3349 onfail="Added counters are incorrect" )
3350
3351 # DISTRIBUTED SETS
3352 main.step( "Distributed Set get" )
3353 size = len( onosSet )
3354 getResponses = []
3355 threads = []
3356 for i in main.activeNodes:
3357 t = main.Thread( target=main.CLIs[i].setTestGet,
3358 name="setTestGet-" + str( i ),
3359 args=[ onosSetName ] )
3360 threads.append( t )
3361 t.start()
3362 for t in threads:
3363 t.join()
3364 getResponses.append( t.result )
3365
3366 getResults = main.TRUE
3367 for i in range( len( main.activeNodes ) ):
3368 node = str( main.activeNodes[i] + 1 )
3369 if isinstance( getResponses[ i ], list):
3370 current = set( getResponses[ i ] )
3371 if len( current ) == len( getResponses[ i ] ):
3372 # no repeats
3373 if onosSet != current:
3374 main.log.error( "ONOS" + node +
3375 " has incorrect view" +
3376 " of set " + onosSetName + ":\n" +
3377 str( getResponses[ i ] ) )
3378 main.log.debug( "Expected: " + str( onosSet ) )
3379 main.log.debug( "Actual: " + str( current ) )
3380 getResults = main.FALSE
3381 else:
3382 # error, set is not a set
3383 main.log.error( "ONOS" + node +
3384 " has repeat elements in" +
3385 " set " + onosSetName + ":\n" +
3386 str( getResponses[ i ] ) )
3387 getResults = main.FALSE
3388 elif getResponses[ i ] == main.ERROR:
3389 getResults = main.FALSE
3390 utilities.assert_equals( expect=main.TRUE,
3391 actual=getResults,
3392 onpass="Set elements are correct",
3393 onfail="Set elements are incorrect" )
3394
3395 main.step( "Distributed Set size" )
3396 sizeResponses = []
3397 threads = []
3398 for i in main.activeNodes:
3399 t = main.Thread( target=main.CLIs[i].setTestSize,
3400 name="setTestSize-" + str( i ),
3401 args=[ onosSetName ] )
3402 threads.append( t )
3403 t.start()
3404 for t in threads:
3405 t.join()
3406 sizeResponses.append( t.result )
3407
3408 sizeResults = main.TRUE
3409 for i in range( len( main.activeNodes ) ):
3410 node = str( main.activeNodes[i] + 1 )
3411 if size != sizeResponses[ i ]:
3412 sizeResults = main.FALSE
3413 main.log.error( "ONOS" + node +
3414 " expected a size of " + str( size ) +
3415 " for set " + onosSetName +
3416 " but got " + str( sizeResponses[ i ] ) )
3417 utilities.assert_equals( expect=main.TRUE,
3418 actual=sizeResults,
3419 onpass="Set sizes are correct",
3420 onfail="Set sizes are incorrect" )
3421
3422 main.step( "Distributed Set add()" )
3423 onosSet.add( addValue )
3424 addResponses = []
3425 threads = []
3426 for i in main.activeNodes:
3427 t = main.Thread( target=main.CLIs[i].setTestAdd,
3428 name="setTestAdd-" + str( i ),
3429 args=[ onosSetName, addValue ] )
3430 threads.append( t )
3431 t.start()
3432 for t in threads:
3433 t.join()
3434 addResponses.append( t.result )
3435
3436 # main.TRUE = successfully changed the set
3437 # main.FALSE = action resulted in no change in set
3438 # main.ERROR - Some error in executing the function
3439 addResults = main.TRUE
3440 for i in range( len( main.activeNodes ) ):
3441 if addResponses[ i ] == main.TRUE:
3442 # All is well
3443 pass
3444 elif addResponses[ i ] == main.FALSE:
3445 # Already in set, probably fine
3446 pass
3447 elif addResponses[ i ] == main.ERROR:
3448 # Error in execution
3449 addResults = main.FALSE
3450 else:
3451 # unexpected result
3452 addResults = main.FALSE
3453 if addResults != main.TRUE:
3454 main.log.error( "Error executing set add" )
3455
3456 # Check if set is still correct
3457 size = len( onosSet )
3458 getResponses = []
3459 threads = []
3460 for i in main.activeNodes:
3461 t = main.Thread( target=main.CLIs[i].setTestGet,
3462 name="setTestGet-" + str( i ),
3463 args=[ onosSetName ] )
3464 threads.append( t )
3465 t.start()
3466 for t in threads:
3467 t.join()
3468 getResponses.append( t.result )
3469 getResults = main.TRUE
3470 for i in range( len( main.activeNodes ) ):
3471 node = str( main.activeNodes[i] + 1 )
3472 if isinstance( getResponses[ i ], list):
3473 current = set( getResponses[ i ] )
3474 if len( current ) == len( getResponses[ i ] ):
3475 # no repeats
3476 if onosSet != current:
3477 main.log.error( "ONOS" + node + " has incorrect view" +
3478 " of set " + onosSetName + ":\n" +
3479 str( getResponses[ i ] ) )
3480 main.log.debug( "Expected: " + str( onosSet ) )
3481 main.log.debug( "Actual: " + str( current ) )
3482 getResults = main.FALSE
3483 else:
3484 # error, set is not a set
3485 main.log.error( "ONOS" + node + " has repeat elements in" +
3486 " set " + onosSetName + ":\n" +
3487 str( getResponses[ i ] ) )
3488 getResults = main.FALSE
3489 elif getResponses[ i ] == main.ERROR:
3490 getResults = main.FALSE
3491 sizeResponses = []
3492 threads = []
3493 for i in main.activeNodes:
3494 t = main.Thread( target=main.CLIs[i].setTestSize,
3495 name="setTestSize-" + str( i ),
3496 args=[ onosSetName ] )
3497 threads.append( t )
3498 t.start()
3499 for t in threads:
3500 t.join()
3501 sizeResponses.append( t.result )
3502 sizeResults = main.TRUE
3503 for i in range( len( main.activeNodes ) ):
3504 node = str( main.activeNodes[i] + 1 )
3505 if size != sizeResponses[ i ]:
3506 sizeResults = main.FALSE
3507 main.log.error( "ONOS" + node +
3508 " expected a size of " + str( size ) +
3509 " for set " + onosSetName +
3510 " but got " + str( sizeResponses[ i ] ) )
3511 addResults = addResults and getResults and sizeResults
3512 utilities.assert_equals( expect=main.TRUE,
3513 actual=addResults,
3514 onpass="Set add correct",
3515 onfail="Set add was incorrect" )
3516
3517 main.step( "Distributed Set addAll()" )
3518 onosSet.update( addAllValue.split() )
3519 addResponses = []
3520 threads = []
3521 for i in main.activeNodes:
3522 t = main.Thread( target=main.CLIs[i].setTestAdd,
3523 name="setTestAddAll-" + str( i ),
3524 args=[ onosSetName, addAllValue ] )
3525 threads.append( t )
3526 t.start()
3527 for t in threads:
3528 t.join()
3529 addResponses.append( t.result )
3530
3531 # main.TRUE = successfully changed the set
3532 # main.FALSE = action resulted in no change in set
3533 # main.ERROR - Some error in executing the function
3534 addAllResults = main.TRUE
3535 for i in range( len( main.activeNodes ) ):
3536 if addResponses[ i ] == main.TRUE:
3537 # All is well
3538 pass
3539 elif addResponses[ i ] == main.FALSE:
3540 # Already in set, probably fine
3541 pass
3542 elif addResponses[ i ] == main.ERROR:
3543 # Error in execution
3544 addAllResults = main.FALSE
3545 else:
3546 # unexpected result
3547 addAllResults = main.FALSE
3548 if addAllResults != main.TRUE:
3549 main.log.error( "Error executing set addAll" )
3550
3551 # Check if set is still correct
3552 size = len( onosSet )
3553 getResponses = []
3554 threads = []
3555 for i in main.activeNodes:
3556 t = main.Thread( target=main.CLIs[i].setTestGet,
3557 name="setTestGet-" + str( i ),
3558 args=[ onosSetName ] )
3559 threads.append( t )
3560 t.start()
3561 for t in threads:
3562 t.join()
3563 getResponses.append( t.result )
3564 getResults = main.TRUE
3565 for i in range( len( main.activeNodes ) ):
3566 node = str( main.activeNodes[i] + 1 )
3567 if isinstance( getResponses[ i ], list):
3568 current = set( getResponses[ i ] )
3569 if len( current ) == len( getResponses[ i ] ):
3570 # no repeats
3571 if onosSet != current:
3572 main.log.error( "ONOS" + node +
3573 " has incorrect view" +
3574 " of set " + onosSetName + ":\n" +
3575 str( getResponses[ i ] ) )
3576 main.log.debug( "Expected: " + str( onosSet ) )
3577 main.log.debug( "Actual: " + str( current ) )
3578 getResults = main.FALSE
3579 else:
3580 # error, set is not a set
3581 main.log.error( "ONOS" + node +
3582 " has repeat elements in" +
3583 " set " + onosSetName + ":\n" +
3584 str( getResponses[ i ] ) )
3585 getResults = main.FALSE
3586 elif getResponses[ i ] == main.ERROR:
3587 getResults = main.FALSE
3588 sizeResponses = []
3589 threads = []
3590 for i in main.activeNodes:
3591 t = main.Thread( target=main.CLIs[i].setTestSize,
3592 name="setTestSize-" + str( i ),
3593 args=[ onosSetName ] )
3594 threads.append( t )
3595 t.start()
3596 for t in threads:
3597 t.join()
3598 sizeResponses.append( t.result )
3599 sizeResults = main.TRUE
3600 for i in range( len( main.activeNodes ) ):
3601 node = str( main.activeNodes[i] + 1 )
3602 if size != sizeResponses[ i ]:
3603 sizeResults = main.FALSE
3604 main.log.error( "ONOS" + node +
3605 " expected a size of " + str( size ) +
3606 " for set " + onosSetName +
3607 " but got " + str( sizeResponses[ i ] ) )
3608 addAllResults = addAllResults and getResults and sizeResults
3609 utilities.assert_equals( expect=main.TRUE,
3610 actual=addAllResults,
3611 onpass="Set addAll correct",
3612 onfail="Set addAll was incorrect" )
3613
3614 main.step( "Distributed Set contains()" )
3615 containsResponses = []
3616 threads = []
3617 for i in main.activeNodes:
3618 t = main.Thread( target=main.CLIs[i].setTestGet,
3619 name="setContains-" + str( i ),
3620 args=[ onosSetName ],
3621 kwargs={ "values": addValue } )
3622 threads.append( t )
3623 t.start()
3624 for t in threads:
3625 t.join()
3626 # NOTE: This is the tuple
3627 containsResponses.append( t.result )
3628
3629 containsResults = main.TRUE
3630 for i in range( len( main.activeNodes ) ):
3631 if containsResponses[ i ] == main.ERROR:
3632 containsResults = main.FALSE
3633 else:
3634 containsResults = containsResults and\
3635 containsResponses[ i ][ 1 ]
3636 utilities.assert_equals( expect=main.TRUE,
3637 actual=containsResults,
3638 onpass="Set contains is functional",
3639 onfail="Set contains failed" )
3640
3641 main.step( "Distributed Set containsAll()" )
3642 containsAllResponses = []
3643 threads = []
3644 for i in main.activeNodes:
3645 t = main.Thread( target=main.CLIs[i].setTestGet,
3646 name="setContainsAll-" + str( i ),
3647 args=[ onosSetName ],
3648 kwargs={ "values": addAllValue } )
3649 threads.append( t )
3650 t.start()
3651 for t in threads:
3652 t.join()
3653 # NOTE: This is the tuple
3654 containsAllResponses.append( t.result )
3655
3656 containsAllResults = main.TRUE
3657 for i in range( len( main.activeNodes ) ):
3658 if containsResponses[ i ] == main.ERROR:
3659 containsResults = main.FALSE
3660 else:
3661 containsResults = containsResults and\
3662 containsResponses[ i ][ 1 ]
3663 utilities.assert_equals( expect=main.TRUE,
3664 actual=containsAllResults,
3665 onpass="Set containsAll is functional",
3666 onfail="Set containsAll failed" )
3667
3668 main.step( "Distributed Set remove()" )
3669 onosSet.remove( addValue )
3670 removeResponses = []
3671 threads = []
3672 for i in main.activeNodes:
3673 t = main.Thread( target=main.CLIs[i].setTestRemove,
3674 name="setTestRemove-" + str( i ),
3675 args=[ onosSetName, addValue ] )
3676 threads.append( t )
3677 t.start()
3678 for t in threads:
3679 t.join()
3680 removeResponses.append( t.result )
3681
3682 # main.TRUE = successfully changed the set
3683 # main.FALSE = action resulted in no change in set
3684 # main.ERROR - Some error in executing the function
3685 removeResults = main.TRUE
3686 for i in range( len( main.activeNodes ) ):
3687 if removeResponses[ i ] == main.TRUE:
3688 # All is well
3689 pass
3690 elif removeResponses[ i ] == main.FALSE:
3691 # not in set, probably fine
3692 pass
3693 elif removeResponses[ i ] == main.ERROR:
3694 # Error in execution
3695 removeResults = main.FALSE
3696 else:
3697 # unexpected result
3698 removeResults = main.FALSE
3699 if removeResults != main.TRUE:
3700 main.log.error( "Error executing set remove" )
3701
3702 # Check if set is still correct
3703 size = len( onosSet )
3704 getResponses = []
3705 threads = []
3706 for i in main.activeNodes:
3707 t = main.Thread( target=main.CLIs[i].setTestGet,
3708 name="setTestGet-" + str( i ),
3709 args=[ onosSetName ] )
3710 threads.append( t )
3711 t.start()
3712 for t in threads:
3713 t.join()
3714 getResponses.append( t.result )
3715 getResults = main.TRUE
3716 for i in range( len( main.activeNodes ) ):
3717 node = str( main.activeNodes[i] + 1 )
3718 if isinstance( getResponses[ i ], list):
3719 current = set( getResponses[ i ] )
3720 if len( current ) == len( getResponses[ i ] ):
3721 # no repeats
3722 if onosSet != current:
3723 main.log.error( "ONOS" + node +
3724 " has incorrect view" +
3725 " of set " + onosSetName + ":\n" +
3726 str( getResponses[ i ] ) )
3727 main.log.debug( "Expected: " + str( onosSet ) )
3728 main.log.debug( "Actual: " + str( current ) )
3729 getResults = main.FALSE
3730 else:
3731 # error, set is not a set
3732 main.log.error( "ONOS" + node +
3733 " has repeat elements in" +
3734 " set " + onosSetName + ":\n" +
3735 str( getResponses[ i ] ) )
3736 getResults = main.FALSE
3737 elif getResponses[ i ] == main.ERROR:
3738 getResults = main.FALSE
3739 sizeResponses = []
3740 threads = []
3741 for i in main.activeNodes:
3742 t = main.Thread( target=main.CLIs[i].setTestSize,
3743 name="setTestSize-" + str( i ),
3744 args=[ onosSetName ] )
3745 threads.append( t )
3746 t.start()
3747 for t in threads:
3748 t.join()
3749 sizeResponses.append( t.result )
3750 sizeResults = main.TRUE
3751 for i in range( len( main.activeNodes ) ):
3752 node = str( main.activeNodes[i] + 1 )
3753 if size != sizeResponses[ i ]:
3754 sizeResults = main.FALSE
3755 main.log.error( "ONOS" + node +
3756 " expected a size of " + str( size ) +
3757 " for set " + onosSetName +
3758 " but got " + str( sizeResponses[ i ] ) )
3759 removeResults = removeResults and getResults and sizeResults
3760 utilities.assert_equals( expect=main.TRUE,
3761 actual=removeResults,
3762 onpass="Set remove correct",
3763 onfail="Set remove was incorrect" )
3764
3765 main.step( "Distributed Set removeAll()" )
3766 onosSet.difference_update( addAllValue.split() )
3767 removeAllResponses = []
3768 threads = []
3769 try:
3770 for i in main.activeNodes:
3771 t = main.Thread( target=main.CLIs[i].setTestRemove,
3772 name="setTestRemoveAll-" + str( i ),
3773 args=[ onosSetName, addAllValue ] )
3774 threads.append( t )
3775 t.start()
3776 for t in threads:
3777 t.join()
3778 removeAllResponses.append( t.result )
3779 except Exception, e:
3780 main.log.exception(e)
3781
3782 # main.TRUE = successfully changed the set
3783 # main.FALSE = action resulted in no change in set
3784 # main.ERROR - Some error in executing the function
3785 removeAllResults = main.TRUE
3786 for i in range( len( main.activeNodes ) ):
3787 if removeAllResponses[ i ] == main.TRUE:
3788 # All is well
3789 pass
3790 elif removeAllResponses[ i ] == main.FALSE:
3791 # not in set, probably fine
3792 pass
3793 elif removeAllResponses[ i ] == main.ERROR:
3794 # Error in execution
3795 removeAllResults = main.FALSE
3796 else:
3797 # unexpected result
3798 removeAllResults = main.FALSE
3799 if removeAllResults != main.TRUE:
3800 main.log.error( "Error executing set removeAll" )
3801
3802 # Check if set is still correct
3803 size = len( onosSet )
3804 getResponses = []
3805 threads = []
3806 for i in main.activeNodes:
3807 t = main.Thread( target=main.CLIs[i].setTestGet,
3808 name="setTestGet-" + str( i ),
3809 args=[ onosSetName ] )
3810 threads.append( t )
3811 t.start()
3812 for t in threads:
3813 t.join()
3814 getResponses.append( t.result )
3815 getResults = main.TRUE
3816 for i in range( len( main.activeNodes ) ):
3817 node = str( main.activeNodes[i] + 1 )
3818 if isinstance( getResponses[ i ], list):
3819 current = set( getResponses[ i ] )
3820 if len( current ) == len( getResponses[ i ] ):
3821 # no repeats
3822 if onosSet != current:
3823 main.log.error( "ONOS" + node +
3824 " has incorrect view" +
3825 " of set " + onosSetName + ":\n" +
3826 str( getResponses[ i ] ) )
3827 main.log.debug( "Expected: " + str( onosSet ) )
3828 main.log.debug( "Actual: " + str( current ) )
3829 getResults = main.FALSE
3830 else:
3831 # error, set is not a set
3832 main.log.error( "ONOS" + node +
3833 " has repeat elements in" +
3834 " set " + onosSetName + ":\n" +
3835 str( getResponses[ i ] ) )
3836 getResults = main.FALSE
3837 elif getResponses[ i ] == main.ERROR:
3838 getResults = main.FALSE
3839 sizeResponses = []
3840 threads = []
3841 for i in main.activeNodes:
3842 t = main.Thread( target=main.CLIs[i].setTestSize,
3843 name="setTestSize-" + str( i ),
3844 args=[ onosSetName ] )
3845 threads.append( t )
3846 t.start()
3847 for t in threads:
3848 t.join()
3849 sizeResponses.append( t.result )
3850 sizeResults = main.TRUE
3851 for i in range( len( main.activeNodes ) ):
3852 node = str( main.activeNodes[i] + 1 )
3853 if size != sizeResponses[ i ]:
3854 sizeResults = main.FALSE
3855 main.log.error( "ONOS" + node +
3856 " expected a size of " + str( size ) +
3857 " for set " + onosSetName +
3858 " but got " + str( sizeResponses[ i ] ) )
3859 removeAllResults = removeAllResults and getResults and sizeResults
3860 utilities.assert_equals( expect=main.TRUE,
3861 actual=removeAllResults,
3862 onpass="Set removeAll correct",
3863 onfail="Set removeAll was incorrect" )
3864
3865 main.step( "Distributed Set addAll()" )
3866 onosSet.update( addAllValue.split() )
3867 addResponses = []
3868 threads = []
3869 for i in main.activeNodes:
3870 t = main.Thread( target=main.CLIs[i].setTestAdd,
3871 name="setTestAddAll-" + str( i ),
3872 args=[ onosSetName, addAllValue ] )
3873 threads.append( t )
3874 t.start()
3875 for t in threads:
3876 t.join()
3877 addResponses.append( t.result )
3878
3879 # main.TRUE = successfully changed the set
3880 # main.FALSE = action resulted in no change in set
3881 # main.ERROR - Some error in executing the function
3882 addAllResults = main.TRUE
3883 for i in range( len( main.activeNodes ) ):
3884 if addResponses[ i ] == main.TRUE:
3885 # All is well
3886 pass
3887 elif addResponses[ i ] == main.FALSE:
3888 # Already in set, probably fine
3889 pass
3890 elif addResponses[ i ] == main.ERROR:
3891 # Error in execution
3892 addAllResults = main.FALSE
3893 else:
3894 # unexpected result
3895 addAllResults = main.FALSE
3896 if addAllResults != main.TRUE:
3897 main.log.error( "Error executing set addAll" )
3898
3899 # Check if set is still correct
3900 size = len( onosSet )
3901 getResponses = []
3902 threads = []
3903 for i in main.activeNodes:
3904 t = main.Thread( target=main.CLIs[i].setTestGet,
3905 name="setTestGet-" + str( i ),
3906 args=[ onosSetName ] )
3907 threads.append( t )
3908 t.start()
3909 for t in threads:
3910 t.join()
3911 getResponses.append( t.result )
3912 getResults = main.TRUE
3913 for i in range( len( main.activeNodes ) ):
3914 node = str( main.activeNodes[i] + 1 )
3915 if isinstance( getResponses[ i ], list):
3916 current = set( getResponses[ i ] )
3917 if len( current ) == len( getResponses[ i ] ):
3918 # no repeats
3919 if onosSet != current:
3920 main.log.error( "ONOS" + node +
3921 " has incorrect view" +
3922 " of set " + onosSetName + ":\n" +
3923 str( getResponses[ i ] ) )
3924 main.log.debug( "Expected: " + str( onosSet ) )
3925 main.log.debug( "Actual: " + str( current ) )
3926 getResults = main.FALSE
3927 else:
3928 # error, set is not a set
3929 main.log.error( "ONOS" + node +
3930 " has repeat elements in" +
3931 " set " + onosSetName + ":\n" +
3932 str( getResponses[ i ] ) )
3933 getResults = main.FALSE
3934 elif getResponses[ i ] == main.ERROR:
3935 getResults = main.FALSE
3936 sizeResponses = []
3937 threads = []
3938 for i in main.activeNodes:
3939 t = main.Thread( target=main.CLIs[i].setTestSize,
3940 name="setTestSize-" + str( i ),
3941 args=[ onosSetName ] )
3942 threads.append( t )
3943 t.start()
3944 for t in threads:
3945 t.join()
3946 sizeResponses.append( t.result )
3947 sizeResults = main.TRUE
3948 for i in range( len( main.activeNodes ) ):
3949 node = str( main.activeNodes[i] + 1 )
3950 if size != sizeResponses[ i ]:
3951 sizeResults = main.FALSE
3952 main.log.error( "ONOS" + node +
3953 " expected a size of " + str( size ) +
3954 " for set " + onosSetName +
3955 " but got " + str( sizeResponses[ i ] ) )
3956 addAllResults = addAllResults and getResults and sizeResults
3957 utilities.assert_equals( expect=main.TRUE,
3958 actual=addAllResults,
3959 onpass="Set addAll correct",
3960 onfail="Set addAll was incorrect" )
3961
3962 main.step( "Distributed Set clear()" )
3963 onosSet.clear()
3964 clearResponses = []
3965 threads = []
3966 for i in main.activeNodes:
3967 t = main.Thread( target=main.CLIs[i].setTestRemove,
3968 name="setTestClear-" + str( i ),
3969 args=[ onosSetName, " "], # Values doesn't matter
3970 kwargs={ "clear": True } )
3971 threads.append( t )
3972 t.start()
3973 for t in threads:
3974 t.join()
3975 clearResponses.append( t.result )
3976
3977 # main.TRUE = successfully changed the set
3978 # main.FALSE = action resulted in no change in set
3979 # main.ERROR - Some error in executing the function
3980 clearResults = main.TRUE
3981 for i in range( len( main.activeNodes ) ):
3982 if clearResponses[ i ] == main.TRUE:
3983 # All is well
3984 pass
3985 elif clearResponses[ i ] == main.FALSE:
3986 # Nothing set, probably fine
3987 pass
3988 elif clearResponses[ i ] == main.ERROR:
3989 # Error in execution
3990 clearResults = main.FALSE
3991 else:
3992 # unexpected result
3993 clearResults = main.FALSE
3994 if clearResults != main.TRUE:
3995 main.log.error( "Error executing set clear" )
3996
3997 # Check if set is still correct
3998 size = len( onosSet )
3999 getResponses = []
4000 threads = []
4001 for i in main.activeNodes:
4002 t = main.Thread( target=main.CLIs[i].setTestGet,
4003 name="setTestGet-" + str( i ),
4004 args=[ onosSetName ] )
4005 threads.append( t )
4006 t.start()
4007 for t in threads:
4008 t.join()
4009 getResponses.append( t.result )
4010 getResults = main.TRUE
4011 for i in range( len( main.activeNodes ) ):
4012 node = str( main.activeNodes[i] + 1 )
4013 if isinstance( getResponses[ i ], list):
4014 current = set( getResponses[ i ] )
4015 if len( current ) == len( getResponses[ i ] ):
4016 # no repeats
4017 if onosSet != current:
4018 main.log.error( "ONOS" + node +
4019 " has incorrect view" +
4020 " of set " + onosSetName + ":\n" +
4021 str( getResponses[ i ] ) )
4022 main.log.debug( "Expected: " + str( onosSet ) )
4023 main.log.debug( "Actual: " + str( current ) )
4024 getResults = main.FALSE
4025 else:
4026 # error, set is not a set
4027 main.log.error( "ONOS" + node +
4028 " has repeat elements in" +
4029 " set " + onosSetName + ":\n" +
4030 str( getResponses[ i ] ) )
4031 getResults = main.FALSE
4032 elif getResponses[ i ] == main.ERROR:
4033 getResults = main.FALSE
4034 sizeResponses = []
4035 threads = []
4036 for i in main.activeNodes:
4037 t = main.Thread( target=main.CLIs[i].setTestSize,
4038 name="setTestSize-" + str( i ),
4039 args=[ onosSetName ] )
4040 threads.append( t )
4041 t.start()
4042 for t in threads:
4043 t.join()
4044 sizeResponses.append( t.result )
4045 sizeResults = main.TRUE
4046 for i in range( len( main.activeNodes ) ):
4047 node = str( main.activeNodes[i] + 1 )
4048 if size != sizeResponses[ i ]:
4049 sizeResults = main.FALSE
4050 main.log.error( "ONOS" + node +
4051 " expected a size of " + str( size ) +
4052 " for set " + onosSetName +
4053 " but got " + str( sizeResponses[ i ] ) )
4054 clearResults = clearResults and getResults and sizeResults
4055 utilities.assert_equals( expect=main.TRUE,
4056 actual=clearResults,
4057 onpass="Set clear correct",
4058 onfail="Set clear was incorrect" )
4059
4060 main.step( "Distributed Set addAll()" )
4061 onosSet.update( addAllValue.split() )
4062 addResponses = []
4063 threads = []
4064 for i in main.activeNodes:
4065 t = main.Thread( target=main.CLIs[i].setTestAdd,
4066 name="setTestAddAll-" + str( i ),
4067 args=[ onosSetName, addAllValue ] )
4068 threads.append( t )
4069 t.start()
4070 for t in threads:
4071 t.join()
4072 addResponses.append( t.result )
4073
4074 # main.TRUE = successfully changed the set
4075 # main.FALSE = action resulted in no change in set
4076 # main.ERROR - Some error in executing the function
4077 addAllResults = main.TRUE
4078 for i in range( len( main.activeNodes ) ):
4079 if addResponses[ i ] == main.TRUE:
4080 # All is well
4081 pass
4082 elif addResponses[ i ] == main.FALSE:
4083 # Already in set, probably fine
4084 pass
4085 elif addResponses[ i ] == main.ERROR:
4086 # Error in execution
4087 addAllResults = main.FALSE
4088 else:
4089 # unexpected result
4090 addAllResults = main.FALSE
4091 if addAllResults != main.TRUE:
4092 main.log.error( "Error executing set addAll" )
4093
4094 # Check if set is still correct
4095 size = len( onosSet )
4096 getResponses = []
4097 threads = []
4098 for i in main.activeNodes:
4099 t = main.Thread( target=main.CLIs[i].setTestGet,
4100 name="setTestGet-" + str( i ),
4101 args=[ onosSetName ] )
4102 threads.append( t )
4103 t.start()
4104 for t in threads:
4105 t.join()
4106 getResponses.append( t.result )
4107 getResults = main.TRUE
4108 for i in range( len( main.activeNodes ) ):
4109 node = str( main.activeNodes[i] + 1 )
4110 if isinstance( getResponses[ i ], list):
4111 current = set( getResponses[ i ] )
4112 if len( current ) == len( getResponses[ i ] ):
4113 # no repeats
4114 if onosSet != current:
4115 main.log.error( "ONOS" + node +
4116 " has incorrect view" +
4117 " of set " + onosSetName + ":\n" +
4118 str( getResponses[ i ] ) )
4119 main.log.debug( "Expected: " + str( onosSet ) )
4120 main.log.debug( "Actual: " + str( current ) )
4121 getResults = main.FALSE
4122 else:
4123 # error, set is not a set
4124 main.log.error( "ONOS" + node +
4125 " has repeat elements in" +
4126 " set " + onosSetName + ":\n" +
4127 str( getResponses[ i ] ) )
4128 getResults = main.FALSE
4129 elif getResponses[ i ] == main.ERROR:
4130 getResults = main.FALSE
4131 sizeResponses = []
4132 threads = []
4133 for i in main.activeNodes:
4134 t = main.Thread( target=main.CLIs[i].setTestSize,
4135 name="setTestSize-" + str( i ),
4136 args=[ onosSetName ] )
4137 threads.append( t )
4138 t.start()
4139 for t in threads:
4140 t.join()
4141 sizeResponses.append( t.result )
4142 sizeResults = main.TRUE
4143 for i in range( len( main.activeNodes ) ):
4144 node = str( main.activeNodes[i] + 1 )
4145 if size != sizeResponses[ i ]:
4146 sizeResults = main.FALSE
4147 main.log.error( "ONOS" + node +
4148 " expected a size of " + str( size ) +
4149 " for set " + onosSetName +
4150 " but got " + str( sizeResponses[ i ] ) )
4151 addAllResults = addAllResults and getResults and sizeResults
4152 utilities.assert_equals( expect=main.TRUE,
4153 actual=addAllResults,
4154 onpass="Set addAll correct",
4155 onfail="Set addAll was incorrect" )
4156
4157 main.step( "Distributed Set retain()" )
4158 onosSet.intersection_update( retainValue.split() )
4159 retainResponses = []
4160 threads = []
4161 for i in main.activeNodes:
4162 t = main.Thread( target=main.CLIs[i].setTestRemove,
4163 name="setTestRetain-" + str( i ),
4164 args=[ onosSetName, retainValue ],
4165 kwargs={ "retain": True } )
4166 threads.append( t )
4167 t.start()
4168 for t in threads:
4169 t.join()
4170 retainResponses.append( t.result )
4171
4172 # main.TRUE = successfully changed the set
4173 # main.FALSE = action resulted in no change in set
4174 # main.ERROR - Some error in executing the function
4175 retainResults = main.TRUE
4176 for i in range( len( main.activeNodes ) ):
4177 if retainResponses[ i ] == main.TRUE:
4178 # All is well
4179 pass
4180 elif retainResponses[ i ] == main.FALSE:
4181 # Already in set, probably fine
4182 pass
4183 elif retainResponses[ i ] == main.ERROR:
4184 # Error in execution
4185 retainResults = main.FALSE
4186 else:
4187 # unexpected result
4188 retainResults = main.FALSE
4189 if retainResults != main.TRUE:
4190 main.log.error( "Error executing set retain" )
4191
4192 # Check if set is still correct
4193 size = len( onosSet )
4194 getResponses = []
4195 threads = []
4196 for i in main.activeNodes:
4197 t = main.Thread( target=main.CLIs[i].setTestGet,
4198 name="setTestGet-" + str( i ),
4199 args=[ onosSetName ] )
4200 threads.append( t )
4201 t.start()
4202 for t in threads:
4203 t.join()
4204 getResponses.append( t.result )
4205 getResults = main.TRUE
4206 for i in range( len( main.activeNodes ) ):
4207 node = str( main.activeNodes[i] + 1 )
4208 if isinstance( getResponses[ i ], list):
4209 current = set( getResponses[ i ] )
4210 if len( current ) == len( getResponses[ i ] ):
4211 # no repeats
4212 if onosSet != current:
4213 main.log.error( "ONOS" + node +
4214 " has incorrect view" +
4215 " of set " + onosSetName + ":\n" +
4216 str( getResponses[ i ] ) )
4217 main.log.debug( "Expected: " + str( onosSet ) )
4218 main.log.debug( "Actual: " + str( current ) )
4219 getResults = main.FALSE
4220 else:
4221 # error, set is not a set
4222 main.log.error( "ONOS" + node +
4223 " has repeat elements in" +
4224 " set " + onosSetName + ":\n" +
4225 str( getResponses[ i ] ) )
4226 getResults = main.FALSE
4227 elif getResponses[ i ] == main.ERROR:
4228 getResults = main.FALSE
4229 sizeResponses = []
4230 threads = []
4231 for i in main.activeNodes:
4232 t = main.Thread( target=main.CLIs[i].setTestSize,
4233 name="setTestSize-" + str( i ),
4234 args=[ onosSetName ] )
4235 threads.append( t )
4236 t.start()
4237 for t in threads:
4238 t.join()
4239 sizeResponses.append( t.result )
4240 sizeResults = main.TRUE
4241 for i in range( len( main.activeNodes ) ):
4242 node = str( main.activeNodes[i] + 1 )
4243 if size != sizeResponses[ i ]:
4244 sizeResults = main.FALSE
4245 main.log.error( "ONOS" + node + " expected a size of " +
4246 str( size ) + " for set " + onosSetName +
4247 " but got " + str( sizeResponses[ i ] ) )
4248 retainResults = retainResults and getResults and sizeResults
4249 utilities.assert_equals( expect=main.TRUE,
4250 actual=retainResults,
4251 onpass="Set retain correct",
4252 onfail="Set retain was incorrect" )
4253
4254 # Transactional maps
4255 main.step( "Partitioned Transactional maps put" )
4256 tMapValue = "Testing"
4257 numKeys = 100
4258 putResult = True
4259 node = main.activeNodes[0]
4260 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4261 if putResponses and len( putResponses ) == 100:
4262 for i in putResponses:
4263 if putResponses[ i ][ 'value' ] != tMapValue:
4264 putResult = False
4265 else:
4266 putResult = False
4267 if not putResult:
4268 main.log.debug( "Put response values: " + str( putResponses ) )
4269 utilities.assert_equals( expect=True,
4270 actual=putResult,
4271 onpass="Partitioned Transactional Map put successful",
4272 onfail="Partitioned Transactional Map put values are incorrect" )
4273
4274 main.step( "Partitioned Transactional maps get" )
4275 getCheck = True
4276 for n in range( 1, numKeys + 1 ):
4277 getResponses = []
4278 threads = []
4279 valueCheck = True
4280 for i in main.activeNodes:
4281 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4282 name="TMap-get-" + str( i ),
4283 args=[ "Key" + str( n ) ] )
4284 threads.append( t )
4285 t.start()
4286 for t in threads:
4287 t.join()
4288 getResponses.append( t.result )
4289 for node in getResponses:
4290 if node != tMapValue:
4291 valueCheck = False
4292 if not valueCheck:
4293 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4294 main.log.warn( getResponses )
4295 getCheck = getCheck and valueCheck
4296 utilities.assert_equals( expect=True,
4297 actual=getCheck,
4298 onpass="Partitioned Transactional Map get values were correct",
4299 onfail="Partitioned Transactional Map values incorrect" )