blob: 0065ecf38c18c715205db0f329867e9bd54a11e2 [file] [log] [blame]
Jon Hall69b2b982016-05-11 12:04:59 -07001"""
2Description: This test is to determine if ONOS can handle
3 dynamic swapping of cluster nodes.
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE6: Swap nodes
13CASE7: Check state after control plane failure
14CASE8: Compare topo
15CASE9: Link s3-s28 down
16CASE10: Link s3-s28 up
17CASE11: Switch down
18CASE12: Switch up
19CASE13: Clean up
20CASE14: start election app on all onos nodes
21CASE15: Check that Leadership Election is still functional
22CASE16: Install Distributed Primitives app
23CASE17: Check for basic functionality with distributed primitives
24"""
25
26
27class HAswapNodes:
28
    def __init__( self ):
        # Placeholder attribute expected by the TestON framework; no other
        # per-instance state is set up here.
        self.default = ''
31
    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import time
        import os
        import re
        main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS" +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

        # Cap the requested controller count at the number of nodes the
        # test bench actually has available
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # Import test-suite helper classes; abort the entire test run if
        # they cannot be loaded
        try:
            from tests.HA.dependencies.HA import HA
            main.HA = HA()
            from tests.HA.HAswapNodes.dependencies.Server import Server
            main.Server = Server()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        # Collect the CLI/node driver components created by TestON.
        # Components are named ONOScli1..N and ONOS1..N; stop at the first
        # missing index.
        main.CLIs = []
        main.nodes = []
        ipList = []
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )

        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        utilities.assert_equals( expect=main.TRUE, actual=cellResult,
                                 onpass="Set cell successfull",
                                 onfail="Failled to set cell" )

        main.step( "Verify connectivity to cell" )
        verifyResult = main.ONOSbench.verifyCell()
        utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
                                 onpass="Verify cell passed",
                                 onfail="Failled to verify cell" )

        # FIXME:this is short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        # Serve the cluster metadata file over HTTP from the test station;
        # pointing the nodes at this URL is what lets CASE6 swap cluster
        # membership later
        main.step( "Setup server for cluster metadata file" )
        port = main.params['server']['port']
        rootDir = os.path.dirname( main.testFile ) + "/dependencies"
        main.log.debug( "Root dir: {}".format( rootDir ) )
        status = main.Server.start( main.ONOSbench,
                                    rootDir,
                                    port=port,
                                    logDir=main.logdir + "/server.log" )
        utilities.assert_equals( expect=main.TRUE, actual=status,
                                 onpass="Server started",
                                 onfail="Failled to start SimpleHTTPServer" )

        main.step( "Generate initial metadata file" )
        # Hold two nodes in reserve so they can be swapped into the cluster
        # by the node-swap case
        if main.numCtrls >= 5:
            main.numCtrls -= 2
        else:
            # NOTE(review): execution continues even when this requirement
            # is not met — confirm whether the test should abort here
            main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
        genResult = main.Server.generateFile( main.numCtrls )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failled to generate new metadata file" )

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet( )
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAswapNodes"
        plotName = "Plot-HA"
        index = "0"
        # Confluence wiki markup embedding the Jenkins plot for this job
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki(graphs)

        # Keep a pristine copy of onos-service so the sed edit below can be
        # reverted after packaging
        main.step( "Copying backup config files" )
        path = "~/onos/tools/package/bin/onos-service"
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 path,
                                 path + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params['server'].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # Forward slashes are escaped because this string is substituted
        # into the sed expression below
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # Inject a JAVA_OPTS export right after the shebang of onos-service
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        handle.expect( "\$" )
        main.log.debug( repr( handle.before ) )

        main.step( "Creating ONOS package" )
        packageResult = main.ONOSbench.onosPackage()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        # Install on every node in the bench, but only start the first
        # numCtrls of them; the rest are the swap-in spares
        for i in range( main.ONOSbench.maxNodes ):
            node = main.nodes[i]
            options = "-f"
            if i >= main.numCtrls:
                options = "-nf"  # Don't start more than the current scale
            tmpResult = main.ONOSbench.onosInstall( options=options,
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )

        # Cleanup custom onos-service file
        main.ONOSbench.scp( main.ONOSbench,
                            path + ".backup",
                            path,
                            direction="to" )

        main.step( "Checking if ONOS is up yet" )
        # Sweep all started nodes, retrying the whole sweep once
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            # NOTE(review): this inner loop reuses 'i', shadowing the retry
            # counter above; harmless as written but fragile
            for i in range( main.numCtrls ):
                node = main.nodes[i]
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Starting ONOS CLI sessions" )
        # Start one CLI per node in parallel to shorten setup time
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump the non-ACTIVE karaf components for debugging, then stop
            # the whole test run
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split(',')
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            # params gives component -> { setting -> value }
            for component in config:
                for setting in config[component]:
                    value = config[component][setting]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        # Compare app name<->id mappings across all active nodes in parallel
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
389
390 def CASE2( self, main ):
391 """
392 Assign devices to controllers
393 """
394 import re
395 assert main.numCtrls, "main.numCtrls not defined"
396 assert main, "main not defined"
397 assert utilities.assert_equals, "utilities.assert_equals not defined"
398 assert main.CLIs, "main.CLIs not defined"
399 assert main.nodes, "main.nodes not defined"
400
401 main.case( "Assigning devices to controllers" )
402 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
403 "and check that an ONOS node becomes the " +\
404 "master of the device."
405 main.step( "Assign switches to controllers" )
406
407 ipList = []
408 for i in range( main.ONOSbench.maxNodes ):
409 ipList.append( main.nodes[ i ].ip_address )
410 swList = []
411 for i in range( 1, 29 ):
412 swList.append( "s" + str( i ) )
413 main.Mininet1.assignSwController( sw=swList, ip=ipList )
414
415 mastershipCheck = main.TRUE
416 for i in range( 1, 29 ):
417 response = main.Mininet1.getSwController( "s" + str( i ) )
418 try:
419 main.log.info( str( response ) )
420 except Exception:
421 main.log.info( repr( response ) )
422 for node in main.nodes:
423 if re.search( "tcp:" + node.ip_address, response ):
424 mastershipCheck = mastershipCheck and main.TRUE
425 else:
426 main.log.error( "Error, node " + node.ip_address + " is " +
427 "not in the list of controllers s" +
428 str( i ) + " is connecting to." )
429 mastershipCheck = main.FALSE
430 utilities.assert_equals(
431 expect=main.TRUE,
432 actual=mastershipCheck,
433 onpass="Switch mastership assigned correctly",
434 onfail="Switches not assigned correctly to controllers" )
435
436 def CASE21( self, main ):
437 """
438 Assign mastership to controllers
439 """
440 import time
441 assert main.numCtrls, "main.numCtrls not defined"
442 assert main, "main not defined"
443 assert utilities.assert_equals, "utilities.assert_equals not defined"
444 assert main.CLIs, "main.CLIs not defined"
445 assert main.nodes, "main.nodes not defined"
446
447 main.case( "Assigning Controller roles for switches" )
448 main.caseExplanation = "Check that ONOS is connected to each " +\
449 "device. Then manually assign" +\
450 " mastership to specific ONOS nodes using" +\
451 " 'device-role'"
452 main.step( "Assign mastership of switches to specific controllers" )
453 # Manually assign mastership to the controller we want
454 roleCall = main.TRUE
455
456 ipList = [ ]
457 deviceList = []
458 onosCli = main.CLIs[ main.activeNodes[0] ]
459 try:
460 # Assign mastership to specific controllers. This assignment was
461 # determined for a 7 node cluser, but will work with any sized
462 # cluster
463 for i in range( 1, 29 ): # switches 1 through 28
464 # set up correct variables:
465 if i == 1:
466 c = 0
467 ip = main.nodes[ c ].ip_address # ONOS1
468 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
469 elif i == 2:
470 c = 1 % main.numCtrls
471 ip = main.nodes[ c ].ip_address # ONOS2
472 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
473 elif i == 3:
474 c = 1 % main.numCtrls
475 ip = main.nodes[ c ].ip_address # ONOS2
476 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
477 elif i == 4:
478 c = 3 % main.numCtrls
479 ip = main.nodes[ c ].ip_address # ONOS4
480 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
481 elif i == 5:
482 c = 2 % main.numCtrls
483 ip = main.nodes[ c ].ip_address # ONOS3
484 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
485 elif i == 6:
486 c = 2 % main.numCtrls
487 ip = main.nodes[ c ].ip_address # ONOS3
488 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
489 elif i == 7:
490 c = 5 % main.numCtrls
491 ip = main.nodes[ c ].ip_address # ONOS6
492 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
493 elif i >= 8 and i <= 17:
494 c = 4 % main.numCtrls
495 ip = main.nodes[ c ].ip_address # ONOS5
496 dpid = '3' + str( i ).zfill( 3 )
497 deviceId = onosCli.getDevice( dpid ).get( 'id' )
498 elif i >= 18 and i <= 27:
499 c = 6 % main.numCtrls
500 ip = main.nodes[ c ].ip_address # ONOS7
501 dpid = '6' + str( i ).zfill( 3 )
502 deviceId = onosCli.getDevice( dpid ).get( 'id' )
503 elif i == 28:
504 c = 0
505 ip = main.nodes[ c ].ip_address # ONOS1
506 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
507 else:
508 main.log.error( "You didn't write an else statement for " +
509 "switch s" + str( i ) )
510 roleCall = main.FALSE
511 # Assign switch
512 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
513 # TODO: make this controller dynamic
514 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
515 ipList.append( ip )
516 deviceList.append( deviceId )
517 except ( AttributeError, AssertionError ):
518 main.log.exception( "Something is wrong with ONOS device view" )
519 main.log.info( onosCli.devices() )
520 utilities.assert_equals(
521 expect=main.TRUE,
522 actual=roleCall,
523 onpass="Re-assigned switch mastership to designated controller",
524 onfail="Something wrong with deviceRole calls" )
525
526 main.step( "Check mastership was correctly assigned" )
527 roleCheck = main.TRUE
528 # NOTE: This is due to the fact that device mastership change is not
529 # atomic and is actually a multi step process
530 time.sleep( 5 )
531 for i in range( len( ipList ) ):
532 ip = ipList[i]
533 deviceId = deviceList[i]
534 # Check assignment
535 master = onosCli.getRole( deviceId ).get( 'master' )
536 if ip in master:
537 roleCheck = roleCheck and main.TRUE
538 else:
539 roleCheck = roleCheck and main.FALSE
540 main.log.error( "Error, controller " + ip + " is not" +
541 " master " + "of device " +
542 str( deviceId ) + ". Master is " +
543 repr( master ) + "." )
544 utilities.assert_equals(
545 expect=main.TRUE,
546 actual=roleCheck,
547 onpass="Switches were successfully reassigned to designated " +
548 "controller",
549 onfail="Switches were not successfully reassigned" )
550
    def CASE3( self, main ):
        """
        Assign intents
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # labels/data are globals normally created by CASE1 for the Jenkins
        # csv plots; recreate them if this case runs standalone
        try:
            labels
        except NameError:
            main.log.error( "labels not defined, setting to []" )
            labels = []
        try:
            data
        except NameError:
            main.log.error( "data not defined, setting to []" )
            data = []
        # NOTE: we must reinstall intents until we have a persistent intent
        #       datastore!
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                                "assign predetermined host-to-host intents." +\
                                " After installation, check that the intent" +\
                                " is distributed to all nodes and the state" +\
                                " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        # Verify app name<->id mappings agree across all active nodes
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn("First pingall failed. Trying again...")
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass= passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        # Re-check id consistency after deactivating the fwd app
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        # Pair hosts h8..h17 with h18..h27; the obelisk topology's host MAC
        # addresses end in the hex value of the host number
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                # Round-robin the intent submissions across active nodes
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        # Record when submission finished so the anti-entropy time can be
        # measured below
        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        # Dump leadership info; every intent partition topic should have a
        # leader
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False)
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        # Poll (up to ~100s) until every active node reports the full
        # submitted intent set with all intents INSTALLED; this measures
        # how long anti-entropy takes to converge
        for j in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        # gossipTime is global so later cases can include it in the report
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        # Record the convergence time under a unique label for csv plotting
        append = False
        title = "Gossip Intents"
        count = 1
        while append is False:
            curTitle = title + str( count )
            if curTitle not in labels:
                labels.append( curTitle )
                data.append( str( gossipTime ) )
                append = True
            else:
                count += 1
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        # If intents are still missing or pending, wait a minute and dump
        # the intent, leader, partition and pending-map state once more
        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
969
970 def CASE4( self, main ):
971 """
972 Ping across added host intents
973 """
974 import json
975 import time
976 assert main.numCtrls, "main.numCtrls not defined"
977 assert main, "main not defined"
978 assert utilities.assert_equals, "utilities.assert_equals not defined"
979 assert main.CLIs, "main.CLIs not defined"
980 assert main.nodes, "main.nodes not defined"
981 main.case( "Verify connectivity by sending traffic across Intents" )
982 main.caseExplanation = "Ping across added host intents to check " +\
983 "functionality and check the state of " +\
984 "the intent"
985
986 onosCli = main.CLIs[ main.activeNodes[0] ]
987 main.step( "Check Intent state" )
988 installedCheck = False
989 loopCount = 0
990 while not installedCheck and loopCount < 40:
991 installedCheck = True
992 # Print the intent states
993 intents = onosCli.intents()
994 intentStates = []
995 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
996 count = 0
997 # Iter through intents of a node
998 try:
999 for intent in json.loads( intents ):
1000 state = intent.get( 'state', None )
1001 if "INSTALLED" not in state:
1002 installedCheck = False
1003 intentId = intent.get( 'id', None )
1004 intentStates.append( ( intentId, state ) )
1005 except ( ValueError, TypeError ):
1006 main.log.exception( "Error parsing intents." )
1007 # Print states
1008 intentStates.sort()
1009 for i, s in intentStates:
1010 count += 1
1011 main.log.info( "%-6s%-15s%-15s" %
1012 ( str( count ), str( i ), str( s ) ) )
1013 if not installedCheck:
1014 time.sleep( 1 )
1015 loopCount += 1
1016 utilities.assert_equals( expect=True, actual=installedCheck,
1017 onpass="Intents are all INSTALLED",
1018 onfail="Intents are not all in " +
1019 "INSTALLED state" )
1020
1021 main.step( "Ping across added host intents" )
1022 PingResult = main.TRUE
1023 for i in range( 8, 18 ):
1024 ping = main.Mininet1.pingHost( src="h" + str( i ),
1025 target="h" + str( i + 10 ) )
1026 PingResult = PingResult and ping
1027 if ping == main.FALSE:
1028 main.log.warn( "Ping failed between h" + str( i ) +
1029 " and h" + str( i + 10 ) )
1030 elif ping == main.TRUE:
1031 main.log.info( "Ping test passed!" )
1032 # Don't set PingResult or you'd override failures
1033 if PingResult == main.FALSE:
1034 main.log.error(
1035 "Intents have not been installed correctly, pings failed." )
1036 # TODO: pretty print
1037 main.log.warn( "ONOS1 intents: " )
1038 try:
1039 tmpIntents = onosCli.intents()
1040 main.log.warn( json.dumps( json.loads( tmpIntents ),
1041 sort_keys=True,
1042 indent=4,
1043 separators=( ',', ': ' ) ) )
1044 except ( ValueError, TypeError ):
1045 main.log.warn( repr( tmpIntents ) )
1046 utilities.assert_equals(
1047 expect=main.TRUE,
1048 actual=PingResult,
1049 onpass="Intents have been installed correctly and pings work",
1050 onfail="Intents have not been installed correctly, pings failed." )
1051
1052 main.step( "Check leadership of topics" )
1053 leaders = onosCli.leaders()
1054 topicCheck = main.TRUE
1055 try:
1056 if leaders:
1057 parsedLeaders = json.loads( leaders )
1058 main.log.warn( json.dumps( parsedLeaders,
1059 sort_keys=True,
1060 indent=4,
1061 separators=( ',', ': ' ) ) )
1062 # check for all intent partitions
1063 # check for election
1064 # TODO: Look at Devices as topics now that it uses this system
1065 topics = []
1066 for i in range( 14 ):
1067 topics.append( "intent-partition-" + str( i ) )
1068 # FIXME: this should only be after we start the app
1069 # FIXME: topics.append( "org.onosproject.election" )
1070 # Print leaders output
1071 main.log.debug( topics )
1072 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1073 for topic in topics:
1074 if topic not in ONOStopics:
1075 main.log.error( "Error: " + topic +
1076 " not in leaders" )
1077 topicCheck = main.FALSE
1078 else:
1079 main.log.error( "leaders() returned None" )
1080 topicCheck = main.FALSE
1081 except ( ValueError, TypeError ):
1082 topicCheck = main.FALSE
1083 main.log.exception( "Error parsing leaders" )
1084 main.log.error( repr( leaders ) )
1085 # TODO: Check for a leader of these topics
1086 # Check all nodes
1087 if topicCheck:
1088 for i in main.activeNodes:
1089 node = main.CLIs[i]
1090 response = node.leaders( jsonFormat=False)
1091 main.log.warn( str( node.name ) + " leaders output: \n" +
1092 str( response ) )
1093
1094 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1095 onpass="intent Partitions is in leaders",
1096 onfail="Some topics were lost " )
1097 # Print partitions
1098 partitions = onosCli.partitions()
1099 try:
1100 if partitions :
1101 parsedPartitions = json.loads( partitions )
1102 main.log.warn( json.dumps( parsedPartitions,
1103 sort_keys=True,
1104 indent=4,
1105 separators=( ',', ': ' ) ) )
1106 # TODO check for a leader in all paritions
1107 # TODO check for consistency among nodes
1108 else:
1109 main.log.error( "partitions() returned None" )
1110 except ( ValueError, TypeError ):
1111 main.log.exception( "Error parsing partitions" )
1112 main.log.error( repr( partitions ) )
1113 # Print Pending Map
1114 pendingMap = onosCli.pendingMap()
1115 try:
1116 if pendingMap :
1117 parsedPending = json.loads( pendingMap )
1118 main.log.warn( json.dumps( parsedPending,
1119 sort_keys=True,
1120 indent=4,
1121 separators=( ',', ': ' ) ) )
1122 # TODO check something here?
1123 else:
1124 main.log.error( "pendingMap() returned None" )
1125 except ( ValueError, TypeError ):
1126 main.log.exception( "Error parsing pending map" )
1127 main.log.error( repr( pendingMap ) )
1128
1129 if not installedCheck:
1130 main.log.info( "Waiting 60 seconds to see if the state of " +
1131 "intents change" )
1132 time.sleep( 60 )
1133 # Print the intent states
1134 intents = onosCli.intents()
1135 intentStates = []
1136 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1137 count = 0
1138 # Iter through intents of a node
1139 try:
1140 for intent in json.loads( intents ):
1141 state = intent.get( 'state', None )
1142 if "INSTALLED" not in state:
1143 installedCheck = False
1144 intentId = intent.get( 'id', None )
1145 intentStates.append( ( intentId, state ) )
1146 except ( ValueError, TypeError ):
1147 main.log.exception( "Error parsing intents." )
1148 intentStates.sort()
1149 for i, s in intentStates:
1150 count += 1
1151 main.log.info( "%-6s%-15s%-15s" %
1152 ( str( count ), str( i ), str( s ) ) )
1153 leaders = onosCli.leaders()
1154 try:
1155 missing = False
1156 if leaders:
1157 parsedLeaders = json.loads( leaders )
1158 main.log.warn( json.dumps( parsedLeaders,
1159 sort_keys=True,
1160 indent=4,
1161 separators=( ',', ': ' ) ) )
1162 # check for all intent partitions
1163 # check for election
1164 topics = []
1165 for i in range( 14 ):
1166 topics.append( "intent-partition-" + str( i ) )
1167 # FIXME: this should only be after we start the app
1168 topics.append( "org.onosproject.election" )
1169 main.log.debug( topics )
1170 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1171 for topic in topics:
1172 if topic not in ONOStopics:
1173 main.log.error( "Error: " + topic +
1174 " not in leaders" )
1175 missing = True
1176 else:
1177 main.log.error( "leaders() returned None" )
1178 except ( ValueError, TypeError ):
1179 main.log.exception( "Error parsing leaders" )
1180 main.log.error( repr( leaders ) )
1181 if missing:
1182 for i in main.activeNodes:
1183 node = main.CLIs[i]
1184 response = node.leaders( jsonFormat=False)
1185 main.log.warn( str( node.name ) + " leaders output: \n" +
1186 str( response ) )
1187
1188 partitions = onosCli.partitions()
1189 try:
1190 if partitions :
1191 parsedPartitions = json.loads( partitions )
1192 main.log.warn( json.dumps( parsedPartitions,
1193 sort_keys=True,
1194 indent=4,
1195 separators=( ',', ': ' ) ) )
1196 # TODO check for a leader in all paritions
1197 # TODO check for consistency among nodes
1198 else:
1199 main.log.error( "partitions() returned None" )
1200 except ( ValueError, TypeError ):
1201 main.log.exception( "Error parsing partitions" )
1202 main.log.error( repr( partitions ) )
1203 pendingMap = onosCli.pendingMap()
1204 try:
1205 if pendingMap :
1206 parsedPending = json.loads( pendingMap )
1207 main.log.warn( json.dumps( parsedPending,
1208 sort_keys=True,
1209 indent=4,
1210 separators=( ',', ': ' ) ) )
1211 # TODO check something here?
1212 else:
1213 main.log.error( "pendingMap() returned None" )
1214 except ( ValueError, TypeError ):
1215 main.log.exception( "Error parsing pending map" )
1216 main.log.error( repr( pendingMap ) )
1217 # Print flowrules
1218 node = main.activeNodes[0]
1219 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
1220 main.step( "Wait a minute then ping again" )
1221 # the wait is above
1222 PingResult = main.TRUE
1223 for i in range( 8, 18 ):
1224 ping = main.Mininet1.pingHost( src="h" + str( i ),
1225 target="h" + str( i + 10 ) )
1226 PingResult = PingResult and ping
1227 if ping == main.FALSE:
1228 main.log.warn( "Ping failed between h" + str( i ) +
1229 " and h" + str( i + 10 ) )
1230 elif ping == main.TRUE:
1231 main.log.info( "Ping test passed!" )
1232 # Don't set PingResult or you'd override failures
1233 if PingResult == main.FALSE:
1234 main.log.error(
1235 "Intents have not been installed correctly, pings failed." )
1236 # TODO: pretty print
1237 main.log.warn( "ONOS1 intents: " )
1238 try:
1239 tmpIntents = onosCli.intents()
1240 main.log.warn( json.dumps( json.loads( tmpIntents ),
1241 sort_keys=True,
1242 indent=4,
1243 separators=( ',', ': ' ) ) )
1244 except ( ValueError, TypeError ):
1245 main.log.warn( repr( tmpIntents ) )
1246 utilities.assert_equals(
1247 expect=main.TRUE,
1248 actual=PingResult,
1249 onpass="Intents have been installed correctly and pings work",
1250 onfail="Intents have not been installed correctly, pings failed." )
1251
1252 def CASE5( self, main ):
1253 """
1254 Reading state of ONOS
1255 """
1256 import json
1257 import time
1258 assert main.numCtrls, "main.numCtrls not defined"
1259 assert main, "main not defined"
1260 assert utilities.assert_equals, "utilities.assert_equals not defined"
1261 assert main.CLIs, "main.CLIs not defined"
1262 assert main.nodes, "main.nodes not defined"
1263
1264 main.case( "Setting up and gathering data for current state" )
1265 # The general idea for this test case is to pull the state of
1266 # ( intents,flows, topology,... ) from each ONOS node
1267 # We can then compare them with each other and also with past states
1268
1269 main.step( "Check that each switch has a master" )
1270 global mastershipState
1271 mastershipState = '[]'
1272
1273 # Assert that each device has a master
1274 rolesNotNull = main.TRUE
1275 threads = []
1276 for i in main.activeNodes:
1277 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1278 name="rolesNotNull-" + str( i ),
1279 args=[] )
1280 threads.append( t )
1281 t.start()
1282
1283 for t in threads:
1284 t.join()
1285 rolesNotNull = rolesNotNull and t.result
1286 utilities.assert_equals(
1287 expect=main.TRUE,
1288 actual=rolesNotNull,
1289 onpass="Each device has a master",
1290 onfail="Some devices don't have a master assigned" )
1291
1292 main.step( "Get the Mastership of each switch from each controller" )
1293 ONOSMastership = []
1294 consistentMastership = True
1295 rolesResults = True
1296 threads = []
1297 for i in main.activeNodes:
1298 t = main.Thread( target=main.CLIs[i].roles,
1299 name="roles-" + str( i ),
1300 args=[] )
1301 threads.append( t )
1302 t.start()
1303
1304 for t in threads:
1305 t.join()
1306 ONOSMastership.append( t.result )
1307
1308 for i in range( len( ONOSMastership ) ):
1309 node = str( main.activeNodes[i] + 1 )
1310 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1311 main.log.error( "Error in getting ONOS" + node + " roles" )
1312 main.log.warn( "ONOS" + node + " mastership response: " +
1313 repr( ONOSMastership[i] ) )
1314 rolesResults = False
1315 utilities.assert_equals(
1316 expect=True,
1317 actual=rolesResults,
1318 onpass="No error in reading roles output",
1319 onfail="Error in reading roles from ONOS" )
1320
1321 main.step( "Check for consistency in roles from each controller" )
1322 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1323 main.log.info(
1324 "Switch roles are consistent across all ONOS nodes" )
1325 else:
1326 consistentMastership = False
1327 utilities.assert_equals(
1328 expect=True,
1329 actual=consistentMastership,
1330 onpass="Switch roles are consistent across all ONOS nodes",
1331 onfail="ONOS nodes have different views of switch roles" )
1332
1333 if rolesResults and not consistentMastership:
1334 for i in range( len( main.activeNodes ) ):
1335 node = str( main.activeNodes[i] + 1 )
1336 try:
1337 main.log.warn(
1338 "ONOS" + node + " roles: ",
1339 json.dumps(
1340 json.loads( ONOSMastership[ i ] ),
1341 sort_keys=True,
1342 indent=4,
1343 separators=( ',', ': ' ) ) )
1344 except ( ValueError, TypeError ):
1345 main.log.warn( repr( ONOSMastership[ i ] ) )
1346 elif rolesResults and consistentMastership:
1347 mastershipState = ONOSMastership[ 0 ]
1348
1349 main.step( "Get the intents from each controller" )
1350 global intentState
1351 intentState = []
1352 ONOSIntents = []
1353 consistentIntents = True # Are Intents consistent across nodes?
1354 intentsResults = True # Could we read Intents from ONOS?
1355 threads = []
1356 for i in main.activeNodes:
1357 t = main.Thread( target=main.CLIs[i].intents,
1358 name="intents-" + str( i ),
1359 args=[],
1360 kwargs={ 'jsonFormat': True } )
1361 threads.append( t )
1362 t.start()
1363
1364 for t in threads:
1365 t.join()
1366 ONOSIntents.append( t.result )
1367
1368 for i in range( len( ONOSIntents ) ):
1369 node = str( main.activeNodes[i] + 1 )
1370 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1371 main.log.error( "Error in getting ONOS" + node + " intents" )
1372 main.log.warn( "ONOS" + node + " intents response: " +
1373 repr( ONOSIntents[ i ] ) )
1374 intentsResults = False
1375 utilities.assert_equals(
1376 expect=True,
1377 actual=intentsResults,
1378 onpass="No error in reading intents output",
1379 onfail="Error in reading intents from ONOS" )
1380
1381 main.step( "Check for consistency in Intents from each controller" )
1382 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1383 main.log.info( "Intents are consistent across all ONOS " +
1384 "nodes" )
1385 else:
1386 consistentIntents = False
1387 main.log.error( "Intents not consistent" )
1388 utilities.assert_equals(
1389 expect=True,
1390 actual=consistentIntents,
1391 onpass="Intents are consistent across all ONOS nodes",
1392 onfail="ONOS nodes have different views of intents" )
1393
1394 if intentsResults:
1395 # Try to make it easy to figure out what is happening
1396 #
1397 # Intent ONOS1 ONOS2 ...
1398 # 0x01 INSTALLED INSTALLING
1399 # ... ... ...
1400 # ... ... ...
1401 title = " Id"
1402 for n in main.activeNodes:
1403 title += " " * 10 + "ONOS" + str( n + 1 )
1404 main.log.warn( title )
1405 # get all intent keys in the cluster
1406 keys = []
1407 try:
1408 # Get the set of all intent keys
1409 for nodeStr in ONOSIntents:
1410 node = json.loads( nodeStr )
1411 for intent in node:
1412 keys.append( intent.get( 'id' ) )
1413 keys = set( keys )
1414 # For each intent key, print the state on each node
1415 for key in keys:
1416 row = "%-13s" % key
1417 for nodeStr in ONOSIntents:
1418 node = json.loads( nodeStr )
1419 for intent in node:
1420 if intent.get( 'id', "Error" ) == key:
1421 row += "%-15s" % intent.get( 'state' )
1422 main.log.warn( row )
1423 # End of intent state table
1424 except ValueError as e:
1425 main.log.exception( e )
1426 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1427
1428 if intentsResults and not consistentIntents:
1429 # print the json objects
1430 n = str( main.activeNodes[-1] + 1 )
1431 main.log.debug( "ONOS" + n + " intents: " )
1432 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1433 sort_keys=True,
1434 indent=4,
1435 separators=( ',', ': ' ) ) )
1436 for i in range( len( ONOSIntents ) ):
1437 node = str( main.activeNodes[i] + 1 )
1438 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1439 main.log.debug( "ONOS" + node + " intents: " )
1440 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1441 sort_keys=True,
1442 indent=4,
1443 separators=( ',', ': ' ) ) )
1444 else:
1445 main.log.debug( "ONOS" + node + " intents match ONOS" +
1446 n + " intents" )
1447 elif intentsResults and consistentIntents:
1448 intentState = ONOSIntents[ 0 ]
1449
1450 main.step( "Get the flows from each controller" )
1451 global flowState
1452 flowState = []
1453 ONOSFlows = []
1454 ONOSFlowsJson = []
1455 flowCheck = main.FALSE
1456 consistentFlows = True
1457 flowsResults = True
1458 threads = []
1459 for i in main.activeNodes:
1460 t = main.Thread( target=main.CLIs[i].flows,
1461 name="flows-" + str( i ),
1462 args=[],
1463 kwargs={ 'jsonFormat': True } )
1464 threads.append( t )
1465 t.start()
1466
1467 # NOTE: Flows command can take some time to run
1468 time.sleep(30)
1469 for t in threads:
1470 t.join()
1471 result = t.result
1472 ONOSFlows.append( result )
1473
1474 for i in range( len( ONOSFlows ) ):
1475 num = str( main.activeNodes[i] + 1 )
1476 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1477 main.log.error( "Error in getting ONOS" + num + " flows" )
1478 main.log.warn( "ONOS" + num + " flows response: " +
1479 repr( ONOSFlows[ i ] ) )
1480 flowsResults = False
1481 ONOSFlowsJson.append( None )
1482 else:
1483 try:
1484 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1485 except ( ValueError, TypeError ):
1486 # FIXME: change this to log.error?
1487 main.log.exception( "Error in parsing ONOS" + num +
1488 " response as json." )
1489 main.log.error( repr( ONOSFlows[ i ] ) )
1490 ONOSFlowsJson.append( None )
1491 flowsResults = False
1492 utilities.assert_equals(
1493 expect=True,
1494 actual=flowsResults,
1495 onpass="No error in reading flows output",
1496 onfail="Error in reading flows from ONOS" )
1497
1498 main.step( "Check for consistency in Flows from each controller" )
1499 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1500 if all( tmp ):
1501 main.log.info( "Flow count is consistent across all ONOS nodes" )
1502 else:
1503 consistentFlows = False
1504 utilities.assert_equals(
1505 expect=True,
1506 actual=consistentFlows,
1507 onpass="The flow count is consistent across all ONOS nodes",
1508 onfail="ONOS nodes have different flow counts" )
1509
1510 if flowsResults and not consistentFlows:
1511 for i in range( len( ONOSFlows ) ):
1512 node = str( main.activeNodes[i] + 1 )
1513 try:
1514 main.log.warn(
1515 "ONOS" + node + " flows: " +
1516 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1517 indent=4, separators=( ',', ': ' ) ) )
1518 except ( ValueError, TypeError ):
1519 main.log.warn( "ONOS" + node + " flows: " +
1520 repr( ONOSFlows[ i ] ) )
1521 elif flowsResults and consistentFlows:
1522 flowCheck = main.TRUE
1523 flowState = ONOSFlows[ 0 ]
1524
1525 main.step( "Get the OF Table entries" )
1526 global flows
1527 flows = []
1528 for i in range( 1, 29 ):
1529 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1530 if flowCheck == main.FALSE:
1531 for table in flows:
1532 main.log.warn( table )
1533 # TODO: Compare switch flow tables with ONOS flow tables
1534
1535 main.step( "Start continuous pings" )
1536 main.Mininet2.pingLong(
1537 src=main.params[ 'PING' ][ 'source1' ],
1538 target=main.params[ 'PING' ][ 'target1' ],
1539 pingTime=500 )
1540 main.Mininet2.pingLong(
1541 src=main.params[ 'PING' ][ 'source2' ],
1542 target=main.params[ 'PING' ][ 'target2' ],
1543 pingTime=500 )
1544 main.Mininet2.pingLong(
1545 src=main.params[ 'PING' ][ 'source3' ],
1546 target=main.params[ 'PING' ][ 'target3' ],
1547 pingTime=500 )
1548 main.Mininet2.pingLong(
1549 src=main.params[ 'PING' ][ 'source4' ],
1550 target=main.params[ 'PING' ][ 'target4' ],
1551 pingTime=500 )
1552 main.Mininet2.pingLong(
1553 src=main.params[ 'PING' ][ 'source5' ],
1554 target=main.params[ 'PING' ][ 'target5' ],
1555 pingTime=500 )
1556 main.Mininet2.pingLong(
1557 src=main.params[ 'PING' ][ 'source6' ],
1558 target=main.params[ 'PING' ][ 'target6' ],
1559 pingTime=500 )
1560 main.Mininet2.pingLong(
1561 src=main.params[ 'PING' ][ 'source7' ],
1562 target=main.params[ 'PING' ][ 'target7' ],
1563 pingTime=500 )
1564 main.Mininet2.pingLong(
1565 src=main.params[ 'PING' ][ 'source8' ],
1566 target=main.params[ 'PING' ][ 'target8' ],
1567 pingTime=500 )
1568 main.Mininet2.pingLong(
1569 src=main.params[ 'PING' ][ 'source9' ],
1570 target=main.params[ 'PING' ][ 'target9' ],
1571 pingTime=500 )
1572 main.Mininet2.pingLong(
1573 src=main.params[ 'PING' ][ 'source10' ],
1574 target=main.params[ 'PING' ][ 'target10' ],
1575 pingTime=500 )
1576
1577 main.step( "Collecting topology information from ONOS" )
1578 devices = []
1579 threads = []
1580 for i in main.activeNodes:
1581 t = main.Thread( target=main.CLIs[i].devices,
1582 name="devices-" + str( i ),
1583 args=[ ] )
1584 threads.append( t )
1585 t.start()
1586
1587 for t in threads:
1588 t.join()
1589 devices.append( t.result )
1590 hosts = []
1591 threads = []
1592 for i in main.activeNodes:
1593 t = main.Thread( target=main.CLIs[i].hosts,
1594 name="hosts-" + str( i ),
1595 args=[ ] )
1596 threads.append( t )
1597 t.start()
1598
1599 for t in threads:
1600 t.join()
1601 try:
1602 hosts.append( json.loads( t.result ) )
1603 except ( ValueError, TypeError ):
1604 # FIXME: better handling of this, print which node
1605 # Maybe use thread name?
1606 main.log.exception( "Error parsing json output of hosts" )
1607 main.log.warn( repr( t.result ) )
1608 hosts.append( None )
1609
1610 ports = []
1611 threads = []
1612 for i in main.activeNodes:
1613 t = main.Thread( target=main.CLIs[i].ports,
1614 name="ports-" + str( i ),
1615 args=[ ] )
1616 threads.append( t )
1617 t.start()
1618
1619 for t in threads:
1620 t.join()
1621 ports.append( t.result )
1622 links = []
1623 threads = []
1624 for i in main.activeNodes:
1625 t = main.Thread( target=main.CLIs[i].links,
1626 name="links-" + str( i ),
1627 args=[ ] )
1628 threads.append( t )
1629 t.start()
1630
1631 for t in threads:
1632 t.join()
1633 links.append( t.result )
1634 clusters = []
1635 threads = []
1636 for i in main.activeNodes:
1637 t = main.Thread( target=main.CLIs[i].clusters,
1638 name="clusters-" + str( i ),
1639 args=[ ] )
1640 threads.append( t )
1641 t.start()
1642
1643 for t in threads:
1644 t.join()
1645 clusters.append( t.result )
1646 # Compare json objects for hosts and dataplane clusters
1647
1648 # hosts
1649 main.step( "Host view is consistent across ONOS nodes" )
1650 consistentHostsResult = main.TRUE
1651 for controller in range( len( hosts ) ):
1652 controllerStr = str( main.activeNodes[controller] + 1 )
1653 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1654 if hosts[ controller ] == hosts[ 0 ]:
1655 continue
1656 else: # hosts not consistent
1657 main.log.error( "hosts from ONOS" +
1658 controllerStr +
1659 " is inconsistent with ONOS1" )
1660 main.log.warn( repr( hosts[ controller ] ) )
1661 consistentHostsResult = main.FALSE
1662
1663 else:
1664 main.log.error( "Error in getting ONOS hosts from ONOS" +
1665 controllerStr )
1666 consistentHostsResult = main.FALSE
1667 main.log.warn( "ONOS" + controllerStr +
1668 " hosts response: " +
1669 repr( hosts[ controller ] ) )
1670 utilities.assert_equals(
1671 expect=main.TRUE,
1672 actual=consistentHostsResult,
1673 onpass="Hosts view is consistent across all ONOS nodes",
1674 onfail="ONOS nodes have different views of hosts" )
1675
1676 main.step( "Each host has an IP address" )
1677 ipResult = main.TRUE
1678 for controller in range( 0, len( hosts ) ):
1679 controllerStr = str( main.activeNodes[controller] + 1 )
1680 if hosts[ controller ]:
1681 for host in hosts[ controller ]:
1682 if not host.get( 'ipAddresses', [ ] ):
1683 main.log.error( "Error with host ips on controller" +
1684 controllerStr + ": " + str( host ) )
1685 ipResult = main.FALSE
1686 utilities.assert_equals(
1687 expect=main.TRUE,
1688 actual=ipResult,
1689 onpass="The ips of the hosts aren't empty",
1690 onfail="The ip of at least one host is missing" )
1691
1692 # Strongly connected clusters of devices
1693 main.step( "Cluster view is consistent across ONOS nodes" )
1694 consistentClustersResult = main.TRUE
1695 for controller in range( len( clusters ) ):
1696 controllerStr = str( main.activeNodes[controller] + 1 )
1697 if "Error" not in clusters[ controller ]:
1698 if clusters[ controller ] == clusters[ 0 ]:
1699 continue
1700 else: # clusters not consistent
1701 main.log.error( "clusters from ONOS" + controllerStr +
1702 " is inconsistent with ONOS1" )
1703 consistentClustersResult = main.FALSE
1704
1705 else:
1706 main.log.error( "Error in getting dataplane clusters " +
1707 "from ONOS" + controllerStr )
1708 consistentClustersResult = main.FALSE
1709 main.log.warn( "ONOS" + controllerStr +
1710 " clusters response: " +
1711 repr( clusters[ controller ] ) )
1712 utilities.assert_equals(
1713 expect=main.TRUE,
1714 actual=consistentClustersResult,
1715 onpass="Clusters view is consistent across all ONOS nodes",
1716 onfail="ONOS nodes have different views of clusters" )
1717 if not consistentClustersResult:
1718 main.log.debug( clusters )
1719
1720 # there should always only be one cluster
1721 main.step( "Cluster view correct across ONOS nodes" )
1722 try:
1723 numClusters = len( json.loads( clusters[ 0 ] ) )
1724 except ( ValueError, TypeError ):
1725 main.log.exception( "Error parsing clusters[0]: " +
1726 repr( clusters[ 0 ] ) )
1727 numClusters = "ERROR"
1728 utilities.assert_equals(
1729 expect=1,
1730 actual=numClusters,
1731 onpass="ONOS shows 1 SCC",
1732 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1733
1734 main.step( "Comparing ONOS topology to MN" )
1735 devicesResults = main.TRUE
1736 linksResults = main.TRUE
1737 hostsResults = main.TRUE
1738 mnSwitches = main.Mininet1.getSwitches()
1739 mnLinks = main.Mininet1.getLinks()
1740 mnHosts = main.Mininet1.getHosts()
1741 for controller in main.activeNodes:
1742 controllerStr = str( main.activeNodes[controller] + 1 )
1743 if devices[ controller ] and ports[ controller ] and\
1744 "Error" not in devices[ controller ] and\
1745 "Error" not in ports[ controller ]:
1746 currentDevicesResult = main.Mininet1.compareSwitches(
1747 mnSwitches,
1748 json.loads( devices[ controller ] ),
1749 json.loads( ports[ controller ] ) )
1750 else:
1751 currentDevicesResult = main.FALSE
1752 utilities.assert_equals( expect=main.TRUE,
1753 actual=currentDevicesResult,
1754 onpass="ONOS" + controllerStr +
1755 " Switches view is correct",
1756 onfail="ONOS" + controllerStr +
1757 " Switches view is incorrect" )
1758 if links[ controller ] and "Error" not in links[ controller ]:
1759 currentLinksResult = main.Mininet1.compareLinks(
1760 mnSwitches, mnLinks,
1761 json.loads( links[ controller ] ) )
1762 else:
1763 currentLinksResult = main.FALSE
1764 utilities.assert_equals( expect=main.TRUE,
1765 actual=currentLinksResult,
1766 onpass="ONOS" + controllerStr +
1767 " links view is correct",
1768 onfail="ONOS" + controllerStr +
1769 " links view is incorrect" )
1770
1771 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1772 currentHostsResult = main.Mininet1.compareHosts(
1773 mnHosts,
1774 hosts[ controller ] )
1775 else:
1776 currentHostsResult = main.FALSE
1777 utilities.assert_equals( expect=main.TRUE,
1778 actual=currentHostsResult,
1779 onpass="ONOS" + controllerStr +
1780 " hosts exist in Mininet",
1781 onfail="ONOS" + controllerStr +
1782 " hosts don't match Mininet" )
1783
1784 devicesResults = devicesResults and currentDevicesResult
1785 linksResults = linksResults and currentLinksResult
1786 hostsResults = hostsResults and currentHostsResult
1787
1788 main.step( "Device information is correct" )
1789 utilities.assert_equals(
1790 expect=main.TRUE,
1791 actual=devicesResults,
1792 onpass="Device information is correct",
1793 onfail="Device information is incorrect" )
1794
1795 main.step( "Links are correct" )
1796 utilities.assert_equals(
1797 expect=main.TRUE,
1798 actual=linksResults,
1799 onpass="Link are correct",
1800 onfail="Links are incorrect" )
1801
1802 main.step( "Hosts are correct" )
1803 utilities.assert_equals(
1804 expect=main.TRUE,
1805 actual=hostsResults,
1806 onpass="Hosts are correct",
1807 onfail="Hosts are incorrect" )
1808
1809 def CASE6( self, main ):
1810 """
1811 The Scaling case.
1812 """
1813 import time
1814 import re
1815 assert main.numCtrls, "main.numCtrls not defined"
1816 assert main, "main not defined"
1817 assert utilities.assert_equals, "utilities.assert_equals not defined"
1818 assert main.CLIs, "main.CLIs not defined"
1819 assert main.nodes, "main.nodes not defined"
1820 try:
1821 labels
1822 except NameError:
1823 main.log.error( "labels not defined, setting to []" )
1824 global labels
1825 labels = []
1826 try:
1827 data
1828 except NameError:
1829 main.log.error( "data not defined, setting to []" )
1830 global data
1831 data = []
1832
1833 main.case( "Swap some of the ONOS nodes" )
1834
1835 main.step( "Checking ONOS Logs for errors" )
1836 for i in main.activeNodes:
1837 node = main.nodes[i]
1838 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1839 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1840
1841 main.step( "Generate new metadata file" )
1842 old = [ main.activeNodes[0], main.activeNodes[-1] ]
1843 new = range( main.ONOSbench.maxNodes )[-2:]
1844 assert len( old ) == len( new ), "Length of nodes to swap don't match"
1845 handle = main.ONOSbench.handle
1846 for x, y in zip( old, new ):
1847 handle.sendline( "export OC{}=$OC{}".format( x + 1, y + 1 ) )
1848 handle.expect( "\$" ) # from the variable
1849 ret = handle.before
1850 handle.expect( "\$" ) # From the prompt
1851 ret += handle.before
1852 main.log.debug( ret )
1853 main.activeNodes.remove( x )
1854 main.activeNodes.append( y )
1855
1856 genResult = main.Server.generateFile( main.numCtrls )
1857 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1858 onpass="New cluster metadata file generated",
1859 onfail="Failled to generate new metadata file" )
1860 time.sleep( 5 ) # Give time for nodes to read new file
1861
1862 main.step( "Start new nodes" ) # OR stop old nodes?
1863 started = main.TRUE
1864 for i in new:
1865 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1866 utilities.assert_equals( expect=main.TRUE, actual=started,
1867 onpass="ONOS started",
1868 onfail="ONOS start NOT successful" )
1869
1870 main.step( "Checking if ONOS is up yet" )
1871 for i in range( 2 ):
1872 onosIsupResult = main.TRUE
1873 for i in main.activeNodes:
1874 node = main.nodes[i]
1875 started = main.ONOSbench.isup( node.ip_address )
1876 if not started:
1877 main.log.error( node.name + " didn't start!" )
1878 onosIsupResult = onosIsupResult and started
1879 if onosIsupResult == main.TRUE:
1880 break
1881 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1882 onpass="ONOS started",
1883 onfail="ONOS start NOT successful" )
1884
Jon Hall6509dbf2016-06-21 17:01:17 -07001885 main.step( "Starting ONOS CLI sessions" )
Jon Hall69b2b982016-05-11 12:04:59 -07001886 cliResults = main.TRUE
1887 threads = []
1888 for i in main.activeNodes:
1889 t = main.Thread( target=main.CLIs[i].startOnosCli,
1890 name="startOnosCli-" + str( i ),
1891 args=[main.nodes[i].ip_address] )
1892 threads.append( t )
1893 t.start()
1894
1895 for t in threads:
1896 t.join()
1897 cliResults = cliResults and t.result
1898 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1899 onpass="ONOS cli started",
1900 onfail="ONOS clis did not start" )
1901
1902 main.step( "Checking ONOS nodes" )
1903 nodeResults = utilities.retry( main.HA.nodesCheck,
1904 False,
1905 args=[main.activeNodes],
1906 attempts=5 )
1907 utilities.assert_equals( expect=True, actual=nodeResults,
1908 onpass="Nodes check successful",
1909 onfail="Nodes check NOT successful" )
1910
1911 for i in range( 10 ):
1912 ready = True
1913 for i in main.activeNodes:
1914 cli = main.CLIs[i]
1915 output = cli.summary()
1916 if not output:
1917 ready = False
1918 if ready:
1919 break
1920 time.sleep( 30 )
1921 utilities.assert_equals( expect=True, actual=ready,
1922 onpass="ONOS summary command succeded",
1923 onfail="ONOS summary command failed" )
1924 if not ready:
1925 main.cleanup()
1926 main.exit()
1927
1928 # Rerun for election on new nodes
1929 runResults = main.TRUE
1930 for i in main.activeNodes:
1931 cli = main.CLIs[i]
1932 run = cli.electionTestRun()
1933 if run != main.TRUE:
1934 main.log.error( "Error running for election on " + cli.name )
1935 runResults = runResults and run
1936 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1937 onpass="Reran for election",
1938 onfail="Failed to rerun for election" )
1939
1940 for node in main.activeNodes:
1941 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1942 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1943 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1944 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1945 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1946
1947 main.step( "Reapplying cell variable to environment" )
1948 cellName = main.params[ 'ENV' ][ 'cellName' ]
1949 cellResult = main.ONOSbench.setCell( cellName )
1950 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
1951 onpass="Set cell successfull",
1952 onfail="Failled to set cell" )
1953
    def CASE7( self, main ):
        """
        Check state after ONOS scaling

        Verifies that control-plane state survived the node swap done in
        CASE6:
          - every switch still has a master assigned
          - all active nodes report identical mastership views
          - all active nodes report identical intent sets
          - intents and OF flow tables match the snapshots taken before the
            swap (saved by an earlier case as ``intentState`` / ``flows``)
          - the leadership-election app still reports a single leader

        Expects ``main.activeNodes``, ``main.CLIs`` and ``main.nodes`` to
        reflect the post-swap cluster membership.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query every active node in parallel, one thread per node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        # AND together the per-node results; one failure fails the step
        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # A node's response is unusable if it is empty/None or contains
        # the substring "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # Every node must return a string identical to the first node's
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Dump each node's pretty-printed view to help debug the skew
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE(review): sorted() here operates on the raw JSON *string*, so
        # it compares sorted character sequences, not parsed intents. It acts
        # as an order-insensitive equality check but is fragile — verify this
        # is the intended comparison.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One table row per intent id: the state seen on each node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            # Log a {state: count} histogram for this node's intents
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # EAFP probe: intentState is a global saved by an earlier case
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length but not byte-equal: compare parsed intents
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                # Dump both snapshots for post-mortem comparison
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=sameIntents,
                onpass="Intents are consistent with before scaling",
                onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            # NOTE(review): 'flows' is a global expected to be saved by an
            # earlier case; a NameError will occur here if that case was
            # skipped or failed — TODO confirm the dependency.
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []
        leaderResult = main.TRUE

        # Ask every active node who it thinks the election-app leader is
        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        # All nodes must agree on exactly one leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2262
2263 def CASE8( self, main ):
2264 """
2265 Compare topo
2266 """
2267 import json
2268 import time
2269 assert main.numCtrls, "main.numCtrls not defined"
2270 assert main, "main not defined"
2271 assert utilities.assert_equals, "utilities.assert_equals not defined"
2272 assert main.CLIs, "main.CLIs not defined"
2273 assert main.nodes, "main.nodes not defined"
2274
2275 main.case( "Compare ONOS Topology view to Mininet topology" )
2276 main.caseExplanation = "Compare topology objects between Mininet" +\
2277 " and ONOS"
2278 topoResult = main.FALSE
2279 topoFailMsg = "ONOS topology don't match Mininet"
2280 elapsed = 0
2281 count = 0
2282 main.step( "Comparing ONOS topology to MN topology" )
2283 startTime = time.time()
2284 # Give time for Gossip to work
2285 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2286 devicesResults = main.TRUE
2287 linksResults = main.TRUE
2288 hostsResults = main.TRUE
2289 hostAttachmentResults = True
2290 count += 1
2291 cliStart = time.time()
2292 devices = []
2293 threads = []
2294 for i in main.activeNodes:
2295 t = main.Thread( target=utilities.retry,
2296 name="devices-" + str( i ),
2297 args=[ main.CLIs[i].devices, [ None ] ],
2298 kwargs= { 'sleep': 5, 'attempts': 5,
2299 'randomTime': True } )
2300 threads.append( t )
2301 t.start()
2302
2303 for t in threads:
2304 t.join()
2305 devices.append( t.result )
2306 hosts = []
2307 ipResult = main.TRUE
2308 threads = []
2309 for i in main.activeNodes:
2310 t = main.Thread( target=utilities.retry,
2311 name="hosts-" + str( i ),
2312 args=[ main.CLIs[i].hosts, [ None ] ],
2313 kwargs= { 'sleep': 5, 'attempts': 5,
2314 'randomTime': True } )
2315 threads.append( t )
2316 t.start()
2317
2318 for t in threads:
2319 t.join()
2320 try:
2321 hosts.append( json.loads( t.result ) )
2322 except ( ValueError, TypeError ):
2323 main.log.exception( "Error parsing hosts results" )
2324 main.log.error( repr( t.result ) )
2325 hosts.append( None )
2326 for controller in range( 0, len( hosts ) ):
2327 controllerStr = str( main.activeNodes[controller] + 1 )
2328 if hosts[ controller ]:
2329 for host in hosts[ controller ]:
2330 if host is None or host.get( 'ipAddresses', [] ) == []:
2331 main.log.error(
2332 "Error with host ipAddresses on controller" +
2333 controllerStr + ": " + str( host ) )
2334 ipResult = main.FALSE
2335 ports = []
2336 threads = []
2337 for i in main.activeNodes:
2338 t = main.Thread( target=utilities.retry,
2339 name="ports-" + str( i ),
2340 args=[ main.CLIs[i].ports, [ None ] ],
2341 kwargs= { 'sleep': 5, 'attempts': 5,
2342 'randomTime': True } )
2343 threads.append( t )
2344 t.start()
2345
2346 for t in threads:
2347 t.join()
2348 ports.append( t.result )
2349 links = []
2350 threads = []
2351 for i in main.activeNodes:
2352 t = main.Thread( target=utilities.retry,
2353 name="links-" + str( i ),
2354 args=[ main.CLIs[i].links, [ None ] ],
2355 kwargs= { 'sleep': 5, 'attempts': 5,
2356 'randomTime': True } )
2357 threads.append( t )
2358 t.start()
2359
2360 for t in threads:
2361 t.join()
2362 links.append( t.result )
2363 clusters = []
2364 threads = []
2365 for i in main.activeNodes:
2366 t = main.Thread( target=utilities.retry,
2367 name="clusters-" + str( i ),
2368 args=[ main.CLIs[i].clusters, [ None ] ],
2369 kwargs= { 'sleep': 5, 'attempts': 5,
2370 'randomTime': True } )
2371 threads.append( t )
2372 t.start()
2373
2374 for t in threads:
2375 t.join()
2376 clusters.append( t.result )
2377
2378 elapsed = time.time() - startTime
2379 cliTime = time.time() - cliStart
2380 print "Elapsed time: " + str( elapsed )
2381 print "CLI time: " + str( cliTime )
2382
2383 if all( e is None for e in devices ) and\
2384 all( e is None for e in hosts ) and\
2385 all( e is None for e in ports ) and\
2386 all( e is None for e in links ) and\
2387 all( e is None for e in clusters ):
2388 topoFailMsg = "Could not get topology from ONOS"
2389 main.log.error( topoFailMsg )
2390 continue # Try again, No use trying to compare
2391
2392 mnSwitches = main.Mininet1.getSwitches()
2393 mnLinks = main.Mininet1.getLinks()
2394 mnHosts = main.Mininet1.getHosts()
2395 for controller in range( len( main.activeNodes ) ):
2396 controllerStr = str( main.activeNodes[controller] + 1 )
2397 if devices[ controller ] and ports[ controller ] and\
2398 "Error" not in devices[ controller ] and\
2399 "Error" not in ports[ controller ]:
2400
2401 try:
2402 currentDevicesResult = main.Mininet1.compareSwitches(
2403 mnSwitches,
2404 json.loads( devices[ controller ] ),
2405 json.loads( ports[ controller ] ) )
2406 except ( TypeError, ValueError ):
2407 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2408 devices[ controller ], ports[ controller ] ) )
2409 else:
2410 currentDevicesResult = main.FALSE
2411 utilities.assert_equals( expect=main.TRUE,
2412 actual=currentDevicesResult,
2413 onpass="ONOS" + controllerStr +
2414 " Switches view is correct",
2415 onfail="ONOS" + controllerStr +
2416 " Switches view is incorrect" )
2417
2418 if links[ controller ] and "Error" not in links[ controller ]:
2419 currentLinksResult = main.Mininet1.compareLinks(
2420 mnSwitches, mnLinks,
2421 json.loads( links[ controller ] ) )
2422 else:
2423 currentLinksResult = main.FALSE
2424 utilities.assert_equals( expect=main.TRUE,
2425 actual=currentLinksResult,
2426 onpass="ONOS" + controllerStr +
2427 " links view is correct",
2428 onfail="ONOS" + controllerStr +
2429 " links view is incorrect" )
2430 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2431 currentHostsResult = main.Mininet1.compareHosts(
2432 mnHosts,
2433 hosts[ controller ] )
2434 elif hosts[ controller ] == []:
2435 currentHostsResult = main.TRUE
2436 else:
2437 currentHostsResult = main.FALSE
2438 utilities.assert_equals( expect=main.TRUE,
2439 actual=currentHostsResult,
2440 onpass="ONOS" + controllerStr +
2441 " hosts exist in Mininet",
2442 onfail="ONOS" + controllerStr +
2443 " hosts don't match Mininet" )
2444 # CHECKING HOST ATTACHMENT POINTS
2445 hostAttachment = True
2446 zeroHosts = False
2447 # FIXME: topo-HA/obelisk specific mappings:
2448 # key is mac and value is dpid
2449 mappings = {}
2450 for i in range( 1, 29 ): # hosts 1 through 28
2451 # set up correct variables:
2452 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2453 if i == 1:
2454 deviceId = "1000".zfill(16)
2455 elif i == 2:
2456 deviceId = "2000".zfill(16)
2457 elif i == 3:
2458 deviceId = "3000".zfill(16)
2459 elif i == 4:
2460 deviceId = "3004".zfill(16)
2461 elif i == 5:
2462 deviceId = "5000".zfill(16)
2463 elif i == 6:
2464 deviceId = "6000".zfill(16)
2465 elif i == 7:
2466 deviceId = "6007".zfill(16)
2467 elif i >= 8 and i <= 17:
2468 dpid = '3' + str( i ).zfill( 3 )
2469 deviceId = dpid.zfill(16)
2470 elif i >= 18 and i <= 27:
2471 dpid = '6' + str( i ).zfill( 3 )
2472 deviceId = dpid.zfill(16)
2473 elif i == 28:
2474 deviceId = "2800".zfill(16)
2475 mappings[ macId ] = deviceId
2476 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2477 if hosts[ controller ] == []:
2478 main.log.warn( "There are no hosts discovered" )
2479 zeroHosts = True
2480 else:
2481 for host in hosts[ controller ]:
2482 mac = None
2483 location = None
2484 device = None
2485 port = None
2486 try:
2487 mac = host.get( 'mac' )
2488 assert mac, "mac field could not be found for this host object"
2489
2490 location = host.get( 'location' )
2491 assert location, "location field could not be found for this host object"
2492
2493 # Trim the protocol identifier off deviceId
2494 device = str( location.get( 'elementId' ) ).split(':')[1]
2495 assert device, "elementId field could not be found for this host location object"
2496
2497 port = location.get( 'port' )
2498 assert port, "port field could not be found for this host location object"
2499
2500 # Now check if this matches where they should be
2501 if mac and device and port:
2502 if str( port ) != "1":
2503 main.log.error( "The attachment port is incorrect for " +
2504 "host " + str( mac ) +
2505 ". Expected: 1 Actual: " + str( port) )
2506 hostAttachment = False
2507 if device != mappings[ str( mac ) ]:
2508 main.log.error( "The attachment device is incorrect for " +
2509 "host " + str( mac ) +
2510 ". Expected: " + mappings[ str( mac ) ] +
2511 " Actual: " + device )
2512 hostAttachment = False
2513 else:
2514 hostAttachment = False
2515 except AssertionError:
2516 main.log.exception( "Json object not as expected" )
2517 main.log.error( repr( host ) )
2518 hostAttachment = False
2519 else:
2520 main.log.error( "No hosts json output or \"Error\"" +
2521 " in output. hosts = " +
2522 repr( hosts[ controller ] ) )
2523 if zeroHosts is False:
2524 # TODO: Find a way to know if there should be hosts in a
2525 # given point of the test
2526 hostAttachment = True
2527
2528 # END CHECKING HOST ATTACHMENT POINTS
2529 devicesResults = devicesResults and currentDevicesResult
2530 linksResults = linksResults and currentLinksResult
2531 hostsResults = hostsResults and currentHostsResult
2532 hostAttachmentResults = hostAttachmentResults and\
2533 hostAttachment
2534 topoResult = ( devicesResults and linksResults
2535 and hostsResults and ipResult and
2536 hostAttachmentResults )
2537 utilities.assert_equals( expect=True,
2538 actual=topoResult,
2539 onpass="ONOS topology matches Mininet",
2540 onfail=topoFailMsg )
2541 # End of While loop to pull ONOS state
2542
2543 # Compare json objects for hosts and dataplane clusters
2544
2545 # hosts
2546 main.step( "Hosts view is consistent across all ONOS nodes" )
2547 consistentHostsResult = main.TRUE
2548 for controller in range( len( hosts ) ):
2549 controllerStr = str( main.activeNodes[controller] + 1 )
2550 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2551 if hosts[ controller ] == hosts[ 0 ]:
2552 continue
2553 else: # hosts not consistent
2554 main.log.error( "hosts from ONOS" + controllerStr +
2555 " is inconsistent with ONOS1" )
2556 main.log.warn( repr( hosts[ controller ] ) )
2557 consistentHostsResult = main.FALSE
2558
2559 else:
2560 main.log.error( "Error in getting ONOS hosts from ONOS" +
2561 controllerStr )
2562 consistentHostsResult = main.FALSE
2563 main.log.warn( "ONOS" + controllerStr +
2564 " hosts response: " +
2565 repr( hosts[ controller ] ) )
2566 utilities.assert_equals(
2567 expect=main.TRUE,
2568 actual=consistentHostsResult,
2569 onpass="Hosts view is consistent across all ONOS nodes",
2570 onfail="ONOS nodes have different views of hosts" )
2571
2572 main.step( "Hosts information is correct" )
2573 hostsResults = hostsResults and ipResult
2574 utilities.assert_equals(
2575 expect=main.TRUE,
2576 actual=hostsResults,
2577 onpass="Host information is correct",
2578 onfail="Host information is incorrect" )
2579
2580 main.step( "Host attachment points to the network" )
2581 utilities.assert_equals(
2582 expect=True,
2583 actual=hostAttachmentResults,
2584 onpass="Hosts are correctly attached to the network",
2585 onfail="ONOS did not correctly attach hosts to the network" )
2586
2587 # Strongly connected clusters of devices
2588 main.step( "Clusters view is consistent across all ONOS nodes" )
2589 consistentClustersResult = main.TRUE
2590 for controller in range( len( clusters ) ):
2591 controllerStr = str( main.activeNodes[controller] + 1 )
2592 if "Error" not in clusters[ controller ]:
2593 if clusters[ controller ] == clusters[ 0 ]:
2594 continue
2595 else: # clusters not consistent
2596 main.log.error( "clusters from ONOS" +
2597 controllerStr +
2598 " is inconsistent with ONOS1" )
2599 consistentClustersResult = main.FALSE
2600 else:
2601 main.log.error( "Error in getting dataplane clusters " +
2602 "from ONOS" + controllerStr )
2603 consistentClustersResult = main.FALSE
2604 main.log.warn( "ONOS" + controllerStr +
2605 " clusters response: " +
2606 repr( clusters[ controller ] ) )
2607 utilities.assert_equals(
2608 expect=main.TRUE,
2609 actual=consistentClustersResult,
2610 onpass="Clusters view is consistent across all ONOS nodes",
2611 onfail="ONOS nodes have different views of clusters" )
2612 if not consistentClustersResult:
2613 main.log.debug( clusters )
2614 for x in links:
2615 main.log.warn( "{}: {}".format( len( x ), x ) )
2616
2617
2618 main.step( "There is only one SCC" )
2619 # there should always only be one cluster
2620 try:
2621 numClusters = len( json.loads( clusters[ 0 ] ) )
2622 except ( ValueError, TypeError ):
2623 main.log.exception( "Error parsing clusters[0]: " +
2624 repr( clusters[0] ) )
2625 numClusters = "ERROR"
2626 clusterResults = main.FALSE
2627 if numClusters == 1:
2628 clusterResults = main.TRUE
2629 utilities.assert_equals(
2630 expect=1,
2631 actual=numClusters,
2632 onpass="ONOS shows 1 SCC",
2633 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2634
2635 topoResult = ( devicesResults and linksResults
2636 and hostsResults and consistentHostsResult
2637 and consistentClustersResult and clusterResults
2638 and ipResult and hostAttachmentResults )
2639
2640 topoResult = topoResult and int( count <= 2 )
2641 note = "note it takes about " + str( int( cliTime ) ) + \
2642 " seconds for the test to make all the cli calls to fetch " +\
2643 "the topology from each ONOS instance"
2644 main.log.info(
2645 "Very crass estimate for topology discovery/convergence( " +
2646 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2647 str( count ) + " tries" )
2648
2649 main.step( "Device information is correct" )
2650 utilities.assert_equals(
2651 expect=main.TRUE,
2652 actual=devicesResults,
2653 onpass="Device information is correct",
2654 onfail="Device information is incorrect" )
2655
2656 main.step( "Links are correct" )
2657 utilities.assert_equals(
2658 expect=main.TRUE,
2659 actual=linksResults,
2660 onpass="Link are correct",
2661 onfail="Links are incorrect" )
2662
2663 main.step( "Hosts are correct" )
2664 utilities.assert_equals(
2665 expect=main.TRUE,
2666 actual=hostsResults,
2667 onpass="Hosts are correct",
2668 onfail="Hosts are incorrect" )
2669
2670 # FIXME: move this to an ONOS state case
2671 main.step( "Checking ONOS nodes" )
2672 nodeResults = utilities.retry( main.HA.nodesCheck,
2673 False,
2674 args=[main.activeNodes],
2675 attempts=5 )
2676 utilities.assert_equals( expect=True, actual=nodeResults,
2677 onpass="Nodes check successful",
2678 onfail="Nodes check NOT successful" )
2679 if not nodeResults:
2680 for i in main.activeNodes:
2681 main.log.debug( "{} components not ACTIVE: \n{}".format(
2682 main.CLIs[i].name,
2683 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
2684
Jon Halld2871c22016-07-26 11:01:14 -07002685 if not topoResult:
2686 main.cleanup()
2687 main.exit()
2688
Jon Hall69b2b982016-05-11 12:04:59 -07002689 def CASE9( self, main ):
2690 """
2691 Link s3-s28 down
2692 """
2693 import time
2694 assert main.numCtrls, "main.numCtrls not defined"
2695 assert main, "main not defined"
2696 assert utilities.assert_equals, "utilities.assert_equals not defined"
2697 assert main.CLIs, "main.CLIs not defined"
2698 assert main.nodes, "main.nodes not defined"
2699 # NOTE: You should probably run a topology check after this
2700
2701 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2702
2703 description = "Turn off a link to ensure that Link Discovery " +\
2704 "is working properly"
2705 main.case( description )
2706
2707 main.step( "Kill Link between s3 and s28" )
2708 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2709 main.log.info( "Waiting " + str( linkSleep ) +
2710 " seconds for link down to be discovered" )
2711 time.sleep( linkSleep )
2712 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2713 onpass="Link down successful",
2714 onfail="Failed to bring link down" )
2715 # TODO do some sort of check here
2716
2717 def CASE10( self, main ):
2718 """
2719 Link s3-s28 up
2720 """
2721 import time
2722 assert main.numCtrls, "main.numCtrls not defined"
2723 assert main, "main not defined"
2724 assert utilities.assert_equals, "utilities.assert_equals not defined"
2725 assert main.CLIs, "main.CLIs not defined"
2726 assert main.nodes, "main.nodes not defined"
2727 # NOTE: You should probably run a topology check after this
2728
2729 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2730
2731 description = "Restore a link to ensure that Link Discovery is " + \
2732 "working properly"
2733 main.case( description )
2734
2735 main.step( "Bring link between s3 and s28 back up" )
2736 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2737 main.log.info( "Waiting " + str( linkSleep ) +
2738 " seconds for link up to be discovered" )
2739 time.sleep( linkSleep )
2740 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2741 onpass="Link up successful",
2742 onfail="Failed to bring link up" )
2743 # TODO do some sort of check here
2744
2745 def CASE11( self, main ):
2746 """
2747 Switch Down
2748 """
2749 # NOTE: You should probably run a topology check after this
2750 import time
2751 assert main.numCtrls, "main.numCtrls not defined"
2752 assert main, "main not defined"
2753 assert utilities.assert_equals, "utilities.assert_equals not defined"
2754 assert main.CLIs, "main.CLIs not defined"
2755 assert main.nodes, "main.nodes not defined"
2756
2757 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2758
2759 description = "Killing a switch to ensure it is discovered correctly"
2760 onosCli = main.CLIs[ main.activeNodes[0] ]
2761 main.case( description )
2762 switch = main.params[ 'kill' ][ 'switch' ]
2763 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2764
2765 # TODO: Make this switch parameterizable
2766 main.step( "Kill " + switch )
2767 main.log.info( "Deleting " + switch )
2768 main.Mininet1.delSwitch( switch )
2769 main.log.info( "Waiting " + str( switchSleep ) +
2770 " seconds for switch down to be discovered" )
2771 time.sleep( switchSleep )
2772 device = onosCli.getDevice( dpid=switchDPID )
2773 # Peek at the deleted switch
2774 main.log.warn( str( device ) )
2775 result = main.FALSE
2776 if device and device[ 'available' ] is False:
2777 result = main.TRUE
2778 utilities.assert_equals( expect=main.TRUE, actual=result,
2779 onpass="Kill switch successful",
2780 onfail="Failed to kill switch?" )
2781
2782 def CASE12( self, main ):
2783 """
2784 Switch Up
2785 """
2786 # NOTE: You should probably run a topology check after this
2787 import time
2788 assert main.numCtrls, "main.numCtrls not defined"
2789 assert main, "main not defined"
2790 assert utilities.assert_equals, "utilities.assert_equals not defined"
2791 assert main.CLIs, "main.CLIs not defined"
2792 assert main.nodes, "main.nodes not defined"
2793
2794 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2795 switch = main.params[ 'kill' ][ 'switch' ]
2796 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2797 links = main.params[ 'kill' ][ 'links' ].split()
2798 onosCli = main.CLIs[ main.activeNodes[0] ]
2799 description = "Adding a switch to ensure it is discovered correctly"
2800 main.case( description )
2801
2802 main.step( "Add back " + switch )
2803 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2804 for peer in links:
2805 main.Mininet1.addLink( switch, peer )
2806 ipList = [ node.ip_address for node in main.nodes ]
2807 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2808 main.log.info( "Waiting " + str( switchSleep ) +
2809 " seconds for switch up to be discovered" )
2810 time.sleep( switchSleep )
2811 device = onosCli.getDevice( dpid=switchDPID )
2812 # Peek at the deleted switch
2813 main.log.warn( str( device ) )
2814 result = main.FALSE
2815 if device and device[ 'available' ]:
2816 result = main.TRUE
2817 utilities.assert_equals( expect=main.TRUE, actual=result,
2818 onpass="add switch successful",
2819 onfail="Failed to add switch?" )
2820
2821 def CASE13( self, main ):
2822 """
2823 Clean up
2824 """
2825 assert main.numCtrls, "main.numCtrls not defined"
2826 assert main, "main not defined"
2827 assert utilities.assert_equals, "utilities.assert_equals not defined"
2828 assert main.CLIs, "main.CLIs not defined"
2829 assert main.nodes, "main.nodes not defined"
2830
2831 main.case( "Test Cleanup" )
2832 main.step( "Killing tcpdumps" )
2833 main.Mininet2.stopTcpdump()
2834
2835 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2836 main.step( "Copying MN pcap and ONOS log files to test station" )
2837 # NOTE: MN Pcap file is being saved to logdir.
2838 # We scp this file as MN and TestON aren't necessarily the same vm
2839
2840 # FIXME: To be replaced with a Jenkin's post script
2841 # TODO: Load these from params
2842 # NOTE: must end in /
2843 logFolder = "/opt/onos/log/"
2844 logFiles = [ "karaf.log", "karaf.log.1" ]
2845 # NOTE: must end in /
2846 for f in logFiles:
2847 for node in main.nodes:
2848 dstName = main.logdir + "/" + node.name + "-" + f
2849 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2850 logFolder + f, dstName )
2851 # std*.log's
2852 # NOTE: must end in /
2853 logFolder = "/opt/onos/var/"
2854 logFiles = [ "stderr.log", "stdout.log" ]
2855 # NOTE: must end in /
2856 for f in logFiles:
2857 for node in main.nodes:
2858 dstName = main.logdir + "/" + node.name + "-" + f
2859 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2860 logFolder + f, dstName )
2861 else:
2862 main.log.debug( "skipping saving log files" )
2863
2864 main.step( "Stopping Mininet" )
2865 mnResult = main.Mininet1.stopNet()
2866 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2867 onpass="Mininet stopped",
2868 onfail="MN cleanup NOT successful" )
2869
2870 main.step( "Checking ONOS Logs for errors" )
2871 for node in main.nodes:
2872 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2873 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2874
2875 try:
2876 timerLog = open( main.logdir + "/Timers.csv", 'w')
2877 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2878 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2879 timerLog.close()
2880 except NameError, e:
2881 main.log.exception(e)
2882
2883 main.step( "Stopping webserver" )
2884 status = main.Server.stop( )
2885 utilities.assert_equals( expect=main.TRUE, actual=status,
2886 onpass="Stop Server",
2887 onfail="Failled to stop SimpleHTTPServer" )
2888 del main.Server
2889
2890 def CASE14( self, main ):
2891 """
2892 start election app on all onos nodes
2893 """
2894 import time
2895 assert main.numCtrls, "main.numCtrls not defined"
2896 assert main, "main not defined"
2897 assert utilities.assert_equals, "utilities.assert_equals not defined"
2898 assert main.CLIs, "main.CLIs not defined"
2899 assert main.nodes, "main.nodes not defined"
2900
2901 main.case("Start Leadership Election app")
2902 main.step( "Install leadership election app" )
2903 onosCli = main.CLIs[ main.activeNodes[0] ]
2904 appResult = onosCli.activateApp( "org.onosproject.election" )
2905 utilities.assert_equals(
2906 expect=main.TRUE,
2907 actual=appResult,
2908 onpass="Election app installed",
2909 onfail="Something went wrong with installing Leadership election" )
2910
2911 main.step( "Run for election on each node" )
2912 for i in main.activeNodes:
2913 main.CLIs[i].electionTestRun()
2914 time.sleep(5)
2915 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2916 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2917 utilities.assert_equals(
2918 expect=True,
2919 actual=sameResult,
2920 onpass="All nodes see the same leaderboards",
2921 onfail="Inconsistent leaderboards" )
2922
2923 if sameResult:
2924 leader = leaders[ 0 ][ 0 ]
2925 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2926 correctLeader = True
2927 else:
2928 correctLeader = False
2929 main.step( "First node was elected leader" )
2930 utilities.assert_equals(
2931 expect=True,
2932 actual=correctLeader,
2933 onpass="Correct leader was elected",
2934 onfail="Incorrect leader" )
2935
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app there is nothing meaningful to test
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All boards agree; first entry of the first board is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: no break means no matching node was found
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' is only acceptable when a single-node cluster withdrew
            # its only candidate (expectNoLeader)
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Not enough candidates on the old board to know who was next in line
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this sleep
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                     str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3109
3110 def CASE16( self, main ):
3111 """
3112 Install Distributed Primitives app
3113 """
3114 import time
3115 assert main.numCtrls, "main.numCtrls not defined"
3116 assert main, "main not defined"
3117 assert utilities.assert_equals, "utilities.assert_equals not defined"
3118 assert main.CLIs, "main.CLIs not defined"
3119 assert main.nodes, "main.nodes not defined"
3120
3121 # Variables for the distributed primitives tests
3122 global pCounterName
3123 global pCounterValue
3124 global onosSet
3125 global onosSetName
3126 pCounterName = "TestON-Partitions"
3127 pCounterValue = 0
3128 onosSet = set([])
3129 onosSetName = "TestON-set"
3130
3131 description = "Install Primitives app"
3132 main.case( description )
3133 main.step( "Install Primitives app" )
3134 appName = "org.onosproject.distributedprimitives"
3135 node = main.activeNodes[0]
3136 appResults = main.CLIs[node].activateApp( appName )
3137 utilities.assert_equals( expect=main.TRUE,
3138 actual=appResults,
3139 onpass="Primitives app activated",
3140 onfail="Primitives app not activated" )
3141 time.sleep( 5 ) # To allow all nodes to activate
3142
3143 def CASE17( self, main ):
3144 """
3145 Check for basic functionality with distributed primitives
3146 """
3147 # Make sure variables are defined/set
3148 assert main.numCtrls, "main.numCtrls not defined"
3149 assert main, "main not defined"
3150 assert utilities.assert_equals, "utilities.assert_equals not defined"
3151 assert main.CLIs, "main.CLIs not defined"
3152 assert main.nodes, "main.nodes not defined"
3153 assert pCounterName, "pCounterName not defined"
3154 assert onosSetName, "onosSetName not defined"
3155 # NOTE: assert fails if value is 0/None/Empty/False
3156 try:
3157 pCounterValue
3158 except NameError:
3159 main.log.error( "pCounterValue not defined, setting to 0" )
3160 pCounterValue = 0
3161 try:
3162 onosSet
3163 except NameError:
3164 main.log.error( "onosSet not defined, setting to empty Set" )
3165 onosSet = set([])
3166 # Variables for the distributed primitives tests. These are local only
3167 addValue = "a"
3168 addAllValue = "a b c d e f"
3169 retainValue = "c d e f"
3170
3171 description = "Check for basic functionality with distributed " +\
3172 "primitives"
3173 main.case( description )
3174 main.caseExplanation = "Test the methods of the distributed " +\
3175 "primitives (counters and sets) throught the cli"
3176 # DISTRIBUTED ATOMIC COUNTERS
3177 # Partitioned counters
3178 main.step( "Increment then get a default counter on each node" )
3179 pCounters = []
3180 threads = []
3181 addedPValues = []
3182 for i in main.activeNodes:
3183 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3184 name="counterAddAndGet-" + str( i ),
3185 args=[ pCounterName ] )
3186 pCounterValue += 1
3187 addedPValues.append( pCounterValue )
3188 threads.append( t )
3189 t.start()
3190
3191 for t in threads:
3192 t.join()
3193 pCounters.append( t.result )
3194 # Check that counter incremented numController times
3195 pCounterResults = True
3196 for i in addedPValues:
3197 tmpResult = i in pCounters
3198 pCounterResults = pCounterResults and tmpResult
3199 if not tmpResult:
3200 main.log.error( str( i ) + " is not in partitioned "
3201 "counter incremented results" )
3202 utilities.assert_equals( expect=True,
3203 actual=pCounterResults,
3204 onpass="Default counter incremented",
3205 onfail="Error incrementing default" +
3206 " counter" )
3207
3208 main.step( "Get then Increment a default counter on each node" )
3209 pCounters = []
3210 threads = []
3211 addedPValues = []
3212 for i in main.activeNodes:
3213 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3214 name="counterGetAndAdd-" + str( i ),
3215 args=[ pCounterName ] )
3216 addedPValues.append( pCounterValue )
3217 pCounterValue += 1
3218 threads.append( t )
3219 t.start()
3220
3221 for t in threads:
3222 t.join()
3223 pCounters.append( t.result )
3224 # Check that counter incremented numController times
3225 pCounterResults = True
3226 for i in addedPValues:
3227 tmpResult = i in pCounters
3228 pCounterResults = pCounterResults and tmpResult
3229 if not tmpResult:
3230 main.log.error( str( i ) + " is not in partitioned "
3231 "counter incremented results" )
3232 utilities.assert_equals( expect=True,
3233 actual=pCounterResults,
3234 onpass="Default counter incremented",
3235 onfail="Error incrementing default" +
3236 " counter" )
3237
3238 main.step( "Counters we added have the correct values" )
3239 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3240 utilities.assert_equals( expect=main.TRUE,
3241 actual=incrementCheck,
3242 onpass="Added counters are correct",
3243 onfail="Added counters are incorrect" )
3244
3245 main.step( "Add -8 to then get a default counter on each node" )
3246 pCounters = []
3247 threads = []
3248 addedPValues = []
3249 for i in main.activeNodes:
3250 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3251 name="counterIncrement-" + str( i ),
3252 args=[ pCounterName ],
3253 kwargs={ "delta": -8 } )
3254 pCounterValue += -8
3255 addedPValues.append( pCounterValue )
3256 threads.append( t )
3257 t.start()
3258
3259 for t in threads:
3260 t.join()
3261 pCounters.append( t.result )
3262 # Check that counter incremented numController times
3263 pCounterResults = True
3264 for i in addedPValues:
3265 tmpResult = i in pCounters
3266 pCounterResults = pCounterResults and tmpResult
3267 if not tmpResult:
3268 main.log.error( str( i ) + " is not in partitioned "
3269 "counter incremented results" )
3270 utilities.assert_equals( expect=True,
3271 actual=pCounterResults,
3272 onpass="Default counter incremented",
3273 onfail="Error incrementing default" +
3274 " counter" )
3275
3276 main.step( "Add 5 to then get a default counter on each node" )
3277 pCounters = []
3278 threads = []
3279 addedPValues = []
3280 for i in main.activeNodes:
3281 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3282 name="counterIncrement-" + str( i ),
3283 args=[ pCounterName ],
3284 kwargs={ "delta": 5 } )
3285 pCounterValue += 5
3286 addedPValues.append( pCounterValue )
3287 threads.append( t )
3288 t.start()
3289
3290 for t in threads:
3291 t.join()
3292 pCounters.append( t.result )
3293 # Check that counter incremented numController times
3294 pCounterResults = True
3295 for i in addedPValues:
3296 tmpResult = i in pCounters
3297 pCounterResults = pCounterResults and tmpResult
3298 if not tmpResult:
3299 main.log.error( str( i ) + " is not in partitioned "
3300 "counter incremented results" )
3301 utilities.assert_equals( expect=True,
3302 actual=pCounterResults,
3303 onpass="Default counter incremented",
3304 onfail="Error incrementing default" +
3305 " counter" )
3306
3307 main.step( "Get then add 5 to a default counter on each node" )
3308 pCounters = []
3309 threads = []
3310 addedPValues = []
3311 for i in main.activeNodes:
3312 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3313 name="counterIncrement-" + str( i ),
3314 args=[ pCounterName ],
3315 kwargs={ "delta": 5 } )
3316 addedPValues.append( pCounterValue )
3317 pCounterValue += 5
3318 threads.append( t )
3319 t.start()
3320
3321 for t in threads:
3322 t.join()
3323 pCounters.append( t.result )
3324 # Check that counter incremented numController times
3325 pCounterResults = True
3326 for i in addedPValues:
3327 tmpResult = i in pCounters
3328 pCounterResults = pCounterResults and tmpResult
3329 if not tmpResult:
3330 main.log.error( str( i ) + " is not in partitioned "
3331 "counter incremented results" )
3332 utilities.assert_equals( expect=True,
3333 actual=pCounterResults,
3334 onpass="Default counter incremented",
3335 onfail="Error incrementing default" +
3336 " counter" )
3337
3338 main.step( "Counters we added have the correct values" )
3339 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3340 utilities.assert_equals( expect=main.TRUE,
3341 actual=incrementCheck,
3342 onpass="Added counters are correct",
3343 onfail="Added counters are incorrect" )
3344
3345 # DISTRIBUTED SETS
3346 main.step( "Distributed Set get" )
3347 size = len( onosSet )
3348 getResponses = []
3349 threads = []
3350 for i in main.activeNodes:
3351 t = main.Thread( target=main.CLIs[i].setTestGet,
3352 name="setTestGet-" + str( i ),
3353 args=[ onosSetName ] )
3354 threads.append( t )
3355 t.start()
3356 for t in threads:
3357 t.join()
3358 getResponses.append( t.result )
3359
3360 getResults = main.TRUE
3361 for i in range( len( main.activeNodes ) ):
3362 node = str( main.activeNodes[i] + 1 )
3363 if isinstance( getResponses[ i ], list):
3364 current = set( getResponses[ i ] )
3365 if len( current ) == len( getResponses[ i ] ):
3366 # no repeats
3367 if onosSet != current:
3368 main.log.error( "ONOS" + node +
3369 " has incorrect view" +
3370 " of set " + onosSetName + ":\n" +
3371 str( getResponses[ i ] ) )
3372 main.log.debug( "Expected: " + str( onosSet ) )
3373 main.log.debug( "Actual: " + str( current ) )
3374 getResults = main.FALSE
3375 else:
3376 # error, set is not a set
3377 main.log.error( "ONOS" + node +
3378 " has repeat elements in" +
3379 " set " + onosSetName + ":\n" +
3380 str( getResponses[ i ] ) )
3381 getResults = main.FALSE
3382 elif getResponses[ i ] == main.ERROR:
3383 getResults = main.FALSE
3384 utilities.assert_equals( expect=main.TRUE,
3385 actual=getResults,
3386 onpass="Set elements are correct",
3387 onfail="Set elements are incorrect" )
3388
3389 main.step( "Distributed Set size" )
3390 sizeResponses = []
3391 threads = []
3392 for i in main.activeNodes:
3393 t = main.Thread( target=main.CLIs[i].setTestSize,
3394 name="setTestSize-" + str( i ),
3395 args=[ onosSetName ] )
3396 threads.append( t )
3397 t.start()
3398 for t in threads:
3399 t.join()
3400 sizeResponses.append( t.result )
3401
3402 sizeResults = main.TRUE
3403 for i in range( len( main.activeNodes ) ):
3404 node = str( main.activeNodes[i] + 1 )
3405 if size != sizeResponses[ i ]:
3406 sizeResults = main.FALSE
3407 main.log.error( "ONOS" + node +
3408 " expected a size of " + str( size ) +
3409 " for set " + onosSetName +
3410 " but got " + str( sizeResponses[ i ] ) )
3411 utilities.assert_equals( expect=main.TRUE,
3412 actual=sizeResults,
3413 onpass="Set sizes are correct",
3414 onfail="Set sizes are incorrect" )
3415
3416 main.step( "Distributed Set add()" )
3417 onosSet.add( addValue )
3418 addResponses = []
3419 threads = []
3420 for i in main.activeNodes:
3421 t = main.Thread( target=main.CLIs[i].setTestAdd,
3422 name="setTestAdd-" + str( i ),
3423 args=[ onosSetName, addValue ] )
3424 threads.append( t )
3425 t.start()
3426 for t in threads:
3427 t.join()
3428 addResponses.append( t.result )
3429
3430 # main.TRUE = successfully changed the set
3431 # main.FALSE = action resulted in no change in set
3432 # main.ERROR - Some error in executing the function
3433 addResults = main.TRUE
3434 for i in range( len( main.activeNodes ) ):
3435 if addResponses[ i ] == main.TRUE:
3436 # All is well
3437 pass
3438 elif addResponses[ i ] == main.FALSE:
3439 # Already in set, probably fine
3440 pass
3441 elif addResponses[ i ] == main.ERROR:
3442 # Error in execution
3443 addResults = main.FALSE
3444 else:
3445 # unexpected result
3446 addResults = main.FALSE
3447 if addResults != main.TRUE:
3448 main.log.error( "Error executing set add" )
3449
3450 # Check if set is still correct
3451 size = len( onosSet )
3452 getResponses = []
3453 threads = []
3454 for i in main.activeNodes:
3455 t = main.Thread( target=main.CLIs[i].setTestGet,
3456 name="setTestGet-" + str( i ),
3457 args=[ onosSetName ] )
3458 threads.append( t )
3459 t.start()
3460 for t in threads:
3461 t.join()
3462 getResponses.append( t.result )
3463 getResults = main.TRUE
3464 for i in range( len( main.activeNodes ) ):
3465 node = str( main.activeNodes[i] + 1 )
3466 if isinstance( getResponses[ i ], list):
3467 current = set( getResponses[ i ] )
3468 if len( current ) == len( getResponses[ i ] ):
3469 # no repeats
3470 if onosSet != current:
3471 main.log.error( "ONOS" + node + " has incorrect view" +
3472 " of set " + onosSetName + ":\n" +
3473 str( getResponses[ i ] ) )
3474 main.log.debug( "Expected: " + str( onosSet ) )
3475 main.log.debug( "Actual: " + str( current ) )
3476 getResults = main.FALSE
3477 else:
3478 # error, set is not a set
3479 main.log.error( "ONOS" + node + " has repeat elements in" +
3480 " set " + onosSetName + ":\n" +
3481 str( getResponses[ i ] ) )
3482 getResults = main.FALSE
3483 elif getResponses[ i ] == main.ERROR:
3484 getResults = main.FALSE
3485 sizeResponses = []
3486 threads = []
3487 for i in main.activeNodes:
3488 t = main.Thread( target=main.CLIs[i].setTestSize,
3489 name="setTestSize-" + str( i ),
3490 args=[ onosSetName ] )
3491 threads.append( t )
3492 t.start()
3493 for t in threads:
3494 t.join()
3495 sizeResponses.append( t.result )
3496 sizeResults = main.TRUE
3497 for i in range( len( main.activeNodes ) ):
3498 node = str( main.activeNodes[i] + 1 )
3499 if size != sizeResponses[ i ]:
3500 sizeResults = main.FALSE
3501 main.log.error( "ONOS" + node +
3502 " expected a size of " + str( size ) +
3503 " for set " + onosSetName +
3504 " but got " + str( sizeResponses[ i ] ) )
3505 addResults = addResults and getResults and sizeResults
3506 utilities.assert_equals( expect=main.TRUE,
3507 actual=addResults,
3508 onpass="Set add correct",
3509 onfail="Set add was incorrect" )
3510
3511 main.step( "Distributed Set addAll()" )
3512 onosSet.update( addAllValue.split() )
3513 addResponses = []
3514 threads = []
3515 for i in main.activeNodes:
3516 t = main.Thread( target=main.CLIs[i].setTestAdd,
3517 name="setTestAddAll-" + str( i ),
3518 args=[ onosSetName, addAllValue ] )
3519 threads.append( t )
3520 t.start()
3521 for t in threads:
3522 t.join()
3523 addResponses.append( t.result )
3524
3525 # main.TRUE = successfully changed the set
3526 # main.FALSE = action resulted in no change in set
3527 # main.ERROR - Some error in executing the function
3528 addAllResults = main.TRUE
3529 for i in range( len( main.activeNodes ) ):
3530 if addResponses[ i ] == main.TRUE:
3531 # All is well
3532 pass
3533 elif addResponses[ i ] == main.FALSE:
3534 # Already in set, probably fine
3535 pass
3536 elif addResponses[ i ] == main.ERROR:
3537 # Error in execution
3538 addAllResults = main.FALSE
3539 else:
3540 # unexpected result
3541 addAllResults = main.FALSE
3542 if addAllResults != main.TRUE:
3543 main.log.error( "Error executing set addAll" )
3544
3545 # Check if set is still correct
3546 size = len( onosSet )
3547 getResponses = []
3548 threads = []
3549 for i in main.activeNodes:
3550 t = main.Thread( target=main.CLIs[i].setTestGet,
3551 name="setTestGet-" + str( i ),
3552 args=[ onosSetName ] )
3553 threads.append( t )
3554 t.start()
3555 for t in threads:
3556 t.join()
3557 getResponses.append( t.result )
3558 getResults = main.TRUE
3559 for i in range( len( main.activeNodes ) ):
3560 node = str( main.activeNodes[i] + 1 )
3561 if isinstance( getResponses[ i ], list):
3562 current = set( getResponses[ i ] )
3563 if len( current ) == len( getResponses[ i ] ):
3564 # no repeats
3565 if onosSet != current:
3566 main.log.error( "ONOS" + node +
3567 " has incorrect view" +
3568 " of set " + onosSetName + ":\n" +
3569 str( getResponses[ i ] ) )
3570 main.log.debug( "Expected: " + str( onosSet ) )
3571 main.log.debug( "Actual: " + str( current ) )
3572 getResults = main.FALSE
3573 else:
3574 # error, set is not a set
3575 main.log.error( "ONOS" + node +
3576 " has repeat elements in" +
3577 " set " + onosSetName + ":\n" +
3578 str( getResponses[ i ] ) )
3579 getResults = main.FALSE
3580 elif getResponses[ i ] == main.ERROR:
3581 getResults = main.FALSE
3582 sizeResponses = []
3583 threads = []
3584 for i in main.activeNodes:
3585 t = main.Thread( target=main.CLIs[i].setTestSize,
3586 name="setTestSize-" + str( i ),
3587 args=[ onosSetName ] )
3588 threads.append( t )
3589 t.start()
3590 for t in threads:
3591 t.join()
3592 sizeResponses.append( t.result )
3593 sizeResults = main.TRUE
3594 for i in range( len( main.activeNodes ) ):
3595 node = str( main.activeNodes[i] + 1 )
3596 if size != sizeResponses[ i ]:
3597 sizeResults = main.FALSE
3598 main.log.error( "ONOS" + node +
3599 " expected a size of " + str( size ) +
3600 " for set " + onosSetName +
3601 " but got " + str( sizeResponses[ i ] ) )
3602 addAllResults = addAllResults and getResults and sizeResults
3603 utilities.assert_equals( expect=main.TRUE,
3604 actual=addAllResults,
3605 onpass="Set addAll correct",
3606 onfail="Set addAll was incorrect" )
3607
3608 main.step( "Distributed Set contains()" )
3609 containsResponses = []
3610 threads = []
3611 for i in main.activeNodes:
3612 t = main.Thread( target=main.CLIs[i].setTestGet,
3613 name="setContains-" + str( i ),
3614 args=[ onosSetName ],
3615 kwargs={ "values": addValue } )
3616 threads.append( t )
3617 t.start()
3618 for t in threads:
3619 t.join()
3620 # NOTE: This is the tuple
3621 containsResponses.append( t.result )
3622
3623 containsResults = main.TRUE
3624 for i in range( len( main.activeNodes ) ):
3625 if containsResponses[ i ] == main.ERROR:
3626 containsResults = main.FALSE
3627 else:
3628 containsResults = containsResults and\
3629 containsResponses[ i ][ 1 ]
3630 utilities.assert_equals( expect=main.TRUE,
3631 actual=containsResults,
3632 onpass="Set contains is functional",
3633 onfail="Set contains failed" )
3634
3635 main.step( "Distributed Set containsAll()" )
3636 containsAllResponses = []
3637 threads = []
3638 for i in main.activeNodes:
3639 t = main.Thread( target=main.CLIs[i].setTestGet,
3640 name="setContainsAll-" + str( i ),
3641 args=[ onosSetName ],
3642 kwargs={ "values": addAllValue } )
3643 threads.append( t )
3644 t.start()
3645 for t in threads:
3646 t.join()
3647 # NOTE: This is the tuple
3648 containsAllResponses.append( t.result )
3649
3650 containsAllResults = main.TRUE
3651 for i in range( len( main.activeNodes ) ):
3652 if containsResponses[ i ] == main.ERROR:
3653 containsResults = main.FALSE
3654 else:
3655 containsResults = containsResults and\
3656 containsResponses[ i ][ 1 ]
3657 utilities.assert_equals( expect=main.TRUE,
3658 actual=containsAllResults,
3659 onpass="Set containsAll is functional",
3660 onfail="Set containsAll failed" )
3661
        # Verify set remove(): remove addValue from the distributed set on
        # every active node (mirrored locally on the reference copy onosSet),
        # then confirm each node's view and reported size still match onosSet.
        main.step( "Distributed Set remove()" )
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            # t.result holds the CLI call's return value - presumably set by
            # TestON's Thread wrapper; TODO confirm
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based ONOS node label used in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Overall verdict: the remove call, the membership view, and the size
        # check must all pass on every node
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3758
        # Verify set removeAll(): remove every element of addAllValue from
        # the distributed set on each active node (mirrored locally via
        # difference_update), then check each node's view and size.
        main.step( "Distributed Set removeAll()" )
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception, e:  # Python 2 except syntax, consistent with file
            main.log.exception(e)

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based ONOS node label used in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Overall verdict: removeAll call, membership view, and size checks
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )
3858
        # Verify set addAll(): add every element of addAllValue back into the
        # distributed set on each active node (mirrored locally via update),
        # then check each node's view and size against onosSet.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based ONOS node label used in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Overall verdict: addAll call, membership view, and size checks
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3955
        # Verify set clear(): empty the distributed set on each active node
        # via setTestRemove with clear=True (mirrored locally with
        # onosSet.clear()), then confirm every node sees an empty set.
        main.step( "Distributed Set clear()" )
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based ONOS node label used in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Overall verdict: clear call, membership view, and size checks
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4053
        # Re-populate the set after clear(): run addAll() again so the
        # following retain() step has elements to operate on; mirror locally
        # and re-check each node's view and size.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based ONOS node label used in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Overall verdict: addAll call, membership view, and size checks
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4150
        # Verify set retain(): keep only the elements in retainValue on each
        # active node (setTestRemove with retain=True; mirrored locally with
        # intersection_update), then check each node's view and size.
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            # node is the 1-based ONOS node label used in log messages
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Overall verdict: retain call, membership view, and size checks
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4247
4248 # Transactional maps
4249 main.step( "Partitioned Transactional maps put" )
4250 tMapValue = "Testing"
4251 numKeys = 100
4252 putResult = True
4253 node = main.activeNodes[0]
4254 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4255 if putResponses and len( putResponses ) == 100:
4256 for i in putResponses:
4257 if putResponses[ i ][ 'value' ] != tMapValue:
4258 putResult = False
4259 else:
4260 putResult = False
4261 if not putResult:
4262 main.log.debug( "Put response values: " + str( putResponses ) )
4263 utilities.assert_equals( expect=True,
4264 actual=putResult,
4265 onpass="Partitioned Transactional Map put successful",
4266 onfail="Partitioned Transactional Map put values are incorrect" )
4267
        # Verify transactional map gets: for each of the numKeys keys written
        # above, read "Key" + n from every active node in parallel and check
        # that every node returns tMapValue.
        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE: "node" here is each node's response value, shadowing the
            # controller-index variable assigned in the put step above
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )