blob: 8ea1490d9ffcd4b7b42f7b3d68eb3a520102f432 [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
             dynamic swapping of cluster nodes.

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: Swap nodes
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
25
26
27class HAswapNodes:
28
29 def __init__( self ):
30 self.default = ''
31
32 def CASE1( self, main ):
33 """
34 CASE1 is to compile ONOS and push it to the test machines
35
36 Startup sequence:
37 cell <name>
38 onos-verify-cell
39 NOTE: temporary - onos-remove-raft-logs
40 onos-uninstall
41 start mininet
42 git pull
43 mvn clean install
44 onos-package
45 onos-install -f
46 onos-wait-for-start
47 start cli sessions
48 start tcpdump
49 """
50 import time
51 import os
52 import re
53 main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
54 "initialization" )
55 main.case( "Setting up test environment" )
56 main.caseExplanation = "Setup the test environment including " +\
57 "installing ONOS, starting Mininet and ONOS" +\
58 "cli sessions."
59
60 # load some variables from the params file
61 PULLCODE = False
62 if main.params[ 'Git' ] == 'True':
63 PULLCODE = True
64 gitBranch = main.params[ 'branch' ]
65 cellName = main.params[ 'ENV' ][ 'cellName' ]
66
67 main.numCtrls = int( main.params[ 'num_controllers' ] )
68 if main.ONOSbench.maxNodes:
69 if main.ONOSbench.maxNodes < main.numCtrls:
70 main.numCtrls = int( main.ONOSbench.maxNodes )
71 # set global variables
72 # These are for csv plotting in jenkins
73 global labels
74 global data
75 labels = []
76 data = []
77
78 try:
79 from tests.HA.dependencies.HA import HA
80 main.HA = HA()
81 from tests.HA.HAswapNodes.dependencies.Server import Server
82 main.Server = Server()
83 except Exception as e:
84 main.log.exception( e )
85 main.cleanup()
86 main.exit()
87
88 main.CLIs = []
89 main.nodes = []
90 ipList = []
91 for i in range( 1, main.numCtrls + 1 ):
92 try:
93 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
94 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
95 ipList.append( main.nodes[ -1 ].ip_address )
96 except AttributeError:
97 break
98
99 main.step( "Create cell file" )
100 cellAppString = main.params[ 'ENV' ][ 'appString' ]
101 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
102 main.Mininet1.ip_address,
103 cellAppString, ipList )
104
105 main.step( "Applying cell variable to environment" )
106 cellResult = main.ONOSbench.setCell( cellName )
107 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
108 onpass="Set cell successfull",
109 onfail="Failled to set cell" )
110
111 main.step( "Verify connectivity to cell" )
112 verifyResult = main.ONOSbench.verifyCell()
113 utilities.assert_equals( expect=main.TRUE, actual=verifyResult,
114 onpass="Verify cell passed",
115 onfail="Failled to verify cell" )
116
117 # FIXME:this is short term fix
118 main.log.info( "Removing raft logs" )
119 main.ONOSbench.onosRemoveRaftLogs()
120
121 main.log.info( "Uninstalling ONOS" )
122 for node in main.nodes:
123 main.ONOSbench.onosUninstall( node.ip_address )
124
125 # Make sure ONOS is DEAD
126 main.log.info( "Killing any ONOS processes" )
127 killResults = main.TRUE
128 for node in main.nodes:
129 killed = main.ONOSbench.onosKill( node.ip_address )
130 killResults = killResults and killed
131
132 main.step( "Setup server for cluster metadata file" )
Jon Hall8f6d4622016-05-23 15:27:18 -0700133 port = main.params['server']['port']
Jon Hall69b2b982016-05-11 12:04:59 -0700134 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
135 main.log.debug( "Root dir: {}".format( rootDir ) )
136 status = main.Server.start( main.ONOSbench,
137 rootDir,
138 port=port,
139 logDir=main.logdir + "/server.log" )
140 utilities.assert_equals( expect=main.TRUE, actual=status,
141 onpass="Server started",
142 onfail="Failled to start SimpleHTTPServer" )
143
144 main.step( "Generate initial metadata file" )
145 if main.numCtrls >= 5:
146 main.numCtrls -= 2
147 else:
148 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
149 genResult = main.Server.generateFile( main.numCtrls )
150 utilities.assert_equals( expect=main.TRUE, actual=genResult,
151 onpass="New cluster metadata file generated",
152 onfail="Failled to generate new metadata file" )
153
154 cleanInstallResult = main.TRUE
155 gitPullResult = main.TRUE
156
157 main.step( "Starting Mininet" )
158 # scp topo file to mininet
159 # TODO: move to params?
160 topoName = "obelisk.py"
161 filePath = main.ONOSbench.home + "/tools/test/topos/"
162 main.ONOSbench.scp( main.Mininet1,
163 filePath + topoName,
164 main.Mininet1.home,
165 direction="to" )
166 mnResult = main.Mininet1.startNet( )
167 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
168 onpass="Mininet Started",
169 onfail="Error starting Mininet" )
170
171 main.step( "Git checkout and pull " + gitBranch )
172 if PULLCODE:
173 main.ONOSbench.gitCheckout( gitBranch )
174 gitPullResult = main.ONOSbench.gitPull()
175 # values of 1 or 3 are good
176 utilities.assert_lesser( expect=0, actual=gitPullResult,
177 onpass="Git pull successful",
178 onfail="Git pull failed" )
179 main.ONOSbench.getVersion( report=True )
180
181 main.step( "Using mvn clean install" )
182 cleanInstallResult = main.TRUE
183 if PULLCODE and gitPullResult == main.TRUE:
184 cleanInstallResult = main.ONOSbench.cleanInstall()
185 else:
186 main.log.warn( "Did not pull new code so skipping mvn " +
187 "clean install" )
188 utilities.assert_equals( expect=main.TRUE,
189 actual=cleanInstallResult,
190 onpass="MCI successful",
191 onfail="MCI failed" )
192 # GRAPHS
193 # NOTE: important params here:
194 # job = name of Jenkins job
195 # Plot Name = Plot-HA, only can be used if multiple plots
196 # index = The number of the graph under plot name
197 job = "HAswapNodes"
198 plotName = "Plot-HA"
199 index = "0"
200 graphs = '<ac:structured-macro ac:name="html">\n'
201 graphs += '<ac:plain-text-body><![CDATA[\n'
202 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
203 '/plot/' + plotName + '/getPlot?index=' + index +\
204 '&width=500&height=300"' +\
205 'noborder="0" width="500" height="300" scrolling="yes" ' +\
206 'seamless="seamless"></iframe>\n'
207 graphs += ']]></ac:plain-text-body>\n'
208 graphs += '</ac:structured-macro>\n'
209 main.log.wiki(graphs)
210
211 main.step( "Copying backup config files" )
212 path = "~/onos/tools/package/bin/onos-service"
213 cp = main.ONOSbench.scp( main.ONOSbench,
214 path,
215 path + ".backup",
216 direction="to" )
217
218 utilities.assert_equals( expect=main.TRUE,
219 actual=cp,
220 onpass="Copy backup config file succeeded",
221 onfail="Copy backup config file failed" )
222 # we need to modify the onos-service file to use remote metadata file
223 # url for cluster metadata file
Jon Hall8f6d4622016-05-23 15:27:18 -0700224 iface = main.params['server'].get( 'interface' )
225 ip = main.ONOSbench.getIpAddr( iface=iface )
Jon Hall69b2b982016-05-11 12:04:59 -0700226 metaFile = "cluster.json"
227 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, port, metaFile )
228 main.log.warn( javaArgs )
229 main.log.warn( repr( javaArgs ) )
230 handle = main.ONOSbench.handle
231 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, path )
232 main.log.warn( sed )
233 main.log.warn( repr( sed ) )
234 handle.sendline( sed )
235 handle.expect( "\$" )
236 main.log.debug( repr( handle.before ) )
237
238 main.step( "Creating ONOS package" )
239 packageResult = main.ONOSbench.onosPackage()
240 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
241 onpass="ONOS package successful",
242 onfail="ONOS package failed" )
243
244 main.step( "Installing ONOS package" )
245 onosInstallResult = main.TRUE
246 for i in range( main.ONOSbench.maxNodes ):
247 node = main.nodes[i]
248 options = "-f"
249 if i >= main.numCtrls:
250 options = "-nf" # Don't start more than the current scale
251 tmpResult = main.ONOSbench.onosInstall( options=options,
252 node=node.ip_address )
253 onosInstallResult = onosInstallResult and tmpResult
254 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
255 onpass="ONOS install successful",
256 onfail="ONOS install failed" )
257
258 # Cleanup custom onos-service file
259 main.ONOSbench.scp( main.ONOSbench,
260 path + ".backup",
261 path,
262 direction="to" )
263
264 main.step( "Checking if ONOS is up yet" )
265 for i in range( 2 ):
266 onosIsupResult = main.TRUE
267 for i in range( main.numCtrls ):
268 node = main.nodes[i]
269 started = main.ONOSbench.isup( node.ip_address )
270 if not started:
271 main.log.error( node.name + " hasn't started" )
272 onosIsupResult = onosIsupResult and started
273 if onosIsupResult == main.TRUE:
274 break
275 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
276 onpass="ONOS startup successful",
277 onfail="ONOS startup failed" )
278
279 main.log.step( "Starting ONOS CLI sessions" )
280 cliResults = main.TRUE
281 threads = []
282 for i in range( main.numCtrls ):
283 t = main.Thread( target=main.CLIs[i].startOnosCli,
284 name="startOnosCli-" + str( i ),
285 args=[main.nodes[i].ip_address] )
286 threads.append( t )
287 t.start()
288
289 for t in threads:
290 t.join()
291 cliResults = cliResults and t.result
292 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
293 onpass="ONOS cli startup successful",
294 onfail="ONOS cli startup failed" )
295
296 # Create a list of active nodes for use when some nodes are stopped
297 main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
298
299 if main.params[ 'tcpdump' ].lower() == "true":
300 main.step( "Start Packet Capture MN" )
301 main.Mininet2.startTcpdump(
302 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
303 + "-MN.pcap",
304 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
305 port=main.params[ 'MNtcpdump' ][ 'port' ] )
306
307 main.step( "Checking ONOS nodes" )
308 nodeResults = utilities.retry( main.HA.nodesCheck,
309 False,
310 args=[main.activeNodes],
311 attempts=5 )
312 utilities.assert_equals( expect=True, actual=nodeResults,
313 onpass="Nodes check successful",
314 onfail="Nodes check NOT successful" )
315
316 if not nodeResults:
317 for i in main.activeNodes:
318 cli = main.CLIs[i]
319 main.log.debug( "{} components not ACTIVE: \n{}".format(
320 cli.name,
321 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
322 main.log.error( "Failed to start ONOS, stopping test" )
323 main.cleanup()
324 main.exit()
325
326 main.step( "Activate apps defined in the params file" )
327 # get data from the params
328 apps = main.params.get( 'apps' )
329 if apps:
330 apps = apps.split(',')
331 main.log.warn( apps )
332 activateResult = True
333 for app in apps:
334 main.CLIs[ 0 ].app( app, "Activate" )
335 # TODO: check this worked
336 time.sleep( 10 ) # wait for apps to activate
337 for app in apps:
338 state = main.CLIs[ 0 ].appStatus( app )
339 if state == "ACTIVE":
340 activateResult = activateResult and True
341 else:
342 main.log.error( "{} is in {} state".format( app, state ) )
343 activateResult = False
344 utilities.assert_equals( expect=True,
345 actual=activateResult,
346 onpass="Successfully activated apps",
347 onfail="Failed to activate apps" )
348 else:
349 main.log.warn( "No apps were specified to be loaded after startup" )
350
351 main.step( "Set ONOS configurations" )
352 config = main.params.get( 'ONOS_Configuration' )
353 if config:
354 main.log.debug( config )
355 checkResult = main.TRUE
356 for component in config:
357 for setting in config[component]:
358 value = config[component][setting]
359 check = main.CLIs[ 0 ].setCfg( component, setting, value )
360 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
361 checkResult = check and checkResult
362 utilities.assert_equals( expect=main.TRUE,
363 actual=checkResult,
364 onpass="Successfully set config",
365 onfail="Failed to set config" )
366 else:
367 main.log.warn( "No configurations were specified to be changed after startup" )
368
369 main.step( "App Ids check" )
370 appCheck = main.TRUE
371 threads = []
372 for i in main.activeNodes:
373 t = main.Thread( target=main.CLIs[i].appToIDCheck,
374 name="appToIDCheck-" + str( i ),
375 args=[] )
376 threads.append( t )
377 t.start()
378
379 for t in threads:
380 t.join()
381 appCheck = appCheck and t.result
382 if appCheck != main.TRUE:
383 node = main.activeNodes[0]
384 main.log.warn( main.CLIs[node].apps() )
385 main.log.warn( main.CLIs[node].appIDs() )
386 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
387 onpass="App Ids seem to be correct",
388 onfail="Something is wrong with app Ids" )
389
390 def CASE2( self, main ):
391 """
392 Assign devices to controllers
393 """
394 import re
395 assert main.numCtrls, "main.numCtrls not defined"
396 assert main, "main not defined"
397 assert utilities.assert_equals, "utilities.assert_equals not defined"
398 assert main.CLIs, "main.CLIs not defined"
399 assert main.nodes, "main.nodes not defined"
400
401 main.case( "Assigning devices to controllers" )
402 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
403 "and check that an ONOS node becomes the " +\
404 "master of the device."
405 main.step( "Assign switches to controllers" )
406
407 ipList = []
408 for i in range( main.ONOSbench.maxNodes ):
409 ipList.append( main.nodes[ i ].ip_address )
410 swList = []
411 for i in range( 1, 29 ):
412 swList.append( "s" + str( i ) )
413 main.Mininet1.assignSwController( sw=swList, ip=ipList )
414
415 mastershipCheck = main.TRUE
416 for i in range( 1, 29 ):
417 response = main.Mininet1.getSwController( "s" + str( i ) )
418 try:
419 main.log.info( str( response ) )
420 except Exception:
421 main.log.info( repr( response ) )
422 for node in main.nodes:
423 if re.search( "tcp:" + node.ip_address, response ):
424 mastershipCheck = mastershipCheck and main.TRUE
425 else:
426 main.log.error( "Error, node " + node.ip_address + " is " +
427 "not in the list of controllers s" +
428 str( i ) + " is connecting to." )
429 mastershipCheck = main.FALSE
430 utilities.assert_equals(
431 expect=main.TRUE,
432 actual=mastershipCheck,
433 onpass="Switch mastership assigned correctly",
434 onfail="Switches not assigned correctly to controllers" )
435
    def CASE21( self, main ):
        """
        Assign mastership to controllers.

        Uses the ONOS 'device-role' command to pin each of the 28 switches
        to a specific controller (layout designed for a 7 node cluster but
        scaled with modulo for smaller clusters), then re-reads the roles
        to confirm the assignments took effect.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                                "device. Then manually assign" +\
                                " mastership to specific ONOS nodes using" +\
                                " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        # Parallel lists: ipList[k] is the intended master of deviceList[k]
        ipList = [ ]
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster thanks to the modulo on the controller index.
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = controller index, ip = its address,
                # deviceId = ONOS id of the switch (looked up by dpid suffix)
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    # NOTE(review): unreachable for i in 1..28. If it were
                    # hit, deviceId/ip would retain the previous iteration's
                    # values when execution falls through to the assert below.
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice returned None (AttributeError on .get) or the
            # deviceId assert failed -- dump the device view for debugging
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment: the requested controller should now be master
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
550
    def CASE3( self, main ):
        """
        Assign intents.

        Discovers hosts via reactive forwarding + pingall, installs ten
        host-to-host intents (h8-h17 paired with h18-h27), then verifies the
        intents reach INSTALLED state on every active node and measures how
        long anti-entropy takes to disperse them. On failure, dumps leaders,
        partitions, and the pending map for debugging.
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # labels/data are module globals set in CASE1; recreate them locally
        # if CASE1 was skipped so the plotting code below doesn't crash
        try:
            labels
        except NameError:
            main.log.error( "labels not defined, setting to []" )
            labels = []
        try:
            data
        except NameError:
            main.log.error( "data not defined, setting to []" )
            data = []
        # NOTE: we must reinstall intents until we have a persistent intent
        #       datastore!
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                                "assign predetermined host-to-host intents." +\
                                " After installation, check that the intent" +\
                                " is distributed to all nodes and the state" +\
                                " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        # Verify app name <-> id mapping is consistent on every active node
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            # One retry: reactive flows may not be ready on the first pass
            main.log.warn("First pingall failed. Trying again...")
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass= passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd so reactive flows don't mask intent failures
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        # Pair h8..h17 with h18..h27; MAC addresses encode the host number
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                # Round-robin the add across active nodes
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                     repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        # Timestamp the submission; intentStop is set once all submitted
        # intents are visible (used for the anti-entropy timing below)
        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        # Diagnostic dump: leadership of the intent partition topics
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False)
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        # Poll (up to 100s) until every active node has the same, fully
        # INSTALLED intent set; measures gossip convergence time
        for j in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        # gossipTime is plotted by Jenkins (labels/data globals)
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                        " seconds for all intents to appear in each node" )
        # Find an unused "Gossip Intents<N>" column name for the csv
        append = False
        title = "Gossip Intents"
        count = 1
        while append is False:
            curTitle = title + str( count )
            if curTitle not in labels:
                labels.append( curTitle )
                data.append( str( gossipTime ) )
                append = True
            else:
                count += 1
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        # Last-chance recovery: wait a minute and re-dump all diagnostics
        if not intentAddResult or "key" in pendingMap:
            # NOTE(review): redundant import -- time is already imported at
            # the top of this method
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
969
    def CASE4( self, main ):
        """
        Ping across added host intents

        Polls intent state until every intent reports INSTALLED ( up to
        ~40 one-second retries ), then pings across each added host intent
        ( h8<->h18 ... h17<->h27 ) and dumps topic leadership, partition
        and pending-map state from ONOS for debugging.  If the intents
        never all reached INSTALLED, waits another 60 seconds, re-reads
        the state, and retries the pings once more.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                                "functionality and check the state of " +\
                                "the intent"

        # All single-node queries below go through the first active node's CLI
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until every intent is INSTALLED, or we exhaust 40 retries
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Each added intent connects h<i> to h<i+10>
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                # NOTE(review): if onosCli.intents() itself raised TypeError,
                # tmpIntents is unbound here and this repr() would raise
                # NameError — confirm intents() only returns strings/None
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        # NOTE(review): this dumps per-node leaders output when the topic
        # check PASSED; sibling blocks in this case dump only on failure
        # ( `if missing:` below ) — confirm whether this should be inverted
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If the earlier poll never saw all intents INSTALLED, give the
        # cluster one more minute and repeat the state dump and pings
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Dump per-node leaders output only when a topic was missing
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1251
1252 def CASE5( self, main ):
1253 """
1254 Reading state of ONOS
1255 """
1256 import json
1257 import time
1258 assert main.numCtrls, "main.numCtrls not defined"
1259 assert main, "main not defined"
1260 assert utilities.assert_equals, "utilities.assert_equals not defined"
1261 assert main.CLIs, "main.CLIs not defined"
1262 assert main.nodes, "main.nodes not defined"
1263
1264 main.case( "Setting up and gathering data for current state" )
1265 # The general idea for this test case is to pull the state of
1266 # ( intents,flows, topology,... ) from each ONOS node
1267 # We can then compare them with each other and also with past states
1268
1269 main.step( "Check that each switch has a master" )
1270 global mastershipState
1271 mastershipState = '[]'
1272
1273 # Assert that each device has a master
1274 rolesNotNull = main.TRUE
1275 threads = []
1276 for i in main.activeNodes:
1277 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1278 name="rolesNotNull-" + str( i ),
1279 args=[] )
1280 threads.append( t )
1281 t.start()
1282
1283 for t in threads:
1284 t.join()
1285 rolesNotNull = rolesNotNull and t.result
1286 utilities.assert_equals(
1287 expect=main.TRUE,
1288 actual=rolesNotNull,
1289 onpass="Each device has a master",
1290 onfail="Some devices don't have a master assigned" )
1291
1292 main.step( "Get the Mastership of each switch from each controller" )
1293 ONOSMastership = []
1294 consistentMastership = True
1295 rolesResults = True
1296 threads = []
1297 for i in main.activeNodes:
1298 t = main.Thread( target=main.CLIs[i].roles,
1299 name="roles-" + str( i ),
1300 args=[] )
1301 threads.append( t )
1302 t.start()
1303
1304 for t in threads:
1305 t.join()
1306 ONOSMastership.append( t.result )
1307
1308 for i in range( len( ONOSMastership ) ):
1309 node = str( main.activeNodes[i] + 1 )
1310 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1311 main.log.error( "Error in getting ONOS" + node + " roles" )
1312 main.log.warn( "ONOS" + node + " mastership response: " +
1313 repr( ONOSMastership[i] ) )
1314 rolesResults = False
1315 utilities.assert_equals(
1316 expect=True,
1317 actual=rolesResults,
1318 onpass="No error in reading roles output",
1319 onfail="Error in reading roles from ONOS" )
1320
1321 main.step( "Check for consistency in roles from each controller" )
1322 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1323 main.log.info(
1324 "Switch roles are consistent across all ONOS nodes" )
1325 else:
1326 consistentMastership = False
1327 utilities.assert_equals(
1328 expect=True,
1329 actual=consistentMastership,
1330 onpass="Switch roles are consistent across all ONOS nodes",
1331 onfail="ONOS nodes have different views of switch roles" )
1332
1333 if rolesResults and not consistentMastership:
1334 for i in range( len( main.activeNodes ) ):
1335 node = str( main.activeNodes[i] + 1 )
1336 try:
1337 main.log.warn(
1338 "ONOS" + node + " roles: ",
1339 json.dumps(
1340 json.loads( ONOSMastership[ i ] ),
1341 sort_keys=True,
1342 indent=4,
1343 separators=( ',', ': ' ) ) )
1344 except ( ValueError, TypeError ):
1345 main.log.warn( repr( ONOSMastership[ i ] ) )
1346 elif rolesResults and consistentMastership:
1347 mastershipState = ONOSMastership[ 0 ]
1348
1349 main.step( "Get the intents from each controller" )
1350 global intentState
1351 intentState = []
1352 ONOSIntents = []
1353 consistentIntents = True # Are Intents consistent across nodes?
1354 intentsResults = True # Could we read Intents from ONOS?
1355 threads = []
1356 for i in main.activeNodes:
1357 t = main.Thread( target=main.CLIs[i].intents,
1358 name="intents-" + str( i ),
1359 args=[],
1360 kwargs={ 'jsonFormat': True } )
1361 threads.append( t )
1362 t.start()
1363
1364 for t in threads:
1365 t.join()
1366 ONOSIntents.append( t.result )
1367
1368 for i in range( len( ONOSIntents ) ):
1369 node = str( main.activeNodes[i] + 1 )
1370 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1371 main.log.error( "Error in getting ONOS" + node + " intents" )
1372 main.log.warn( "ONOS" + node + " intents response: " +
1373 repr( ONOSIntents[ i ] ) )
1374 intentsResults = False
1375 utilities.assert_equals(
1376 expect=True,
1377 actual=intentsResults,
1378 onpass="No error in reading intents output",
1379 onfail="Error in reading intents from ONOS" )
1380
1381 main.step( "Check for consistency in Intents from each controller" )
1382 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1383 main.log.info( "Intents are consistent across all ONOS " +
1384 "nodes" )
1385 else:
1386 consistentIntents = False
1387 main.log.error( "Intents not consistent" )
1388 utilities.assert_equals(
1389 expect=True,
1390 actual=consistentIntents,
1391 onpass="Intents are consistent across all ONOS nodes",
1392 onfail="ONOS nodes have different views of intents" )
1393
1394 if intentsResults:
1395 # Try to make it easy to figure out what is happening
1396 #
1397 # Intent ONOS1 ONOS2 ...
1398 # 0x01 INSTALLED INSTALLING
1399 # ... ... ...
1400 # ... ... ...
1401 title = " Id"
1402 for n in main.activeNodes:
1403 title += " " * 10 + "ONOS" + str( n + 1 )
1404 main.log.warn( title )
1405 # get all intent keys in the cluster
1406 keys = []
1407 try:
1408 # Get the set of all intent keys
1409 for nodeStr in ONOSIntents:
1410 node = json.loads( nodeStr )
1411 for intent in node:
1412 keys.append( intent.get( 'id' ) )
1413 keys = set( keys )
1414 # For each intent key, print the state on each node
1415 for key in keys:
1416 row = "%-13s" % key
1417 for nodeStr in ONOSIntents:
1418 node = json.loads( nodeStr )
1419 for intent in node:
1420 if intent.get( 'id', "Error" ) == key:
1421 row += "%-15s" % intent.get( 'state' )
1422 main.log.warn( row )
1423 # End of intent state table
1424 except ValueError as e:
1425 main.log.exception( e )
1426 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1427
1428 if intentsResults and not consistentIntents:
1429 # print the json objects
1430 n = str( main.activeNodes[-1] + 1 )
1431 main.log.debug( "ONOS" + n + " intents: " )
1432 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1433 sort_keys=True,
1434 indent=4,
1435 separators=( ',', ': ' ) ) )
1436 for i in range( len( ONOSIntents ) ):
1437 node = str( main.activeNodes[i] + 1 )
1438 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1439 main.log.debug( "ONOS" + node + " intents: " )
1440 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1441 sort_keys=True,
1442 indent=4,
1443 separators=( ',', ': ' ) ) )
1444 else:
1445 main.log.debug( "ONOS" + node + " intents match ONOS" +
1446 n + " intents" )
1447 elif intentsResults and consistentIntents:
1448 intentState = ONOSIntents[ 0 ]
1449
1450 main.step( "Get the flows from each controller" )
1451 global flowState
1452 flowState = []
1453 ONOSFlows = []
1454 ONOSFlowsJson = []
1455 flowCheck = main.FALSE
1456 consistentFlows = True
1457 flowsResults = True
1458 threads = []
1459 for i in main.activeNodes:
1460 t = main.Thread( target=main.CLIs[i].flows,
1461 name="flows-" + str( i ),
1462 args=[],
1463 kwargs={ 'jsonFormat': True } )
1464 threads.append( t )
1465 t.start()
1466
1467 # NOTE: Flows command can take some time to run
1468 time.sleep(30)
1469 for t in threads:
1470 t.join()
1471 result = t.result
1472 ONOSFlows.append( result )
1473
1474 for i in range( len( ONOSFlows ) ):
1475 num = str( main.activeNodes[i] + 1 )
1476 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1477 main.log.error( "Error in getting ONOS" + num + " flows" )
1478 main.log.warn( "ONOS" + num + " flows response: " +
1479 repr( ONOSFlows[ i ] ) )
1480 flowsResults = False
1481 ONOSFlowsJson.append( None )
1482 else:
1483 try:
1484 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1485 except ( ValueError, TypeError ):
1486 # FIXME: change this to log.error?
1487 main.log.exception( "Error in parsing ONOS" + num +
1488 " response as json." )
1489 main.log.error( repr( ONOSFlows[ i ] ) )
1490 ONOSFlowsJson.append( None )
1491 flowsResults = False
1492 utilities.assert_equals(
1493 expect=True,
1494 actual=flowsResults,
1495 onpass="No error in reading flows output",
1496 onfail="Error in reading flows from ONOS" )
1497
1498 main.step( "Check for consistency in Flows from each controller" )
1499 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1500 if all( tmp ):
1501 main.log.info( "Flow count is consistent across all ONOS nodes" )
1502 else:
1503 consistentFlows = False
1504 utilities.assert_equals(
1505 expect=True,
1506 actual=consistentFlows,
1507 onpass="The flow count is consistent across all ONOS nodes",
1508 onfail="ONOS nodes have different flow counts" )
1509
1510 if flowsResults and not consistentFlows:
1511 for i in range( len( ONOSFlows ) ):
1512 node = str( main.activeNodes[i] + 1 )
1513 try:
1514 main.log.warn(
1515 "ONOS" + node + " flows: " +
1516 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1517 indent=4, separators=( ',', ': ' ) ) )
1518 except ( ValueError, TypeError ):
1519 main.log.warn( "ONOS" + node + " flows: " +
1520 repr( ONOSFlows[ i ] ) )
1521 elif flowsResults and consistentFlows:
1522 flowCheck = main.TRUE
1523 flowState = ONOSFlows[ 0 ]
1524
1525 main.step( "Get the OF Table entries" )
1526 global flows
1527 flows = []
1528 for i in range( 1, 29 ):
1529 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1530 if flowCheck == main.FALSE:
1531 for table in flows:
1532 main.log.warn( table )
1533 # TODO: Compare switch flow tables with ONOS flow tables
1534
1535 main.step( "Start continuous pings" )
1536 main.Mininet2.pingLong(
1537 src=main.params[ 'PING' ][ 'source1' ],
1538 target=main.params[ 'PING' ][ 'target1' ],
1539 pingTime=500 )
1540 main.Mininet2.pingLong(
1541 src=main.params[ 'PING' ][ 'source2' ],
1542 target=main.params[ 'PING' ][ 'target2' ],
1543 pingTime=500 )
1544 main.Mininet2.pingLong(
1545 src=main.params[ 'PING' ][ 'source3' ],
1546 target=main.params[ 'PING' ][ 'target3' ],
1547 pingTime=500 )
1548 main.Mininet2.pingLong(
1549 src=main.params[ 'PING' ][ 'source4' ],
1550 target=main.params[ 'PING' ][ 'target4' ],
1551 pingTime=500 )
1552 main.Mininet2.pingLong(
1553 src=main.params[ 'PING' ][ 'source5' ],
1554 target=main.params[ 'PING' ][ 'target5' ],
1555 pingTime=500 )
1556 main.Mininet2.pingLong(
1557 src=main.params[ 'PING' ][ 'source6' ],
1558 target=main.params[ 'PING' ][ 'target6' ],
1559 pingTime=500 )
1560 main.Mininet2.pingLong(
1561 src=main.params[ 'PING' ][ 'source7' ],
1562 target=main.params[ 'PING' ][ 'target7' ],
1563 pingTime=500 )
1564 main.Mininet2.pingLong(
1565 src=main.params[ 'PING' ][ 'source8' ],
1566 target=main.params[ 'PING' ][ 'target8' ],
1567 pingTime=500 )
1568 main.Mininet2.pingLong(
1569 src=main.params[ 'PING' ][ 'source9' ],
1570 target=main.params[ 'PING' ][ 'target9' ],
1571 pingTime=500 )
1572 main.Mininet2.pingLong(
1573 src=main.params[ 'PING' ][ 'source10' ],
1574 target=main.params[ 'PING' ][ 'target10' ],
1575 pingTime=500 )
1576
1577 main.step( "Collecting topology information from ONOS" )
1578 devices = []
1579 threads = []
1580 for i in main.activeNodes:
1581 t = main.Thread( target=main.CLIs[i].devices,
1582 name="devices-" + str( i ),
1583 args=[ ] )
1584 threads.append( t )
1585 t.start()
1586
1587 for t in threads:
1588 t.join()
1589 devices.append( t.result )
1590 hosts = []
1591 threads = []
1592 for i in main.activeNodes:
1593 t = main.Thread( target=main.CLIs[i].hosts,
1594 name="hosts-" + str( i ),
1595 args=[ ] )
1596 threads.append( t )
1597 t.start()
1598
1599 for t in threads:
1600 t.join()
1601 try:
1602 hosts.append( json.loads( t.result ) )
1603 except ( ValueError, TypeError ):
1604 # FIXME: better handling of this, print which node
1605 # Maybe use thread name?
1606 main.log.exception( "Error parsing json output of hosts" )
1607 main.log.warn( repr( t.result ) )
1608 hosts.append( None )
1609
1610 ports = []
1611 threads = []
1612 for i in main.activeNodes:
1613 t = main.Thread( target=main.CLIs[i].ports,
1614 name="ports-" + str( i ),
1615 args=[ ] )
1616 threads.append( t )
1617 t.start()
1618
1619 for t in threads:
1620 t.join()
1621 ports.append( t.result )
1622 links = []
1623 threads = []
1624 for i in main.activeNodes:
1625 t = main.Thread( target=main.CLIs[i].links,
1626 name="links-" + str( i ),
1627 args=[ ] )
1628 threads.append( t )
1629 t.start()
1630
1631 for t in threads:
1632 t.join()
1633 links.append( t.result )
1634 clusters = []
1635 threads = []
1636 for i in main.activeNodes:
1637 t = main.Thread( target=main.CLIs[i].clusters,
1638 name="clusters-" + str( i ),
1639 args=[ ] )
1640 threads.append( t )
1641 t.start()
1642
1643 for t in threads:
1644 t.join()
1645 clusters.append( t.result )
1646 # Compare json objects for hosts and dataplane clusters
1647
1648 # hosts
1649 main.step( "Host view is consistent across ONOS nodes" )
1650 consistentHostsResult = main.TRUE
1651 for controller in range( len( hosts ) ):
1652 controllerStr = str( main.activeNodes[controller] + 1 )
1653 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1654 if hosts[ controller ] == hosts[ 0 ]:
1655 continue
1656 else: # hosts not consistent
1657 main.log.error( "hosts from ONOS" +
1658 controllerStr +
1659 " is inconsistent with ONOS1" )
1660 main.log.warn( repr( hosts[ controller ] ) )
1661 consistentHostsResult = main.FALSE
1662
1663 else:
1664 main.log.error( "Error in getting ONOS hosts from ONOS" +
1665 controllerStr )
1666 consistentHostsResult = main.FALSE
1667 main.log.warn( "ONOS" + controllerStr +
1668 " hosts response: " +
1669 repr( hosts[ controller ] ) )
1670 utilities.assert_equals(
1671 expect=main.TRUE,
1672 actual=consistentHostsResult,
1673 onpass="Hosts view is consistent across all ONOS nodes",
1674 onfail="ONOS nodes have different views of hosts" )
1675
1676 main.step( "Each host has an IP address" )
1677 ipResult = main.TRUE
1678 for controller in range( 0, len( hosts ) ):
1679 controllerStr = str( main.activeNodes[controller] + 1 )
1680 if hosts[ controller ]:
1681 for host in hosts[ controller ]:
1682 if not host.get( 'ipAddresses', [ ] ):
1683 main.log.error( "Error with host ips on controller" +
1684 controllerStr + ": " + str( host ) )
1685 ipResult = main.FALSE
1686 utilities.assert_equals(
1687 expect=main.TRUE,
1688 actual=ipResult,
1689 onpass="The ips of the hosts aren't empty",
1690 onfail="The ip of at least one host is missing" )
1691
1692 # Strongly connected clusters of devices
1693 main.step( "Cluster view is consistent across ONOS nodes" )
1694 consistentClustersResult = main.TRUE
1695 for controller in range( len( clusters ) ):
1696 controllerStr = str( main.activeNodes[controller] + 1 )
1697 if "Error" not in clusters[ controller ]:
1698 if clusters[ controller ] == clusters[ 0 ]:
1699 continue
1700 else: # clusters not consistent
1701 main.log.error( "clusters from ONOS" + controllerStr +
1702 " is inconsistent with ONOS1" )
1703 consistentClustersResult = main.FALSE
1704
1705 else:
1706 main.log.error( "Error in getting dataplane clusters " +
1707 "from ONOS" + controllerStr )
1708 consistentClustersResult = main.FALSE
1709 main.log.warn( "ONOS" + controllerStr +
1710 " clusters response: " +
1711 repr( clusters[ controller ] ) )
1712 utilities.assert_equals(
1713 expect=main.TRUE,
1714 actual=consistentClustersResult,
1715 onpass="Clusters view is consistent across all ONOS nodes",
1716 onfail="ONOS nodes have different views of clusters" )
1717 if not consistentClustersResult:
1718 main.log.debug( clusters )
1719
1720 # there should always only be one cluster
1721 main.step( "Cluster view correct across ONOS nodes" )
1722 try:
1723 numClusters = len( json.loads( clusters[ 0 ] ) )
1724 except ( ValueError, TypeError ):
1725 main.log.exception( "Error parsing clusters[0]: " +
1726 repr( clusters[ 0 ] ) )
1727 numClusters = "ERROR"
1728 utilities.assert_equals(
1729 expect=1,
1730 actual=numClusters,
1731 onpass="ONOS shows 1 SCC",
1732 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1733
1734 main.step( "Comparing ONOS topology to MN" )
1735 devicesResults = main.TRUE
1736 linksResults = main.TRUE
1737 hostsResults = main.TRUE
1738 mnSwitches = main.Mininet1.getSwitches()
1739 mnLinks = main.Mininet1.getLinks()
1740 mnHosts = main.Mininet1.getHosts()
1741 for controller in main.activeNodes:
1742 controllerStr = str( main.activeNodes[controller] + 1 )
1743 if devices[ controller ] and ports[ controller ] and\
1744 "Error" not in devices[ controller ] and\
1745 "Error" not in ports[ controller ]:
1746 currentDevicesResult = main.Mininet1.compareSwitches(
1747 mnSwitches,
1748 json.loads( devices[ controller ] ),
1749 json.loads( ports[ controller ] ) )
1750 else:
1751 currentDevicesResult = main.FALSE
1752 utilities.assert_equals( expect=main.TRUE,
1753 actual=currentDevicesResult,
1754 onpass="ONOS" + controllerStr +
1755 " Switches view is correct",
1756 onfail="ONOS" + controllerStr +
1757 " Switches view is incorrect" )
1758 if links[ controller ] and "Error" not in links[ controller ]:
1759 currentLinksResult = main.Mininet1.compareLinks(
1760 mnSwitches, mnLinks,
1761 json.loads( links[ controller ] ) )
1762 else:
1763 currentLinksResult = main.FALSE
1764 utilities.assert_equals( expect=main.TRUE,
1765 actual=currentLinksResult,
1766 onpass="ONOS" + controllerStr +
1767 " links view is correct",
1768 onfail="ONOS" + controllerStr +
1769 " links view is incorrect" )
1770
1771 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1772 currentHostsResult = main.Mininet1.compareHosts(
1773 mnHosts,
1774 hosts[ controller ] )
1775 else:
1776 currentHostsResult = main.FALSE
1777 utilities.assert_equals( expect=main.TRUE,
1778 actual=currentHostsResult,
1779 onpass="ONOS" + controllerStr +
1780 " hosts exist in Mininet",
1781 onfail="ONOS" + controllerStr +
1782 " hosts don't match Mininet" )
1783
1784 devicesResults = devicesResults and currentDevicesResult
1785 linksResults = linksResults and currentLinksResult
1786 hostsResults = hostsResults and currentHostsResult
1787
1788 main.step( "Device information is correct" )
1789 utilities.assert_equals(
1790 expect=main.TRUE,
1791 actual=devicesResults,
1792 onpass="Device information is correct",
1793 onfail="Device information is incorrect" )
1794
1795 main.step( "Links are correct" )
1796 utilities.assert_equals(
1797 expect=main.TRUE,
1798 actual=linksResults,
1799 onpass="Link are correct",
1800 onfail="Links are incorrect" )
1801
1802 main.step( "Hosts are correct" )
1803 utilities.assert_equals(
1804 expect=main.TRUE,
1805 actual=hostsResults,
1806 onpass="Hosts are correct",
1807 onfail="Hosts are incorrect" )
1808
1809 def CASE6( self, main ):
1810 """
1811 The Scaling case.
1812 """
1813 import time
1814 import re
1815 assert main.numCtrls, "main.numCtrls not defined"
1816 assert main, "main not defined"
1817 assert utilities.assert_equals, "utilities.assert_equals not defined"
1818 assert main.CLIs, "main.CLIs not defined"
1819 assert main.nodes, "main.nodes not defined"
1820 try:
1821 labels
1822 except NameError:
1823 main.log.error( "labels not defined, setting to []" )
1824 global labels
1825 labels = []
1826 try:
1827 data
1828 except NameError:
1829 main.log.error( "data not defined, setting to []" )
1830 global data
1831 data = []
1832
1833 main.case( "Swap some of the ONOS nodes" )
1834
1835 main.step( "Checking ONOS Logs for errors" )
1836 for i in main.activeNodes:
1837 node = main.nodes[i]
1838 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1839 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1840
1841 main.step( "Generate new metadata file" )
1842 old = [ main.activeNodes[0], main.activeNodes[-1] ]
1843 new = range( main.ONOSbench.maxNodes )[-2:]
1844 assert len( old ) == len( new ), "Length of nodes to swap don't match"
1845 handle = main.ONOSbench.handle
1846 for x, y in zip( old, new ):
1847 handle.sendline( "export OC{}=$OC{}".format( x + 1, y + 1 ) )
1848 handle.expect( "\$" ) # from the variable
1849 ret = handle.before
1850 handle.expect( "\$" ) # From the prompt
1851 ret += handle.before
1852 main.log.debug( ret )
1853 main.activeNodes.remove( x )
1854 main.activeNodes.append( y )
1855
1856 genResult = main.Server.generateFile( main.numCtrls )
1857 utilities.assert_equals( expect=main.TRUE, actual=genResult,
1858 onpass="New cluster metadata file generated",
1859 onfail="Failled to generate new metadata file" )
1860 time.sleep( 5 ) # Give time for nodes to read new file
1861
1862 main.step( "Start new nodes" ) # OR stop old nodes?
1863 started = main.TRUE
1864 for i in new:
1865 started = main.ONOSbench.onosStart( main.nodes[i].ip_address ) and main.TRUE
1866 utilities.assert_equals( expect=main.TRUE, actual=started,
1867 onpass="ONOS started",
1868 onfail="ONOS start NOT successful" )
1869
1870 main.step( "Checking if ONOS is up yet" )
1871 for i in range( 2 ):
1872 onosIsupResult = main.TRUE
1873 for i in main.activeNodes:
1874 node = main.nodes[i]
1875 started = main.ONOSbench.isup( node.ip_address )
1876 if not started:
1877 main.log.error( node.name + " didn't start!" )
1878 onosIsupResult = onosIsupResult and started
1879 if onosIsupResult == main.TRUE:
1880 break
1881 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1882 onpass="ONOS started",
1883 onfail="ONOS start NOT successful" )
1884
1885 main.log.step( "Starting ONOS CLI sessions" )
1886 cliResults = main.TRUE
1887 threads = []
1888 for i in main.activeNodes:
1889 t = main.Thread( target=main.CLIs[i].startOnosCli,
1890 name="startOnosCli-" + str( i ),
1891 args=[main.nodes[i].ip_address] )
1892 threads.append( t )
1893 t.start()
1894
1895 for t in threads:
1896 t.join()
1897 cliResults = cliResults and t.result
1898 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1899 onpass="ONOS cli started",
1900 onfail="ONOS clis did not start" )
1901
1902 main.step( "Checking ONOS nodes" )
1903 nodeResults = utilities.retry( main.HA.nodesCheck,
1904 False,
1905 args=[main.activeNodes],
1906 attempts=5 )
1907 utilities.assert_equals( expect=True, actual=nodeResults,
1908 onpass="Nodes check successful",
1909 onfail="Nodes check NOT successful" )
1910
1911 for i in range( 10 ):
1912 ready = True
1913 for i in main.activeNodes:
1914 cli = main.CLIs[i]
1915 output = cli.summary()
1916 if not output:
1917 ready = False
1918 if ready:
1919 break
1920 time.sleep( 30 )
1921 utilities.assert_equals( expect=True, actual=ready,
1922 onpass="ONOS summary command succeded",
1923 onfail="ONOS summary command failed" )
1924 if not ready:
1925 main.cleanup()
1926 main.exit()
1927
1928 # Rerun for election on new nodes
1929 runResults = main.TRUE
1930 for i in main.activeNodes:
1931 cli = main.CLIs[i]
1932 run = cli.electionTestRun()
1933 if run != main.TRUE:
1934 main.log.error( "Error running for election on " + cli.name )
1935 runResults = runResults and run
1936 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1937 onpass="Reran for election",
1938 onfail="Failed to rerun for election" )
1939
1940 for node in main.activeNodes:
1941 main.log.warn( "\n****************** {} **************".format( main.nodes[node].ip_address ) )
1942 main.log.debug( main.CLIs[node].nodes( jsonFormat=False ) )
1943 main.log.debug( main.CLIs[node].leaders( jsonFormat=False ) )
1944 main.log.debug( main.CLIs[node].partitions( jsonFormat=False ) )
1945 main.log.debug( main.CLIs[node].apps( jsonFormat=False ) )
1946
1947 main.step( "Reapplying cell variable to environment" )
1948 cellName = main.params[ 'ENV' ][ 'cellName' ]
1949 cellResult = main.ONOSbench.setCell( cellName )
1950 utilities.assert_equals( expect=main.TRUE, actual=cellResult,
1951 onpass="Set cell successfull",
1952 onfail="Failled to set cell" )
1953
    def CASE7( self, main ):
        """
        Check state after ONOS scaling

        Verifies that controller-side state survived the node swap done in
        CASE6: every device still has a master, mastership and intents are
        consistent across the active nodes, intents and OF flow tables match
        the pre-swap snapshots, and leadership election still works.

        Relies on globals saved by earlier cases:
          - intentState: intents JSON saved in CASE5 (may be undefined if
            CASE5 failed — handled via NameError below)
          - flows: per-switch flow tables saved before the swap
            (assumed set by an earlier case — TODO confirm)
        """
        import json
        # Sanity-check the shared test context before doing anything
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        # One thread per active node; results are ANDed together.
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        # Collect the raw `roles` output from every active node in parallel.
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose response was empty or contained "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes should return byte-identical roles output
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On inconsistency, dump each node's roles for debugging
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller scaling down

        main.step( "Get the intents and compare across all nodes" )
        # Gather intents JSON from each active node in parallel.
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE(review): each element is a JSON *string*, so sorted() compares
        # character-sorted strings; equal strings imply equal intents, but
        # semantically-equal JSON in a different order would compare unequal.
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states (e.g. INSTALLED counts)
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            # Dump every node's intents as pretty-printed JSON for debugging
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        main.step( "Compare current intents with intents before the scaling" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # intentState is a global saved by an earlier case; a bare name
        # reference inside try/except detects whether it was ever set.
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before scaling" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same length: fall back to a structural, order-insensitive
                # membership comparison of the parsed intent objects.
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                # Dump before/after for debugging the mismatch
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=sameIntents,
            onpass="Intents are consistent with before scaling",
            onfail="The Intents changed during scaling" )
        intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component scaling" )
        # `flows` is a global list of pre-scaling flow tables, presumably
        # saved by an earlier case — TODO confirm which case sets it.
        # The 28-switch count is topology specific (obelisk).
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        # The triple-quoted block below is a disabled ping-loss check kept
        # for reference; it is a bare string expression, not executed code.
        '''
        # main.step( "Check the continuous pings to ensure that no packets " +
        #            "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        # utilities.assert_equals(
        #     expect=main.FALSE,
        #     actual=LossInPings,
        #     onpass="No Loss of connectivity",
        #     onfail="Loss of dataplane connectivity detected" )

        # NOTE: Since intents are not persisted with IntnentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        # Every node should agree on a single, non-None leader.
        leaderList = []
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app." )
                leaderResult = main.FALSE
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2262
2263 def CASE8( self, main ):
2264 """
2265 Compare topo
2266 """
2267 import json
2268 import time
2269 assert main.numCtrls, "main.numCtrls not defined"
2270 assert main, "main not defined"
2271 assert utilities.assert_equals, "utilities.assert_equals not defined"
2272 assert main.CLIs, "main.CLIs not defined"
2273 assert main.nodes, "main.nodes not defined"
2274
2275 main.case( "Compare ONOS Topology view to Mininet topology" )
2276 main.caseExplanation = "Compare topology objects between Mininet" +\
2277 " and ONOS"
2278 topoResult = main.FALSE
2279 topoFailMsg = "ONOS topology don't match Mininet"
2280 elapsed = 0
2281 count = 0
2282 main.step( "Comparing ONOS topology to MN topology" )
2283 startTime = time.time()
2284 # Give time for Gossip to work
2285 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2286 devicesResults = main.TRUE
2287 linksResults = main.TRUE
2288 hostsResults = main.TRUE
2289 hostAttachmentResults = True
2290 count += 1
2291 cliStart = time.time()
2292 devices = []
2293 threads = []
2294 for i in main.activeNodes:
2295 t = main.Thread( target=utilities.retry,
2296 name="devices-" + str( i ),
2297 args=[ main.CLIs[i].devices, [ None ] ],
2298 kwargs= { 'sleep': 5, 'attempts': 5,
2299 'randomTime': True } )
2300 threads.append( t )
2301 t.start()
2302
2303 for t in threads:
2304 t.join()
2305 devices.append( t.result )
2306 hosts = []
2307 ipResult = main.TRUE
2308 threads = []
2309 for i in main.activeNodes:
2310 t = main.Thread( target=utilities.retry,
2311 name="hosts-" + str( i ),
2312 args=[ main.CLIs[i].hosts, [ None ] ],
2313 kwargs= { 'sleep': 5, 'attempts': 5,
2314 'randomTime': True } )
2315 threads.append( t )
2316 t.start()
2317
2318 for t in threads:
2319 t.join()
2320 try:
2321 hosts.append( json.loads( t.result ) )
2322 except ( ValueError, TypeError ):
2323 main.log.exception( "Error parsing hosts results" )
2324 main.log.error( repr( t.result ) )
2325 hosts.append( None )
2326 for controller in range( 0, len( hosts ) ):
2327 controllerStr = str( main.activeNodes[controller] + 1 )
2328 if hosts[ controller ]:
2329 for host in hosts[ controller ]:
2330 if host is None or host.get( 'ipAddresses', [] ) == []:
2331 main.log.error(
2332 "Error with host ipAddresses on controller" +
2333 controllerStr + ": " + str( host ) )
2334 ipResult = main.FALSE
2335 ports = []
2336 threads = []
2337 for i in main.activeNodes:
2338 t = main.Thread( target=utilities.retry,
2339 name="ports-" + str( i ),
2340 args=[ main.CLIs[i].ports, [ None ] ],
2341 kwargs= { 'sleep': 5, 'attempts': 5,
2342 'randomTime': True } )
2343 threads.append( t )
2344 t.start()
2345
2346 for t in threads:
2347 t.join()
2348 ports.append( t.result )
2349 links = []
2350 threads = []
2351 for i in main.activeNodes:
2352 t = main.Thread( target=utilities.retry,
2353 name="links-" + str( i ),
2354 args=[ main.CLIs[i].links, [ None ] ],
2355 kwargs= { 'sleep': 5, 'attempts': 5,
2356 'randomTime': True } )
2357 threads.append( t )
2358 t.start()
2359
2360 for t in threads:
2361 t.join()
2362 links.append( t.result )
2363 clusters = []
2364 threads = []
2365 for i in main.activeNodes:
2366 t = main.Thread( target=utilities.retry,
2367 name="clusters-" + str( i ),
2368 args=[ main.CLIs[i].clusters, [ None ] ],
2369 kwargs= { 'sleep': 5, 'attempts': 5,
2370 'randomTime': True } )
2371 threads.append( t )
2372 t.start()
2373
2374 for t in threads:
2375 t.join()
2376 clusters.append( t.result )
2377
2378 elapsed = time.time() - startTime
2379 cliTime = time.time() - cliStart
2380 print "Elapsed time: " + str( elapsed )
2381 print "CLI time: " + str( cliTime )
2382
2383 if all( e is None for e in devices ) and\
2384 all( e is None for e in hosts ) and\
2385 all( e is None for e in ports ) and\
2386 all( e is None for e in links ) and\
2387 all( e is None for e in clusters ):
2388 topoFailMsg = "Could not get topology from ONOS"
2389 main.log.error( topoFailMsg )
2390 continue # Try again, No use trying to compare
2391
2392 mnSwitches = main.Mininet1.getSwitches()
2393 mnLinks = main.Mininet1.getLinks()
2394 mnHosts = main.Mininet1.getHosts()
2395 for controller in range( len( main.activeNodes ) ):
2396 controllerStr = str( main.activeNodes[controller] + 1 )
2397 if devices[ controller ] and ports[ controller ] and\
2398 "Error" not in devices[ controller ] and\
2399 "Error" not in ports[ controller ]:
2400
2401 try:
2402 currentDevicesResult = main.Mininet1.compareSwitches(
2403 mnSwitches,
2404 json.loads( devices[ controller ] ),
2405 json.loads( ports[ controller ] ) )
2406 except ( TypeError, ValueError ):
2407 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2408 devices[ controller ], ports[ controller ] ) )
2409 else:
2410 currentDevicesResult = main.FALSE
2411 utilities.assert_equals( expect=main.TRUE,
2412 actual=currentDevicesResult,
2413 onpass="ONOS" + controllerStr +
2414 " Switches view is correct",
2415 onfail="ONOS" + controllerStr +
2416 " Switches view is incorrect" )
2417
2418 if links[ controller ] and "Error" not in links[ controller ]:
2419 currentLinksResult = main.Mininet1.compareLinks(
2420 mnSwitches, mnLinks,
2421 json.loads( links[ controller ] ) )
2422 else:
2423 currentLinksResult = main.FALSE
2424 utilities.assert_equals( expect=main.TRUE,
2425 actual=currentLinksResult,
2426 onpass="ONOS" + controllerStr +
2427 " links view is correct",
2428 onfail="ONOS" + controllerStr +
2429 " links view is incorrect" )
2430 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2431 currentHostsResult = main.Mininet1.compareHosts(
2432 mnHosts,
2433 hosts[ controller ] )
2434 elif hosts[ controller ] == []:
2435 currentHostsResult = main.TRUE
2436 else:
2437 currentHostsResult = main.FALSE
2438 utilities.assert_equals( expect=main.TRUE,
2439 actual=currentHostsResult,
2440 onpass="ONOS" + controllerStr +
2441 " hosts exist in Mininet",
2442 onfail="ONOS" + controllerStr +
2443 " hosts don't match Mininet" )
2444 # CHECKING HOST ATTACHMENT POINTS
2445 hostAttachment = True
2446 zeroHosts = False
2447 # FIXME: topo-HA/obelisk specific mappings:
2448 # key is mac and value is dpid
2449 mappings = {}
2450 for i in range( 1, 29 ): # hosts 1 through 28
2451 # set up correct variables:
2452 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2453 if i == 1:
2454 deviceId = "1000".zfill(16)
2455 elif i == 2:
2456 deviceId = "2000".zfill(16)
2457 elif i == 3:
2458 deviceId = "3000".zfill(16)
2459 elif i == 4:
2460 deviceId = "3004".zfill(16)
2461 elif i == 5:
2462 deviceId = "5000".zfill(16)
2463 elif i == 6:
2464 deviceId = "6000".zfill(16)
2465 elif i == 7:
2466 deviceId = "6007".zfill(16)
2467 elif i >= 8 and i <= 17:
2468 dpid = '3' + str( i ).zfill( 3 )
2469 deviceId = dpid.zfill(16)
2470 elif i >= 18 and i <= 27:
2471 dpid = '6' + str( i ).zfill( 3 )
2472 deviceId = dpid.zfill(16)
2473 elif i == 28:
2474 deviceId = "2800".zfill(16)
2475 mappings[ macId ] = deviceId
2476 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2477 if hosts[ controller ] == []:
2478 main.log.warn( "There are no hosts discovered" )
2479 zeroHosts = True
2480 else:
2481 for host in hosts[ controller ]:
2482 mac = None
2483 location = None
2484 device = None
2485 port = None
2486 try:
2487 mac = host.get( 'mac' )
2488 assert mac, "mac field could not be found for this host object"
2489
2490 location = host.get( 'location' )
2491 assert location, "location field could not be found for this host object"
2492
2493 # Trim the protocol identifier off deviceId
2494 device = str( location.get( 'elementId' ) ).split(':')[1]
2495 assert device, "elementId field could not be found for this host location object"
2496
2497 port = location.get( 'port' )
2498 assert port, "port field could not be found for this host location object"
2499
2500 # Now check if this matches where they should be
2501 if mac and device and port:
2502 if str( port ) != "1":
2503 main.log.error( "The attachment port is incorrect for " +
2504 "host " + str( mac ) +
2505 ". Expected: 1 Actual: " + str( port) )
2506 hostAttachment = False
2507 if device != mappings[ str( mac ) ]:
2508 main.log.error( "The attachment device is incorrect for " +
2509 "host " + str( mac ) +
2510 ". Expected: " + mappings[ str( mac ) ] +
2511 " Actual: " + device )
2512 hostAttachment = False
2513 else:
2514 hostAttachment = False
2515 except AssertionError:
2516 main.log.exception( "Json object not as expected" )
2517 main.log.error( repr( host ) )
2518 hostAttachment = False
2519 else:
2520 main.log.error( "No hosts json output or \"Error\"" +
2521 " in output. hosts = " +
2522 repr( hosts[ controller ] ) )
2523 if zeroHosts is False:
2524 # TODO: Find a way to know if there should be hosts in a
2525 # given point of the test
2526 hostAttachment = True
2527
2528 # END CHECKING HOST ATTACHMENT POINTS
2529 devicesResults = devicesResults and currentDevicesResult
2530 linksResults = linksResults and currentLinksResult
2531 hostsResults = hostsResults and currentHostsResult
2532 hostAttachmentResults = hostAttachmentResults and\
2533 hostAttachment
2534 topoResult = ( devicesResults and linksResults
2535 and hostsResults and ipResult and
2536 hostAttachmentResults )
2537 utilities.assert_equals( expect=True,
2538 actual=topoResult,
2539 onpass="ONOS topology matches Mininet",
2540 onfail=topoFailMsg )
2541 # End of While loop to pull ONOS state
2542
2543 # Compare json objects for hosts and dataplane clusters
2544
2545 # hosts
2546 main.step( "Hosts view is consistent across all ONOS nodes" )
2547 consistentHostsResult = main.TRUE
2548 for controller in range( len( hosts ) ):
2549 controllerStr = str( main.activeNodes[controller] + 1 )
2550 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2551 if hosts[ controller ] == hosts[ 0 ]:
2552 continue
2553 else: # hosts not consistent
2554 main.log.error( "hosts from ONOS" + controllerStr +
2555 " is inconsistent with ONOS1" )
2556 main.log.warn( repr( hosts[ controller ] ) )
2557 consistentHostsResult = main.FALSE
2558
2559 else:
2560 main.log.error( "Error in getting ONOS hosts from ONOS" +
2561 controllerStr )
2562 consistentHostsResult = main.FALSE
2563 main.log.warn( "ONOS" + controllerStr +
2564 " hosts response: " +
2565 repr( hosts[ controller ] ) )
2566 utilities.assert_equals(
2567 expect=main.TRUE,
2568 actual=consistentHostsResult,
2569 onpass="Hosts view is consistent across all ONOS nodes",
2570 onfail="ONOS nodes have different views of hosts" )
2571
2572 main.step( "Hosts information is correct" )
2573 hostsResults = hostsResults and ipResult
2574 utilities.assert_equals(
2575 expect=main.TRUE,
2576 actual=hostsResults,
2577 onpass="Host information is correct",
2578 onfail="Host information is incorrect" )
2579
2580 main.step( "Host attachment points to the network" )
2581 utilities.assert_equals(
2582 expect=True,
2583 actual=hostAttachmentResults,
2584 onpass="Hosts are correctly attached to the network",
2585 onfail="ONOS did not correctly attach hosts to the network" )
2586
2587 # Strongly connected clusters of devices
2588 main.step( "Clusters view is consistent across all ONOS nodes" )
2589 consistentClustersResult = main.TRUE
2590 for controller in range( len( clusters ) ):
2591 controllerStr = str( main.activeNodes[controller] + 1 )
2592 if "Error" not in clusters[ controller ]:
2593 if clusters[ controller ] == clusters[ 0 ]:
2594 continue
2595 else: # clusters not consistent
2596 main.log.error( "clusters from ONOS" +
2597 controllerStr +
2598 " is inconsistent with ONOS1" )
2599 consistentClustersResult = main.FALSE
2600 else:
2601 main.log.error( "Error in getting dataplane clusters " +
2602 "from ONOS" + controllerStr )
2603 consistentClustersResult = main.FALSE
2604 main.log.warn( "ONOS" + controllerStr +
2605 " clusters response: " +
2606 repr( clusters[ controller ] ) )
2607 utilities.assert_equals(
2608 expect=main.TRUE,
2609 actual=consistentClustersResult,
2610 onpass="Clusters view is consistent across all ONOS nodes",
2611 onfail="ONOS nodes have different views of clusters" )
2612 if not consistentClustersResult:
2613 main.log.debug( clusters )
2614 for x in links:
2615 main.log.warn( "{}: {}".format( len( x ), x ) )
2616
2617
2618 main.step( "There is only one SCC" )
2619 # there should always only be one cluster
2620 try:
2621 numClusters = len( json.loads( clusters[ 0 ] ) )
2622 except ( ValueError, TypeError ):
2623 main.log.exception( "Error parsing clusters[0]: " +
2624 repr( clusters[0] ) )
2625 numClusters = "ERROR"
2626 clusterResults = main.FALSE
2627 if numClusters == 1:
2628 clusterResults = main.TRUE
2629 utilities.assert_equals(
2630 expect=1,
2631 actual=numClusters,
2632 onpass="ONOS shows 1 SCC",
2633 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2634
2635 topoResult = ( devicesResults and linksResults
2636 and hostsResults and consistentHostsResult
2637 and consistentClustersResult and clusterResults
2638 and ipResult and hostAttachmentResults )
2639
2640 topoResult = topoResult and int( count <= 2 )
2641 note = "note it takes about " + str( int( cliTime ) ) + \
2642 " seconds for the test to make all the cli calls to fetch " +\
2643 "the topology from each ONOS instance"
2644 main.log.info(
2645 "Very crass estimate for topology discovery/convergence( " +
2646 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2647 str( count ) + " tries" )
2648
2649 main.step( "Device information is correct" )
2650 utilities.assert_equals(
2651 expect=main.TRUE,
2652 actual=devicesResults,
2653 onpass="Device information is correct",
2654 onfail="Device information is incorrect" )
2655
2656 main.step( "Links are correct" )
2657 utilities.assert_equals(
2658 expect=main.TRUE,
2659 actual=linksResults,
2660 onpass="Link are correct",
2661 onfail="Links are incorrect" )
2662
2663 main.step( "Hosts are correct" )
2664 utilities.assert_equals(
2665 expect=main.TRUE,
2666 actual=hostsResults,
2667 onpass="Hosts are correct",
2668 onfail="Hosts are incorrect" )
2669
2670 # FIXME: move this to an ONOS state case
2671 main.step( "Checking ONOS nodes" )
2672 nodeResults = utilities.retry( main.HA.nodesCheck,
2673 False,
2674 args=[main.activeNodes],
2675 attempts=5 )
2676 utilities.assert_equals( expect=True, actual=nodeResults,
2677 onpass="Nodes check successful",
2678 onfail="Nodes check NOT successful" )
2679 if not nodeResults:
2680 for i in main.activeNodes:
2681 main.log.debug( "{} components not ACTIVE: \n{}".format(
2682 main.CLIs[i].name,
2683 main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
2684
2685 def CASE9( self, main ):
2686 """
2687 Link s3-s28 down
2688 """
2689 import time
2690 assert main.numCtrls, "main.numCtrls not defined"
2691 assert main, "main not defined"
2692 assert utilities.assert_equals, "utilities.assert_equals not defined"
2693 assert main.CLIs, "main.CLIs not defined"
2694 assert main.nodes, "main.nodes not defined"
2695 # NOTE: You should probably run a topology check after this
2696
2697 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2698
2699 description = "Turn off a link to ensure that Link Discovery " +\
2700 "is working properly"
2701 main.case( description )
2702
2703 main.step( "Kill Link between s3 and s28" )
2704 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2705 main.log.info( "Waiting " + str( linkSleep ) +
2706 " seconds for link down to be discovered" )
2707 time.sleep( linkSleep )
2708 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2709 onpass="Link down successful",
2710 onfail="Failed to bring link down" )
2711 # TODO do some sort of check here
2712
2713 def CASE10( self, main ):
2714 """
2715 Link s3-s28 up
2716 """
2717 import time
2718 assert main.numCtrls, "main.numCtrls not defined"
2719 assert main, "main not defined"
2720 assert utilities.assert_equals, "utilities.assert_equals not defined"
2721 assert main.CLIs, "main.CLIs not defined"
2722 assert main.nodes, "main.nodes not defined"
2723 # NOTE: You should probably run a topology check after this
2724
2725 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2726
2727 description = "Restore a link to ensure that Link Discovery is " + \
2728 "working properly"
2729 main.case( description )
2730
2731 main.step( "Bring link between s3 and s28 back up" )
2732 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2733 main.log.info( "Waiting " + str( linkSleep ) +
2734 " seconds for link up to be discovered" )
2735 time.sleep( linkSleep )
2736 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2737 onpass="Link up successful",
2738 onfail="Failed to bring link up" )
2739 # TODO do some sort of check here
2740
2741 def CASE11( self, main ):
2742 """
2743 Switch Down
2744 """
2745 # NOTE: You should probably run a topology check after this
2746 import time
2747 assert main.numCtrls, "main.numCtrls not defined"
2748 assert main, "main not defined"
2749 assert utilities.assert_equals, "utilities.assert_equals not defined"
2750 assert main.CLIs, "main.CLIs not defined"
2751 assert main.nodes, "main.nodes not defined"
2752
2753 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2754
2755 description = "Killing a switch to ensure it is discovered correctly"
2756 onosCli = main.CLIs[ main.activeNodes[0] ]
2757 main.case( description )
2758 switch = main.params[ 'kill' ][ 'switch' ]
2759 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2760
2761 # TODO: Make this switch parameterizable
2762 main.step( "Kill " + switch )
2763 main.log.info( "Deleting " + switch )
2764 main.Mininet1.delSwitch( switch )
2765 main.log.info( "Waiting " + str( switchSleep ) +
2766 " seconds for switch down to be discovered" )
2767 time.sleep( switchSleep )
2768 device = onosCli.getDevice( dpid=switchDPID )
2769 # Peek at the deleted switch
2770 main.log.warn( str( device ) )
2771 result = main.FALSE
2772 if device and device[ 'available' ] is False:
2773 result = main.TRUE
2774 utilities.assert_equals( expect=main.TRUE, actual=result,
2775 onpass="Kill switch successful",
2776 onfail="Failed to kill switch?" )
2777
2778 def CASE12( self, main ):
2779 """
2780 Switch Up
2781 """
2782 # NOTE: You should probably run a topology check after this
2783 import time
2784 assert main.numCtrls, "main.numCtrls not defined"
2785 assert main, "main not defined"
2786 assert utilities.assert_equals, "utilities.assert_equals not defined"
2787 assert main.CLIs, "main.CLIs not defined"
2788 assert main.nodes, "main.nodes not defined"
2789
2790 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2791 switch = main.params[ 'kill' ][ 'switch' ]
2792 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2793 links = main.params[ 'kill' ][ 'links' ].split()
2794 onosCli = main.CLIs[ main.activeNodes[0] ]
2795 description = "Adding a switch to ensure it is discovered correctly"
2796 main.case( description )
2797
2798 main.step( "Add back " + switch )
2799 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2800 for peer in links:
2801 main.Mininet1.addLink( switch, peer )
2802 ipList = [ node.ip_address for node in main.nodes ]
2803 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2804 main.log.info( "Waiting " + str( switchSleep ) +
2805 " seconds for switch up to be discovered" )
2806 time.sleep( switchSleep )
2807 device = onosCli.getDevice( dpid=switchDPID )
2808 # Peek at the deleted switch
2809 main.log.warn( str( device ) )
2810 result = main.FALSE
2811 if device and device[ 'available' ]:
2812 result = main.TRUE
2813 utilities.assert_equals( expect=main.TRUE, actual=result,
2814 onpass="add switch successful",
2815 onfail="Failed to add switch?" )
2816
2817 def CASE13( self, main ):
2818 """
2819 Clean up
2820 """
2821 assert main.numCtrls, "main.numCtrls not defined"
2822 assert main, "main not defined"
2823 assert utilities.assert_equals, "utilities.assert_equals not defined"
2824 assert main.CLIs, "main.CLIs not defined"
2825 assert main.nodes, "main.nodes not defined"
2826
2827 main.case( "Test Cleanup" )
2828 main.step( "Killing tcpdumps" )
2829 main.Mininet2.stopTcpdump()
2830
2831 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2832 main.step( "Copying MN pcap and ONOS log files to test station" )
2833 # NOTE: MN Pcap file is being saved to logdir.
2834 # We scp this file as MN and TestON aren't necessarily the same vm
2835
2836 # FIXME: To be replaced with a Jenkin's post script
2837 # TODO: Load these from params
2838 # NOTE: must end in /
2839 logFolder = "/opt/onos/log/"
2840 logFiles = [ "karaf.log", "karaf.log.1" ]
2841 # NOTE: must end in /
2842 for f in logFiles:
2843 for node in main.nodes:
2844 dstName = main.logdir + "/" + node.name + "-" + f
2845 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2846 logFolder + f, dstName )
2847 # std*.log's
2848 # NOTE: must end in /
2849 logFolder = "/opt/onos/var/"
2850 logFiles = [ "stderr.log", "stdout.log" ]
2851 # NOTE: must end in /
2852 for f in logFiles:
2853 for node in main.nodes:
2854 dstName = main.logdir + "/" + node.name + "-" + f
2855 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2856 logFolder + f, dstName )
2857 else:
2858 main.log.debug( "skipping saving log files" )
2859
2860 main.step( "Stopping Mininet" )
2861 mnResult = main.Mininet1.stopNet()
2862 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2863 onpass="Mininet stopped",
2864 onfail="MN cleanup NOT successful" )
2865
2866 main.step( "Checking ONOS Logs for errors" )
2867 for node in main.nodes:
2868 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2869 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2870
2871 try:
2872 timerLog = open( main.logdir + "/Timers.csv", 'w')
2873 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2874 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2875 timerLog.close()
2876 except NameError, e:
2877 main.log.exception(e)
2878
2879 main.step( "Stopping webserver" )
2880 status = main.Server.stop( )
2881 utilities.assert_equals( expect=main.TRUE, actual=status,
2882 onpass="Stop Server",
2883 onfail="Failled to stop SimpleHTTPServer" )
2884 del main.Server
2885
2886 def CASE14( self, main ):
2887 """
2888 start election app on all onos nodes
2889 """
2890 import time
2891 assert main.numCtrls, "main.numCtrls not defined"
2892 assert main, "main not defined"
2893 assert utilities.assert_equals, "utilities.assert_equals not defined"
2894 assert main.CLIs, "main.CLIs not defined"
2895 assert main.nodes, "main.nodes not defined"
2896
2897 main.case("Start Leadership Election app")
2898 main.step( "Install leadership election app" )
2899 onosCli = main.CLIs[ main.activeNodes[0] ]
2900 appResult = onosCli.activateApp( "org.onosproject.election" )
2901 utilities.assert_equals(
2902 expect=main.TRUE,
2903 actual=appResult,
2904 onpass="Election app installed",
2905 onfail="Something went wrong with installing Leadership election" )
2906
2907 main.step( "Run for election on each node" )
2908 for i in main.activeNodes:
2909 main.CLIs[i].electionTestRun()
2910 time.sleep(5)
2911 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2912 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
2913 utilities.assert_equals(
2914 expect=True,
2915 actual=sameResult,
2916 onpass="All nodes see the same leaderboards",
2917 onfail="Inconsistent leaderboards" )
2918
2919 if sameResult:
2920 leader = leaders[ 0 ][ 0 ]
2921 if main.nodes[ main.activeNodes[0] ].ip_address in leader:
2922 correctLeader = True
2923 else:
2924 correctLeader = False
2925 main.step( "First node was elected leader" )
2926 utilities.assert_equals(
2927 expect=True,
2928 actual=correctLeader,
2929 onpass="Correct leader was elected",
2930 onfail="Incorrect leader" )
2931
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app there is nothing meaningful to test
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        # consistentLeaderboards returns ( allSame, perNodeLeaderboards );
        # leaderboard[ 0 ] is the current leader followed by the candidates
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader by matching its IP against the nodes
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: runs only if no break occurred
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means a node reported that no leader is elected, which
            # is only acceptable in the single-controller case
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 entries in the old leaderboard: cannot predict
            # which candidate should have won
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this sleep
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                     str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3105
3106 def CASE16( self, main ):
3107 """
3108 Install Distributed Primitives app
3109 """
3110 import time
3111 assert main.numCtrls, "main.numCtrls not defined"
3112 assert main, "main not defined"
3113 assert utilities.assert_equals, "utilities.assert_equals not defined"
3114 assert main.CLIs, "main.CLIs not defined"
3115 assert main.nodes, "main.nodes not defined"
3116
3117 # Variables for the distributed primitives tests
3118 global pCounterName
3119 global pCounterValue
3120 global onosSet
3121 global onosSetName
3122 pCounterName = "TestON-Partitions"
3123 pCounterValue = 0
3124 onosSet = set([])
3125 onosSetName = "TestON-set"
3126
3127 description = "Install Primitives app"
3128 main.case( description )
3129 main.step( "Install Primitives app" )
3130 appName = "org.onosproject.distributedprimitives"
3131 node = main.activeNodes[0]
3132 appResults = main.CLIs[node].activateApp( appName )
3133 utilities.assert_equals( expect=main.TRUE,
3134 actual=appResults,
3135 onpass="Primitives app activated",
3136 onfail="Primitives app not activated" )
3137 time.sleep( 5 ) # To allow all nodes to activate
3138
3139 def CASE17( self, main ):
3140 """
3141 Check for basic functionality with distributed primitives
3142 """
3143 # Make sure variables are defined/set
3144 assert main.numCtrls, "main.numCtrls not defined"
3145 assert main, "main not defined"
3146 assert utilities.assert_equals, "utilities.assert_equals not defined"
3147 assert main.CLIs, "main.CLIs not defined"
3148 assert main.nodes, "main.nodes not defined"
3149 assert pCounterName, "pCounterName not defined"
3150 assert onosSetName, "onosSetName not defined"
3151 # NOTE: assert fails if value is 0/None/Empty/False
3152 try:
3153 pCounterValue
3154 except NameError:
3155 main.log.error( "pCounterValue not defined, setting to 0" )
3156 pCounterValue = 0
3157 try:
3158 onosSet
3159 except NameError:
3160 main.log.error( "onosSet not defined, setting to empty Set" )
3161 onosSet = set([])
3162 # Variables for the distributed primitives tests. These are local only
3163 addValue = "a"
3164 addAllValue = "a b c d e f"
3165 retainValue = "c d e f"
3166
3167 description = "Check for basic functionality with distributed " +\
3168 "primitives"
3169 main.case( description )
3170 main.caseExplanation = "Test the methods of the distributed " +\
3171 "primitives (counters and sets) throught the cli"
3172 # DISTRIBUTED ATOMIC COUNTERS
3173 # Partitioned counters
3174 main.step( "Increment then get a default counter on each node" )
3175 pCounters = []
3176 threads = []
3177 addedPValues = []
3178 for i in main.activeNodes:
3179 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3180 name="counterAddAndGet-" + str( i ),
3181 args=[ pCounterName ] )
3182 pCounterValue += 1
3183 addedPValues.append( pCounterValue )
3184 threads.append( t )
3185 t.start()
3186
3187 for t in threads:
3188 t.join()
3189 pCounters.append( t.result )
3190 # Check that counter incremented numController times
3191 pCounterResults = True
3192 for i in addedPValues:
3193 tmpResult = i in pCounters
3194 pCounterResults = pCounterResults and tmpResult
3195 if not tmpResult:
3196 main.log.error( str( i ) + " is not in partitioned "
3197 "counter incremented results" )
3198 utilities.assert_equals( expect=True,
3199 actual=pCounterResults,
3200 onpass="Default counter incremented",
3201 onfail="Error incrementing default" +
3202 " counter" )
3203
3204 main.step( "Get then Increment a default counter on each node" )
3205 pCounters = []
3206 threads = []
3207 addedPValues = []
3208 for i in main.activeNodes:
3209 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3210 name="counterGetAndAdd-" + str( i ),
3211 args=[ pCounterName ] )
3212 addedPValues.append( pCounterValue )
3213 pCounterValue += 1
3214 threads.append( t )
3215 t.start()
3216
3217 for t in threads:
3218 t.join()
3219 pCounters.append( t.result )
3220 # Check that counter incremented numController times
3221 pCounterResults = True
3222 for i in addedPValues:
3223 tmpResult = i in pCounters
3224 pCounterResults = pCounterResults and tmpResult
3225 if not tmpResult:
3226 main.log.error( str( i ) + " is not in partitioned "
3227 "counter incremented results" )
3228 utilities.assert_equals( expect=True,
3229 actual=pCounterResults,
3230 onpass="Default counter incremented",
3231 onfail="Error incrementing default" +
3232 " counter" )
3233
3234 main.step( "Counters we added have the correct values" )
3235 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3236 utilities.assert_equals( expect=main.TRUE,
3237 actual=incrementCheck,
3238 onpass="Added counters are correct",
3239 onfail="Added counters are incorrect" )
3240
3241 main.step( "Add -8 to then get a default counter on each node" )
3242 pCounters = []
3243 threads = []
3244 addedPValues = []
3245 for i in main.activeNodes:
3246 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3247 name="counterIncrement-" + str( i ),
3248 args=[ pCounterName ],
3249 kwargs={ "delta": -8 } )
3250 pCounterValue += -8
3251 addedPValues.append( pCounterValue )
3252 threads.append( t )
3253 t.start()
3254
3255 for t in threads:
3256 t.join()
3257 pCounters.append( t.result )
3258 # Check that counter incremented numController times
3259 pCounterResults = True
3260 for i in addedPValues:
3261 tmpResult = i in pCounters
3262 pCounterResults = pCounterResults and tmpResult
3263 if not tmpResult:
3264 main.log.error( str( i ) + " is not in partitioned "
3265 "counter incremented results" )
3266 utilities.assert_equals( expect=True,
3267 actual=pCounterResults,
3268 onpass="Default counter incremented",
3269 onfail="Error incrementing default" +
3270 " counter" )
3271
3272 main.step( "Add 5 to then get a default counter on each node" )
3273 pCounters = []
3274 threads = []
3275 addedPValues = []
3276 for i in main.activeNodes:
3277 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3278 name="counterIncrement-" + str( i ),
3279 args=[ pCounterName ],
3280 kwargs={ "delta": 5 } )
3281 pCounterValue += 5
3282 addedPValues.append( pCounterValue )
3283 threads.append( t )
3284 t.start()
3285
3286 for t in threads:
3287 t.join()
3288 pCounters.append( t.result )
3289 # Check that counter incremented numController times
3290 pCounterResults = True
3291 for i in addedPValues:
3292 tmpResult = i in pCounters
3293 pCounterResults = pCounterResults and tmpResult
3294 if not tmpResult:
3295 main.log.error( str( i ) + " is not in partitioned "
3296 "counter incremented results" )
3297 utilities.assert_equals( expect=True,
3298 actual=pCounterResults,
3299 onpass="Default counter incremented",
3300 onfail="Error incrementing default" +
3301 " counter" )
3302
3303 main.step( "Get then add 5 to a default counter on each node" )
3304 pCounters = []
3305 threads = []
3306 addedPValues = []
3307 for i in main.activeNodes:
3308 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3309 name="counterIncrement-" + str( i ),
3310 args=[ pCounterName ],
3311 kwargs={ "delta": 5 } )
3312 addedPValues.append( pCounterValue )
3313 pCounterValue += 5
3314 threads.append( t )
3315 t.start()
3316
3317 for t in threads:
3318 t.join()
3319 pCounters.append( t.result )
3320 # Check that counter incremented numController times
3321 pCounterResults = True
3322 for i in addedPValues:
3323 tmpResult = i in pCounters
3324 pCounterResults = pCounterResults and tmpResult
3325 if not tmpResult:
3326 main.log.error( str( i ) + " is not in partitioned "
3327 "counter incremented results" )
3328 utilities.assert_equals( expect=True,
3329 actual=pCounterResults,
3330 onpass="Default counter incremented",
3331 onfail="Error incrementing default" +
3332 " counter" )
3333
3334 main.step( "Counters we added have the correct values" )
3335 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
3336 utilities.assert_equals( expect=main.TRUE,
3337 actual=incrementCheck,
3338 onpass="Added counters are correct",
3339 onfail="Added counters are incorrect" )
3340
3341 # DISTRIBUTED SETS
3342 main.step( "Distributed Set get" )
3343 size = len( onosSet )
3344 getResponses = []
3345 threads = []
3346 for i in main.activeNodes:
3347 t = main.Thread( target=main.CLIs[i].setTestGet,
3348 name="setTestGet-" + str( i ),
3349 args=[ onosSetName ] )
3350 threads.append( t )
3351 t.start()
3352 for t in threads:
3353 t.join()
3354 getResponses.append( t.result )
3355
3356 getResults = main.TRUE
3357 for i in range( len( main.activeNodes ) ):
3358 node = str( main.activeNodes[i] + 1 )
3359 if isinstance( getResponses[ i ], list):
3360 current = set( getResponses[ i ] )
3361 if len( current ) == len( getResponses[ i ] ):
3362 # no repeats
3363 if onosSet != current:
3364 main.log.error( "ONOS" + node +
3365 " has incorrect view" +
3366 " of set " + onosSetName + ":\n" +
3367 str( getResponses[ i ] ) )
3368 main.log.debug( "Expected: " + str( onosSet ) )
3369 main.log.debug( "Actual: " + str( current ) )
3370 getResults = main.FALSE
3371 else:
3372 # error, set is not a set
3373 main.log.error( "ONOS" + node +
3374 " has repeat elements in" +
3375 " set " + onosSetName + ":\n" +
3376 str( getResponses[ i ] ) )
3377 getResults = main.FALSE
3378 elif getResponses[ i ] == main.ERROR:
3379 getResults = main.FALSE
3380 utilities.assert_equals( expect=main.TRUE,
3381 actual=getResults,
3382 onpass="Set elements are correct",
3383 onfail="Set elements are incorrect" )
3384
3385 main.step( "Distributed Set size" )
3386 sizeResponses = []
3387 threads = []
3388 for i in main.activeNodes:
3389 t = main.Thread( target=main.CLIs[i].setTestSize,
3390 name="setTestSize-" + str( i ),
3391 args=[ onosSetName ] )
3392 threads.append( t )
3393 t.start()
3394 for t in threads:
3395 t.join()
3396 sizeResponses.append( t.result )
3397
3398 sizeResults = main.TRUE
3399 for i in range( len( main.activeNodes ) ):
3400 node = str( main.activeNodes[i] + 1 )
3401 if size != sizeResponses[ i ]:
3402 sizeResults = main.FALSE
3403 main.log.error( "ONOS" + node +
3404 " expected a size of " + str( size ) +
3405 " for set " + onosSetName +
3406 " but got " + str( sizeResponses[ i ] ) )
3407 utilities.assert_equals( expect=main.TRUE,
3408 actual=sizeResults,
3409 onpass="Set sizes are correct",
3410 onfail="Set sizes are incorrect" )
3411
3412 main.step( "Distributed Set add()" )
3413 onosSet.add( addValue )
3414 addResponses = []
3415 threads = []
3416 for i in main.activeNodes:
3417 t = main.Thread( target=main.CLIs[i].setTestAdd,
3418 name="setTestAdd-" + str( i ),
3419 args=[ onosSetName, addValue ] )
3420 threads.append( t )
3421 t.start()
3422 for t in threads:
3423 t.join()
3424 addResponses.append( t.result )
3425
3426 # main.TRUE = successfully changed the set
3427 # main.FALSE = action resulted in no change in set
3428 # main.ERROR - Some error in executing the function
3429 addResults = main.TRUE
3430 for i in range( len( main.activeNodes ) ):
3431 if addResponses[ i ] == main.TRUE:
3432 # All is well
3433 pass
3434 elif addResponses[ i ] == main.FALSE:
3435 # Already in set, probably fine
3436 pass
3437 elif addResponses[ i ] == main.ERROR:
3438 # Error in execution
3439 addResults = main.FALSE
3440 else:
3441 # unexpected result
3442 addResults = main.FALSE
3443 if addResults != main.TRUE:
3444 main.log.error( "Error executing set add" )
3445
3446 # Check if set is still correct
3447 size = len( onosSet )
3448 getResponses = []
3449 threads = []
3450 for i in main.activeNodes:
3451 t = main.Thread( target=main.CLIs[i].setTestGet,
3452 name="setTestGet-" + str( i ),
3453 args=[ onosSetName ] )
3454 threads.append( t )
3455 t.start()
3456 for t in threads:
3457 t.join()
3458 getResponses.append( t.result )
3459 getResults = main.TRUE
3460 for i in range( len( main.activeNodes ) ):
3461 node = str( main.activeNodes[i] + 1 )
3462 if isinstance( getResponses[ i ], list):
3463 current = set( getResponses[ i ] )
3464 if len( current ) == len( getResponses[ i ] ):
3465 # no repeats
3466 if onosSet != current:
3467 main.log.error( "ONOS" + node + " has incorrect view" +
3468 " of set " + onosSetName + ":\n" +
3469 str( getResponses[ i ] ) )
3470 main.log.debug( "Expected: " + str( onosSet ) )
3471 main.log.debug( "Actual: " + str( current ) )
3472 getResults = main.FALSE
3473 else:
3474 # error, set is not a set
3475 main.log.error( "ONOS" + node + " has repeat elements in" +
3476 " set " + onosSetName + ":\n" +
3477 str( getResponses[ i ] ) )
3478 getResults = main.FALSE
3479 elif getResponses[ i ] == main.ERROR:
3480 getResults = main.FALSE
3481 sizeResponses = []
3482 threads = []
3483 for i in main.activeNodes:
3484 t = main.Thread( target=main.CLIs[i].setTestSize,
3485 name="setTestSize-" + str( i ),
3486 args=[ onosSetName ] )
3487 threads.append( t )
3488 t.start()
3489 for t in threads:
3490 t.join()
3491 sizeResponses.append( t.result )
3492 sizeResults = main.TRUE
3493 for i in range( len( main.activeNodes ) ):
3494 node = str( main.activeNodes[i] + 1 )
3495 if size != sizeResponses[ i ]:
3496 sizeResults = main.FALSE
3497 main.log.error( "ONOS" + node +
3498 " expected a size of " + str( size ) +
3499 " for set " + onosSetName +
3500 " but got " + str( sizeResponses[ i ] ) )
3501 addResults = addResults and getResults and sizeResults
3502 utilities.assert_equals( expect=main.TRUE,
3503 actual=addResults,
3504 onpass="Set add correct",
3505 onfail="Set add was incorrect" )
3506
3507 main.step( "Distributed Set addAll()" )
3508 onosSet.update( addAllValue.split() )
3509 addResponses = []
3510 threads = []
3511 for i in main.activeNodes:
3512 t = main.Thread( target=main.CLIs[i].setTestAdd,
3513 name="setTestAddAll-" + str( i ),
3514 args=[ onosSetName, addAllValue ] )
3515 threads.append( t )
3516 t.start()
3517 for t in threads:
3518 t.join()
3519 addResponses.append( t.result )
3520
3521 # main.TRUE = successfully changed the set
3522 # main.FALSE = action resulted in no change in set
3523 # main.ERROR - Some error in executing the function
3524 addAllResults = main.TRUE
3525 for i in range( len( main.activeNodes ) ):
3526 if addResponses[ i ] == main.TRUE:
3527 # All is well
3528 pass
3529 elif addResponses[ i ] == main.FALSE:
3530 # Already in set, probably fine
3531 pass
3532 elif addResponses[ i ] == main.ERROR:
3533 # Error in execution
3534 addAllResults = main.FALSE
3535 else:
3536 # unexpected result
3537 addAllResults = main.FALSE
3538 if addAllResults != main.TRUE:
3539 main.log.error( "Error executing set addAll" )
3540
3541 # Check if set is still correct
3542 size = len( onosSet )
3543 getResponses = []
3544 threads = []
3545 for i in main.activeNodes:
3546 t = main.Thread( target=main.CLIs[i].setTestGet,
3547 name="setTestGet-" + str( i ),
3548 args=[ onosSetName ] )
3549 threads.append( t )
3550 t.start()
3551 for t in threads:
3552 t.join()
3553 getResponses.append( t.result )
3554 getResults = main.TRUE
3555 for i in range( len( main.activeNodes ) ):
3556 node = str( main.activeNodes[i] + 1 )
3557 if isinstance( getResponses[ i ], list):
3558 current = set( getResponses[ i ] )
3559 if len( current ) == len( getResponses[ i ] ):
3560 # no repeats
3561 if onosSet != current:
3562 main.log.error( "ONOS" + node +
3563 " has incorrect view" +
3564 " of set " + onosSetName + ":\n" +
3565 str( getResponses[ i ] ) )
3566 main.log.debug( "Expected: " + str( onosSet ) )
3567 main.log.debug( "Actual: " + str( current ) )
3568 getResults = main.FALSE
3569 else:
3570 # error, set is not a set
3571 main.log.error( "ONOS" + node +
3572 " has repeat elements in" +
3573 " set " + onosSetName + ":\n" +
3574 str( getResponses[ i ] ) )
3575 getResults = main.FALSE
3576 elif getResponses[ i ] == main.ERROR:
3577 getResults = main.FALSE
3578 sizeResponses = []
3579 threads = []
3580 for i in main.activeNodes:
3581 t = main.Thread( target=main.CLIs[i].setTestSize,
3582 name="setTestSize-" + str( i ),
3583 args=[ onosSetName ] )
3584 threads.append( t )
3585 t.start()
3586 for t in threads:
3587 t.join()
3588 sizeResponses.append( t.result )
3589 sizeResults = main.TRUE
3590 for i in range( len( main.activeNodes ) ):
3591 node = str( main.activeNodes[i] + 1 )
3592 if size != sizeResponses[ i ]:
3593 sizeResults = main.FALSE
3594 main.log.error( "ONOS" + node +
3595 " expected a size of " + str( size ) +
3596 " for set " + onosSetName +
3597 " but got " + str( sizeResponses[ i ] ) )
3598 addAllResults = addAllResults and getResults and sizeResults
3599 utilities.assert_equals( expect=main.TRUE,
3600 actual=addAllResults,
3601 onpass="Set addAll correct",
3602 onfail="Set addAll was incorrect" )
3603
3604 main.step( "Distributed Set contains()" )
3605 containsResponses = []
3606 threads = []
3607 for i in main.activeNodes:
3608 t = main.Thread( target=main.CLIs[i].setTestGet,
3609 name="setContains-" + str( i ),
3610 args=[ onosSetName ],
3611 kwargs={ "values": addValue } )
3612 threads.append( t )
3613 t.start()
3614 for t in threads:
3615 t.join()
3616 # NOTE: This is the tuple
3617 containsResponses.append( t.result )
3618
3619 containsResults = main.TRUE
3620 for i in range( len( main.activeNodes ) ):
3621 if containsResponses[ i ] == main.ERROR:
3622 containsResults = main.FALSE
3623 else:
3624 containsResults = containsResults and\
3625 containsResponses[ i ][ 1 ]
3626 utilities.assert_equals( expect=main.TRUE,
3627 actual=containsResults,
3628 onpass="Set contains is functional",
3629 onfail="Set contains failed" )
3630
3631 main.step( "Distributed Set containsAll()" )
3632 containsAllResponses = []
3633 threads = []
3634 for i in main.activeNodes:
3635 t = main.Thread( target=main.CLIs[i].setTestGet,
3636 name="setContainsAll-" + str( i ),
3637 args=[ onosSetName ],
3638 kwargs={ "values": addAllValue } )
3639 threads.append( t )
3640 t.start()
3641 for t in threads:
3642 t.join()
3643 # NOTE: This is the tuple
3644 containsAllResponses.append( t.result )
3645
3646 containsAllResults = main.TRUE
3647 for i in range( len( main.activeNodes ) ):
3648 if containsResponses[ i ] == main.ERROR:
3649 containsResults = main.FALSE
3650 else:
3651 containsResults = containsResults and\
3652 containsResponses[ i ][ 1 ]
3653 utilities.assert_equals( expect=main.TRUE,
3654 actual=containsAllResults,
3655 onpass="Set containsAll is functional",
3656 onfail="Set containsAll failed" )
3657
        # Remove a single value from the distributed set on every active node
        # in parallel, then verify that all nodes agree on the set contents
        # and size afterwards.
        main.step( "Distributed Set remove()" )
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also compare the size reported by each node against the local model
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3754
3755 main.step( "Distributed Set removeAll()" )
3756 onosSet.difference_update( addAllValue.split() )
3757 removeAllResponses = []
3758 threads = []
3759 try:
3760 for i in main.activeNodes:
3761 t = main.Thread( target=main.CLIs[i].setTestRemove,
3762 name="setTestRemoveAll-" + str( i ),
3763 args=[ onosSetName, addAllValue ] )
3764 threads.append( t )
3765 t.start()
3766 for t in threads:
3767 t.join()
3768 removeAllResponses.append( t.result )
3769 except Exception, e:
3770 main.log.exception(e)
3771
3772 # main.TRUE = successfully changed the set
3773 # main.FALSE = action resulted in no change in set
3774 # main.ERROR - Some error in executing the function
3775 removeAllResults = main.TRUE
3776 for i in range( len( main.activeNodes ) ):
3777 if removeAllResponses[ i ] == main.TRUE:
3778 # All is well
3779 pass
3780 elif removeAllResponses[ i ] == main.FALSE:
3781 # not in set, probably fine
3782 pass
3783 elif removeAllResponses[ i ] == main.ERROR:
3784 # Error in execution
3785 removeAllResults = main.FALSE
3786 else:
3787 # unexpected result
3788 removeAllResults = main.FALSE
3789 if removeAllResults != main.TRUE:
3790 main.log.error( "Error executing set removeAll" )
3791
3792 # Check if set is still correct
3793 size = len( onosSet )
3794 getResponses = []
3795 threads = []
3796 for i in main.activeNodes:
3797 t = main.Thread( target=main.CLIs[i].setTestGet,
3798 name="setTestGet-" + str( i ),
3799 args=[ onosSetName ] )
3800 threads.append( t )
3801 t.start()
3802 for t in threads:
3803 t.join()
3804 getResponses.append( t.result )
3805 getResults = main.TRUE
3806 for i in range( len( main.activeNodes ) ):
3807 node = str( main.activeNodes[i] + 1 )
3808 if isinstance( getResponses[ i ], list):
3809 current = set( getResponses[ i ] )
3810 if len( current ) == len( getResponses[ i ] ):
3811 # no repeats
3812 if onosSet != current:
3813 main.log.error( "ONOS" + node +
3814 " has incorrect view" +
3815 " of set " + onosSetName + ":\n" +
3816 str( getResponses[ i ] ) )
3817 main.log.debug( "Expected: " + str( onosSet ) )
3818 main.log.debug( "Actual: " + str( current ) )
3819 getResults = main.FALSE
3820 else:
3821 # error, set is not a set
3822 main.log.error( "ONOS" + node +
3823 " has repeat elements in" +
3824 " set " + onosSetName + ":\n" +
3825 str( getResponses[ i ] ) )
3826 getResults = main.FALSE
3827 elif getResponses[ i ] == main.ERROR:
3828 getResults = main.FALSE
3829 sizeResponses = []
3830 threads = []
3831 for i in main.activeNodes:
3832 t = main.Thread( target=main.CLIs[i].setTestSize,
3833 name="setTestSize-" + str( i ),
3834 args=[ onosSetName ] )
3835 threads.append( t )
3836 t.start()
3837 for t in threads:
3838 t.join()
3839 sizeResponses.append( t.result )
3840 sizeResults = main.TRUE
3841 for i in range( len( main.activeNodes ) ):
3842 node = str( main.activeNodes[i] + 1 )
3843 if size != sizeResponses[ i ]:
3844 sizeResults = main.FALSE
3845 main.log.error( "ONOS" + node +
3846 " expected a size of " + str( size ) +
3847 " for set " + onosSetName +
3848 " but got " + str( sizeResponses[ i ] ) )
3849 removeAllResults = removeAllResults and getResults and sizeResults
3850 utilities.assert_equals( expect=main.TRUE,
3851 actual=removeAllResults,
3852 onpass="Set removeAll correct",
3853 onfail="Set removeAll was incorrect" )
3854
        # Add a whole space-separated batch of values to the distributed set
        # on every active node in parallel, then verify contents and size.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also compare the size reported by each node against the local model
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3951
        # Clear the distributed set on every active node in parallel (via the
        # remove CLI command with clear=True), then verify it is empty on all
        # nodes.
        main.step( "Distributed Set clear()" )
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also compare the size reported by each node against the local model
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4049
        # Re-populate the set after the clear() above, using the same addAll
        # batch, then verify contents and size on all nodes.
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also compare the size reported by each node against the local model
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4146
        # Retain only the values in retainValue (set intersection) on every
        # active node in parallel, then verify contents and size on all nodes.
        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also compare the size reported by each node against the local model
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4243
4244 # Transactional maps
4245 main.step( "Partitioned Transactional maps put" )
4246 tMapValue = "Testing"
4247 numKeys = 100
4248 putResult = True
4249 node = main.activeNodes[0]
4250 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4251 if putResponses and len( putResponses ) == 100:
4252 for i in putResponses:
4253 if putResponses[ i ][ 'value' ] != tMapValue:
4254 putResult = False
4255 else:
4256 putResult = False
4257 if not putResult:
4258 main.log.debug( "Put response values: " + str( putResponses ) )
4259 utilities.assert_equals( expect=True,
4260 actual=putResult,
4261 onpass="Partitioned Transactional Map put successful",
4262 onfail="Partitioned Transactional Map put values are incorrect" )
4263
        # Read back every key of the transactional map from every active node
        # in parallel and check each node returns the value written above.
        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # NOTE: `node` here is a per-node response value, not a node index
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )