blob: 7fee99362e22dc7f2577ce6dcffd080293ab2c0d [file] [log] [blame]
Jon Hall6e709752016-02-01 13:38:46 -08001"""
2Description: This test is to determine if ONOS can handle
3 a full network partition
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
14CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
28class HAfullNetPartition:
29
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
51 import imp
52 import pexpect
53 import time
Jon Halla440e872016-03-31 15:15:50 -070054 import json
Jon Hall6e709752016-02-01 13:38:46 -080055 main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
56 "initialization" )
57 main.case( "Setting up test environment" )
58 main.caseExplanation = "Setup the test environment including " +\
59 "installing ONOS, starting Mininet and ONOS" +\
60 "cli sessions."
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
69 main.numCtrls = int( main.params[ 'num_controllers' ] )
70 if main.ONOSbench.maxNodes:
71 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
74 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070081 # These are for csv plotting in jenkins
82 global labels
83 global data
84 labels = []
85 data = []
Jon Hall6e709752016-02-01 13:38:46 -080086
87 # FIXME: just get controller port from params?
88 # TODO: do we really need all these?
89 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
90 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
91 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
92 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
93 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
94 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
95 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
96
97 try:
Jon Hall53c5e662016-04-13 16:06:56 -070098 from tests.HA.dependencies.HA import HA
Jon Hall41d39f12016-04-11 22:54:35 -070099 main.HA = HA()
Jon Hall6e709752016-02-01 13:38:46 -0800100 except Exception as e:
101 main.log.exception( e )
102 main.cleanup()
103 main.exit()
104
105 main.CLIs = []
106 main.nodes = []
107 ipList = []
108 for i in range( 1, main.numCtrls + 1 ):
109 try:
110 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
111 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
112 ipList.append( main.nodes[ -1 ].ip_address )
113 except AttributeError:
114 break
115
116 main.step( "Create cell file" )
117 cellAppString = main.params[ 'ENV' ][ 'appString' ]
118 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
119 main.Mininet1.ip_address,
120 cellAppString, ipList )
121 main.step( "Applying cell variable to environment" )
122 cellResult = main.ONOSbench.setCell( cellName )
123 verifyResult = main.ONOSbench.verifyCell()
124
125 # FIXME:this is short term fix
126 main.log.info( "Removing raft logs" )
127 main.ONOSbench.onosRemoveRaftLogs()
128
129 main.log.info( "Uninstalling ONOS" )
130 for node in main.nodes:
131 main.ONOSbench.onosUninstall( node.ip_address )
132
133 # Make sure ONOS is DEAD
134 main.log.info( "Killing any ONOS processes" )
135 killResults = main.TRUE
136 for node in main.nodes:
137 killed = main.ONOSbench.onosKill( node.ip_address )
138 killResults = killResults and killed
139
140 cleanInstallResult = main.TRUE
141 gitPullResult = main.TRUE
142
143 main.step( "Starting Mininet" )
144 # scp topo file to mininet
145 # TODO: move to params?
146 topoName = "obelisk.py"
147 filePath = main.ONOSbench.home + "/tools/test/topos/"
148 main.ONOSbench.scp( main.Mininet1,
149 filePath + topoName,
150 main.Mininet1.home,
151 direction="to" )
152 mnResult = main.Mininet1.startNet( )
153 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
154 onpass="Mininet Started",
155 onfail="Error starting Mininet" )
156
157 main.step( "Git checkout and pull " + gitBranch )
158 if PULLCODE:
159 main.ONOSbench.gitCheckout( gitBranch )
160 gitPullResult = main.ONOSbench.gitPull()
161 # values of 1 or 3 are good
162 utilities.assert_lesser( expect=0, actual=gitPullResult,
163 onpass="Git pull successful",
164 onfail="Git pull failed" )
165 main.ONOSbench.getVersion( report=True )
166
167 main.step( "Using mvn clean install" )
168 cleanInstallResult = main.TRUE
169 if PULLCODE and gitPullResult == main.TRUE:
170 cleanInstallResult = main.ONOSbench.cleanInstall()
171 else:
172 main.log.warn( "Did not pull new code so skipping mvn " +
173 "clean install" )
174 utilities.assert_equals( expect=main.TRUE,
175 actual=cleanInstallResult,
176 onpass="MCI successful",
177 onfail="MCI failed" )
178 # GRAPHS
179 # NOTE: important params here:
180 # job = name of Jenkins job
181 # Plot Name = Plot-HA, only can be used if multiple plots
182 # index = The number of the graph under plot name
183 job = "HAfullNetPartition"
184 plotName = "Plot-HA"
Jon Hall676e5432016-09-26 11:32:50 -0700185 index = "2"
Jon Hall6e709752016-02-01 13:38:46 -0800186 graphs = '<ac:structured-macro ac:name="html">\n'
187 graphs += '<ac:plain-text-body><![CDATA[\n'
188 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
189 '/plot/' + plotName + '/getPlot?index=' + index +\
190 '&width=500&height=300"' +\
191 'noborder="0" width="500" height="300" scrolling="yes" ' +\
192 'seamless="seamless"></iframe>\n'
193 graphs += ']]></ac:plain-text-body>\n'
194 graphs += '</ac:structured-macro>\n'
195 main.log.wiki(graphs)
196
197 main.step( "Creating ONOS package" )
198 # copy gen-partions file to ONOS
199 # NOTE: this assumes TestON and ONOS are on the same machine
Jon Hall53c5e662016-04-13 16:06:56 -0700200 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
Jon Hall6e709752016-02-01 13:38:46 -0800201 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
202 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
203 main.ONOSbench.ip_address,
204 srcFile,
205 dstDir,
206 pwd=main.ONOSbench.pwd,
207 direction="from" )
Jon Hallbd60ea02016-08-23 10:03:59 -0700208 packageResult = main.ONOSbench.buckBuild()
Jon Hall6e709752016-02-01 13:38:46 -0800209 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
210 onpass="ONOS package successful",
211 onfail="ONOS package failed" )
212
213 main.step( "Installing ONOS package" )
214 onosInstallResult = main.TRUE
215 for node in main.nodes:
216 tmpResult = main.ONOSbench.onosInstall( options="-f",
217 node=node.ip_address )
218 onosInstallResult = onosInstallResult and tmpResult
219 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
220 onpass="ONOS install successful",
221 onfail="ONOS install failed" )
222 # clean up gen-partitions file
223 try:
224 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
225 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
226 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
227 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
228 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
229 str( main.ONOSbench.handle.before ) )
230 except ( pexpect.TIMEOUT, pexpect.EOF ):
231 main.log.exception( "ONOSbench: pexpect exception found:" +
232 main.ONOSbench.handle.before )
233 main.cleanup()
234 main.exit()
235
236 main.step( "Checking if ONOS is up yet" )
237 for i in range( 2 ):
238 onosIsupResult = main.TRUE
239 for node in main.nodes:
240 started = main.ONOSbench.isup( node.ip_address )
241 if not started:
242 main.log.error( node.name + " hasn't started" )
243 onosIsupResult = onosIsupResult and started
244 if onosIsupResult == main.TRUE:
245 break
246 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
247 onpass="ONOS startup successful",
248 onfail="ONOS startup failed" )
249
Jon Hall6509dbf2016-06-21 17:01:17 -0700250 main.step( "Starting ONOS CLI sessions" )
Jon Hall6e709752016-02-01 13:38:46 -0800251 cliResults = main.TRUE
252 threads = []
253 for i in range( main.numCtrls ):
254 t = main.Thread( target=main.CLIs[i].startOnosCli,
255 name="startOnosCli-" + str( i ),
256 args=[main.nodes[i].ip_address] )
257 threads.append( t )
258 t.start()
259
260 for t in threads:
261 t.join()
262 cliResults = cliResults and t.result
263 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
264 onpass="ONOS cli startup successful",
265 onfail="ONOS cli startup failed" )
266
267 # Create a list of active nodes for use when some nodes are stopped
268 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
269
270 if main.params[ 'tcpdump' ].lower() == "true":
271 main.step( "Start Packet Capture MN" )
272 main.Mininet2.startTcpdump(
273 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
274 + "-MN.pcap",
275 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
276 port=main.params[ 'MNtcpdump' ][ 'port' ] )
277
Jon Halla440e872016-03-31 15:15:50 -0700278 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -0700279 nodeResults = utilities.retry( main.HA.nodesCheck,
280 False,
281 args=[main.activeNodes],
282 attempts=5 )
Jon Halla440e872016-03-31 15:15:50 -0700283
Jon Hall41d39f12016-04-11 22:54:35 -0700284 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Halla440e872016-03-31 15:15:50 -0700285 onpass="Nodes check successful",
286 onfail="Nodes check NOT successful" )
287
288 if not nodeResults:
Jon Hall7ac7bc32016-05-05 10:57:02 -0700289 for i in main.activeNodes:
290 cli = main.CLIs[i]
Jon Halla440e872016-03-31 15:15:50 -0700291 main.log.debug( "{} components not ACTIVE: \n{}".format(
292 cli.name,
293 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -0800294 main.log.error( "Failed to start ONOS, stopping test" )
295 main.cleanup()
296 main.exit()
297
Jon Hall172b7ba2016-04-07 18:12:20 -0700298 main.step( "Activate apps defined in the params file" )
299 # get data from the params
300 apps = main.params.get( 'apps' )
301 if apps:
302 apps = apps.split(',')
303 main.log.warn( apps )
304 activateResult = True
305 for app in apps:
306 main.CLIs[ 0 ].app( app, "Activate" )
307 # TODO: check this worked
308 time.sleep( 10 ) # wait for apps to activate
309 for app in apps:
310 state = main.CLIs[ 0 ].appStatus( app )
311 if state == "ACTIVE":
312 activateResult = activeResult and True
313 else:
314 main.log.error( "{} is in {} state".format( app, state ) )
315 activeResult = False
316 utilities.assert_equals( expect=True,
317 actual=activateResult,
318 onpass="Successfully activated apps",
319 onfail="Failed to activate apps" )
320 else:
321 main.log.warn( "No apps were specified to be loaded after startup" )
322
323 main.step( "Set ONOS configurations" )
324 config = main.params.get( 'ONOS_Configuration' )
325 if config:
326 main.log.debug( config )
327 checkResult = main.TRUE
328 for component in config:
329 for setting in config[component]:
330 value = config[component][setting]
331 check = main.CLIs[ 0 ].setCfg( component, setting, value )
332 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
333 checkResult = check and checkResult
334 utilities.assert_equals( expect=main.TRUE,
335 actual=checkResult,
336 onpass="Successfully set config",
337 onfail="Failed to set config" )
338 else:
339 main.log.warn( "No configurations were specified to be changed after startup" )
340
Jon Hall9d2dcad2016-04-08 10:15:20 -0700341 main.step( "App Ids check" )
342 appCheck = main.TRUE
343 threads = []
344 for i in main.activeNodes:
345 t = main.Thread( target=main.CLIs[i].appToIDCheck,
346 name="appToIDCheck-" + str( i ),
347 args=[] )
348 threads.append( t )
349 t.start()
350
351 for t in threads:
352 t.join()
353 appCheck = appCheck and t.result
354 if appCheck != main.TRUE:
355 node = main.activeNodes[0]
356 main.log.warn( main.CLIs[node].apps() )
357 main.log.warn( main.CLIs[node].appIDs() )
358 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
359 onpass="App Ids seem to be correct",
360 onfail="Something is wrong with app Ids" )
361
Jon Hall6e709752016-02-01 13:38:46 -0800362 def CASE2( self, main ):
363 """
364 Assign devices to controllers
365 """
366 import re
367 assert main.numCtrls, "main.numCtrls not defined"
368 assert main, "main not defined"
369 assert utilities.assert_equals, "utilities.assert_equals not defined"
370 assert main.CLIs, "main.CLIs not defined"
371 assert main.nodes, "main.nodes not defined"
372 assert ONOS1Port, "ONOS1Port not defined"
373 assert ONOS2Port, "ONOS2Port not defined"
374 assert ONOS3Port, "ONOS3Port not defined"
375 assert ONOS4Port, "ONOS4Port not defined"
376 assert ONOS5Port, "ONOS5Port not defined"
377 assert ONOS6Port, "ONOS6Port not defined"
378 assert ONOS7Port, "ONOS7Port not defined"
379
380 main.case( "Assigning devices to controllers" )
381 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
382 "and check that an ONOS node becomes the " +\
383 "master of the device."
384 main.step( "Assign switches to controllers" )
385
386 ipList = []
387 for i in range( main.numCtrls ):
388 ipList.append( main.nodes[ i ].ip_address )
389 swList = []
390 for i in range( 1, 29 ):
391 swList.append( "s" + str( i ) )
392 main.Mininet1.assignSwController( sw=swList, ip=ipList )
393
394 mastershipCheck = main.TRUE
395 for i in range( 1, 29 ):
396 response = main.Mininet1.getSwController( "s" + str( i ) )
397 try:
398 main.log.info( str( response ) )
399 except Exception:
400 main.log.info( repr( response ) )
401 for node in main.nodes:
402 if re.search( "tcp:" + node.ip_address, response ):
403 mastershipCheck = mastershipCheck and main.TRUE
404 else:
405 main.log.error( "Error, node " + node.ip_address + " is " +
406 "not in the list of controllers s" +
407 str( i ) + " is connecting to." )
408 mastershipCheck = main.FALSE
409 utilities.assert_equals(
410 expect=main.TRUE,
411 actual=mastershipCheck,
412 onpass="Switch mastership assigned correctly",
413 onfail="Switches not assigned correctly to controllers" )
414
415 def CASE21( self, main ):
416 """
417 Assign mastership to controllers
418 """
419 import time
420 assert main.numCtrls, "main.numCtrls not defined"
421 assert main, "main not defined"
422 assert utilities.assert_equals, "utilities.assert_equals not defined"
423 assert main.CLIs, "main.CLIs not defined"
424 assert main.nodes, "main.nodes not defined"
425 assert ONOS1Port, "ONOS1Port not defined"
426 assert ONOS2Port, "ONOS2Port not defined"
427 assert ONOS3Port, "ONOS3Port not defined"
428 assert ONOS4Port, "ONOS4Port not defined"
429 assert ONOS5Port, "ONOS5Port not defined"
430 assert ONOS6Port, "ONOS6Port not defined"
431 assert ONOS7Port, "ONOS7Port not defined"
432
433 main.case( "Assigning Controller roles for switches" )
434 main.caseExplanation = "Check that ONOS is connected to each " +\
435 "device. Then manually assign" +\
436 " mastership to specific ONOS nodes using" +\
437 " 'device-role'"
438 main.step( "Assign mastership of switches to specific controllers" )
439 # Manually assign mastership to the controller we want
440 roleCall = main.TRUE
441
442 ipList = [ ]
443 deviceList = []
444 onosCli = main.CLIs[ main.activeNodes[0] ]
445 try:
446 # Assign mastership to specific controllers. This assignment was
447 # determined for a 7 node cluser, but will work with any sized
448 # cluster
449 for i in range( 1, 29 ): # switches 1 through 28
450 # set up correct variables:
451 if i == 1:
452 c = 0
453 ip = main.nodes[ c ].ip_address # ONOS1
454 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
455 elif i == 2:
456 c = 1 % main.numCtrls
457 ip = main.nodes[ c ].ip_address # ONOS2
458 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
459 elif i == 3:
460 c = 1 % main.numCtrls
461 ip = main.nodes[ c ].ip_address # ONOS2
462 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
463 elif i == 4:
464 c = 3 % main.numCtrls
465 ip = main.nodes[ c ].ip_address # ONOS4
466 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
467 elif i == 5:
468 c = 2 % main.numCtrls
469 ip = main.nodes[ c ].ip_address # ONOS3
470 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
471 elif i == 6:
472 c = 2 % main.numCtrls
473 ip = main.nodes[ c ].ip_address # ONOS3
474 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
475 elif i == 7:
476 c = 5 % main.numCtrls
477 ip = main.nodes[ c ].ip_address # ONOS6
478 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
479 elif i >= 8 and i <= 17:
480 c = 4 % main.numCtrls
481 ip = main.nodes[ c ].ip_address # ONOS5
482 dpid = '3' + str( i ).zfill( 3 )
483 deviceId = onosCli.getDevice( dpid ).get( 'id' )
484 elif i >= 18 and i <= 27:
485 c = 6 % main.numCtrls
486 ip = main.nodes[ c ].ip_address # ONOS7
487 dpid = '6' + str( i ).zfill( 3 )
488 deviceId = onosCli.getDevice( dpid ).get( 'id' )
489 elif i == 28:
490 c = 0
491 ip = main.nodes[ c ].ip_address # ONOS1
492 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
493 else:
494 main.log.error( "You didn't write an else statement for " +
495 "switch s" + str( i ) )
496 roleCall = main.FALSE
497 # Assign switch
498 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
499 # TODO: make this controller dynamic
500 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
501 ipList.append( ip )
502 deviceList.append( deviceId )
503 except ( AttributeError, AssertionError ):
504 main.log.exception( "Something is wrong with ONOS device view" )
505 main.log.info( onosCli.devices() )
506 utilities.assert_equals(
507 expect=main.TRUE,
508 actual=roleCall,
509 onpass="Re-assigned switch mastership to designated controller",
510 onfail="Something wrong with deviceRole calls" )
511
512 main.step( "Check mastership was correctly assigned" )
513 roleCheck = main.TRUE
514 # NOTE: This is due to the fact that device mastership change is not
515 # atomic and is actually a multi step process
516 time.sleep( 5 )
517 for i in range( len( ipList ) ):
518 ip = ipList[i]
519 deviceId = deviceList[i]
520 # Check assignment
521 master = onosCli.getRole( deviceId ).get( 'master' )
522 if ip in master:
523 roleCheck = roleCheck and main.TRUE
524 else:
525 roleCheck = roleCheck and main.FALSE
526 main.log.error( "Error, controller " + ip + " is not" +
527 " master " + "of device " +
528 str( deviceId ) + ". Master is " +
529 repr( master ) + "." )
530 utilities.assert_equals(
531 expect=main.TRUE,
532 actual=roleCheck,
533 onpass="Switches were successfully reassigned to designated " +
534 "controller",
535 onfail="Switches were not successfully reassigned" )
536
537 def CASE3( self, main ):
538 """
539 Assign intents
540 """
541 import time
542 import json
543 assert main.numCtrls, "main.numCtrls not defined"
544 assert main, "main not defined"
545 assert utilities.assert_equals, "utilities.assert_equals not defined"
546 assert main.CLIs, "main.CLIs not defined"
547 assert main.nodes, "main.nodes not defined"
548 main.case( "Adding host Intents" )
549 main.caseExplanation = "Discover hosts by using pingall then " +\
550 "assign predetermined host-to-host intents." +\
551 " After installation, check that the intent" +\
552 " is distributed to all nodes and the state" +\
553 " is INSTALLED"
554
555 # install onos-app-fwd
556 main.step( "Install reactive forwarding app" )
557 onosCli = main.CLIs[ main.activeNodes[0] ]
558 installResults = onosCli.activateApp( "org.onosproject.fwd" )
559 utilities.assert_equals( expect=main.TRUE, actual=installResults,
560 onpass="Install fwd successful",
561 onfail="Install fwd failed" )
562
563 main.step( "Check app ids" )
564 appCheck = main.TRUE
565 threads = []
566 for i in main.activeNodes:
567 t = main.Thread( target=main.CLIs[i].appToIDCheck,
568 name="appToIDCheck-" + str( i ),
569 args=[] )
570 threads.append( t )
571 t.start()
572
573 for t in threads:
574 t.join()
575 appCheck = appCheck and t.result
576 if appCheck != main.TRUE:
577 main.log.warn( onosCli.apps() )
578 main.log.warn( onosCli.appIDs() )
579 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
580 onpass="App Ids seem to be correct",
581 onfail="Something is wrong with app Ids" )
582
583 main.step( "Discovering Hosts( Via pingall for now )" )
584 # FIXME: Once we have a host discovery mechanism, use that instead
585 # REACTIVE FWD test
586 pingResult = main.FALSE
587 passMsg = "Reactive Pingall test passed"
588 time1 = time.time()
589 pingResult = main.Mininet1.pingall()
590 time2 = time.time()
591 if not pingResult:
592 main.log.warn("First pingall failed. Trying again...")
593 pingResult = main.Mininet1.pingall()
594 passMsg += " on the second try"
595 utilities.assert_equals(
596 expect=main.TRUE,
597 actual=pingResult,
598 onpass= passMsg,
599 onfail="Reactive Pingall failed, " +
600 "one or more ping pairs failed" )
601 main.log.info( "Time for pingall: %2f seconds" %
602 ( time2 - time1 ) )
603 # timeout for fwd flows
604 time.sleep( 11 )
605 # uninstall onos-app-fwd
606 main.step( "Uninstall reactive forwarding app" )
607 node = main.activeNodes[0]
608 uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
609 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
610 onpass="Uninstall fwd successful",
611 onfail="Uninstall fwd failed" )
612
613 main.step( "Check app ids" )
614 threads = []
615 appCheck2 = main.TRUE
616 for i in main.activeNodes:
617 t = main.Thread( target=main.CLIs[i].appToIDCheck,
618 name="appToIDCheck-" + str( i ),
619 args=[] )
620 threads.append( t )
621 t.start()
622
623 for t in threads:
624 t.join()
625 appCheck2 = appCheck2 and t.result
626 if appCheck2 != main.TRUE:
627 node = main.activeNodes[0]
628 main.log.warn( main.CLIs[node].apps() )
629 main.log.warn( main.CLIs[node].appIDs() )
630 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
631 onpass="App Ids seem to be correct",
632 onfail="Something is wrong with app Ids" )
633
634 main.step( "Add host intents via cli" )
635 intentIds = []
636 # TODO: move the host numbers to params
637 # Maybe look at all the paths we ping?
638 intentAddResult = True
639 hostResult = main.TRUE
640 for i in range( 8, 18 ):
641 main.log.info( "Adding host intent between h" + str( i ) +
642 " and h" + str( i + 10 ) )
643 host1 = "00:00:00:00:00:" + \
644 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
645 host2 = "00:00:00:00:00:" + \
646 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
647 # NOTE: getHost can return None
648 host1Dict = onosCli.getHost( host1 )
649 host2Dict = onosCli.getHost( host2 )
650 host1Id = None
651 host2Id = None
652 if host1Dict and host2Dict:
653 host1Id = host1Dict.get( 'id', None )
654 host2Id = host2Dict.get( 'id', None )
655 if host1Id and host2Id:
656 nodeNum = ( i % len( main.activeNodes ) )
657 node = main.activeNodes[nodeNum]
658 tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
659 if tmpId:
660 main.log.info( "Added intent with id: " + tmpId )
661 intentIds.append( tmpId )
662 else:
663 main.log.error( "addHostIntent returned: " +
664 repr( tmpId ) )
665 else:
666 main.log.error( "Error, getHost() failed for h" + str( i ) +
667 " and/or h" + str( i + 10 ) )
668 node = main.activeNodes[0]
669 hosts = main.CLIs[node].hosts()
670 main.log.warn( "Hosts output: " )
671 try:
672 main.log.warn( json.dumps( json.loads( hosts ),
673 sort_keys=True,
674 indent=4,
675 separators=( ',', ': ' ) ) )
676 except ( ValueError, TypeError ):
677 main.log.warn( repr( hosts ) )
678 hostResult = main.FALSE
679 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
680 onpass="Found a host id for each host",
681 onfail="Error looking up host ids" )
682
683 intentStart = time.time()
684 onosIds = onosCli.getAllIntentsId()
685 main.log.info( "Submitted intents: " + str( intentIds ) )
686 main.log.info( "Intents in ONOS: " + str( onosIds ) )
687 for intent in intentIds:
688 if intent in onosIds:
689 pass # intent submitted is in onos
690 else:
691 intentAddResult = False
692 if intentAddResult:
693 intentStop = time.time()
694 else:
695 intentStop = None
696 # Print the intent states
697 intents = onosCli.intents()
698 intentStates = []
699 installedCheck = True
700 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
701 count = 0
702 try:
703 for intent in json.loads( intents ):
704 state = intent.get( 'state', None )
705 if "INSTALLED" not in state:
706 installedCheck = False
707 intentId = intent.get( 'id', None )
708 intentStates.append( ( intentId, state ) )
709 except ( ValueError, TypeError ):
710 main.log.exception( "Error parsing intents" )
711 # add submitted intents not in the store
712 tmplist = [ i for i, s in intentStates ]
713 missingIntents = False
714 for i in intentIds:
715 if i not in tmplist:
716 intentStates.append( ( i, " - " ) )
717 missingIntents = True
718 intentStates.sort()
719 for i, s in intentStates:
720 count += 1
721 main.log.info( "%-6s%-15s%-15s" %
722 ( str( count ), str( i ), str( s ) ) )
723 leaders = onosCli.leaders()
724 try:
725 missing = False
726 if leaders:
727 parsedLeaders = json.loads( leaders )
728 main.log.warn( json.dumps( parsedLeaders,
729 sort_keys=True,
730 indent=4,
731 separators=( ',', ': ' ) ) )
732 # check for all intent partitions
733 topics = []
734 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700735 topics.append( "work-partition-" + str( i ) )
Jon Hall6e709752016-02-01 13:38:46 -0800736 main.log.debug( topics )
737 ONOStopics = [ j['topic'] for j in parsedLeaders ]
738 for topic in topics:
739 if topic not in ONOStopics:
740 main.log.error( "Error: " + topic +
741 " not in leaders" )
742 missing = True
743 else:
744 main.log.error( "leaders() returned None" )
745 except ( ValueError, TypeError ):
746 main.log.exception( "Error parsing leaders" )
747 main.log.error( repr( leaders ) )
748 # Check all nodes
749 if missing:
750 for i in main.activeNodes:
751 response = main.CLIs[i].leaders( jsonFormat=False)
752 main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
753 str( response ) )
754
755 partitions = onosCli.partitions()
756 try:
757 if partitions :
758 parsedPartitions = json.loads( partitions )
759 main.log.warn( json.dumps( parsedPartitions,
760 sort_keys=True,
761 indent=4,
762 separators=( ',', ': ' ) ) )
763 # TODO check for a leader in all paritions
764 # TODO check for consistency among nodes
765 else:
766 main.log.error( "partitions() returned None" )
767 except ( ValueError, TypeError ):
768 main.log.exception( "Error parsing partitions" )
769 main.log.error( repr( partitions ) )
770 pendingMap = onosCli.pendingMap()
771 try:
772 if pendingMap :
773 parsedPending = json.loads( pendingMap )
774 main.log.warn( json.dumps( parsedPending,
775 sort_keys=True,
776 indent=4,
777 separators=( ',', ': ' ) ) )
778 # TODO check something here?
779 else:
780 main.log.error( "pendingMap() returned None" )
781 except ( ValueError, TypeError ):
782 main.log.exception( "Error parsing pending map" )
783 main.log.error( repr( pendingMap ) )
784
785 intentAddResult = bool( intentAddResult and not missingIntents and
786 installedCheck )
787 if not intentAddResult:
788 main.log.error( "Error in pushing host intents to ONOS" )
789
790 main.step( "Intent Anti-Entropy dispersion" )
791 for j in range(100):
792 correct = True
793 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
794 for i in main.activeNodes:
795 onosIds = []
796 ids = main.CLIs[i].getAllIntentsId()
797 onosIds.append( ids )
798 main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
799 str( sorted( onosIds ) ) )
800 if sorted( ids ) != sorted( intentIds ):
801 main.log.warn( "Set of intent IDs doesn't match" )
802 correct = False
803 break
804 else:
805 intents = json.loads( main.CLIs[i].intents() )
806 for intent in intents:
807 if intent[ 'state' ] != "INSTALLED":
808 main.log.warn( "Intent " + intent[ 'id' ] +
809 " is " + intent[ 'state' ] )
810 correct = False
811 break
812 if correct:
813 break
814 else:
815 time.sleep(1)
816 if not intentStop:
817 intentStop = time.time()
818 global gossipTime
819 gossipTime = intentStop - intentStart
820 main.log.info( "It took about " + str( gossipTime ) +
821 " seconds for all intents to appear in each node" )
822 gossipPeriod = int( main.params['timers']['gossip'] )
823 maxGossipTime = gossipPeriod * len( main.activeNodes )
824 utilities.assert_greater_equals(
825 expect=maxGossipTime, actual=gossipTime,
826 onpass="ECM anti-entropy for intents worked within " +
827 "expected time",
828 onfail="Intent ECM anti-entropy took too long. " +
829 "Expected time:{}, Actual time:{}".format( maxGossipTime,
830 gossipTime ) )
831 if gossipTime <= maxGossipTime:
832 intentAddResult = True
833
834 if not intentAddResult or "key" in pendingMap:
835 import time
836 installedCheck = True
837 main.log.info( "Sleeping 60 seconds to see if intents are found" )
838 time.sleep( 60 )
839 onosIds = onosCli.getAllIntentsId()
840 main.log.info( "Submitted intents: " + str( intentIds ) )
841 main.log.info( "Intents in ONOS: " + str( onosIds ) )
842 # Print the intent states
843 intents = onosCli.intents()
844 intentStates = []
845 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
846 count = 0
847 try:
848 for intent in json.loads( intents ):
849 # Iter through intents of a node
850 state = intent.get( 'state', None )
851 if "INSTALLED" not in state:
852 installedCheck = False
853 intentId = intent.get( 'id', None )
854 intentStates.append( ( intentId, state ) )
855 except ( ValueError, TypeError ):
856 main.log.exception( "Error parsing intents" )
857 # add submitted intents not in the store
858 tmplist = [ i for i, s in intentStates ]
859 for i in intentIds:
860 if i not in tmplist:
861 intentStates.append( ( i, " - " ) )
862 intentStates.sort()
863 for i, s in intentStates:
864 count += 1
865 main.log.info( "%-6s%-15s%-15s" %
866 ( str( count ), str( i ), str( s ) ) )
867 leaders = onosCli.leaders()
868 try:
869 missing = False
870 if leaders:
871 parsedLeaders = json.loads( leaders )
872 main.log.warn( json.dumps( parsedLeaders,
873 sort_keys=True,
874 indent=4,
875 separators=( ',', ': ' ) ) )
876 # check for all intent partitions
877 # check for election
878 topics = []
879 for i in range( 14 ):
Jon Hall8dafdcc2016-09-16 10:21:25 -0700880 topics.append( "work-partition-" + str( i ) )
Jon Hall6e709752016-02-01 13:38:46 -0800881 # FIXME: this should only be after we start the app
882 topics.append( "org.onosproject.election" )
883 main.log.debug( topics )
884 ONOStopics = [ j['topic'] for j in parsedLeaders ]
885 for topic in topics:
886 if topic not in ONOStopics:
887 main.log.error( "Error: " + topic +
888 " not in leaders" )
889 missing = True
890 else:
891 main.log.error( "leaders() returned None" )
892 except ( ValueError, TypeError ):
893 main.log.exception( "Error parsing leaders" )
894 main.log.error( repr( leaders ) )
895 # Check all nodes
896 if missing:
897 for i in main.activeNodes:
898 node = main.CLIs[i]
899 response = node.leaders( jsonFormat=False)
900 main.log.warn( str( node.name ) + " leaders output: \n" +
901 str( response ) )
902
903 partitions = onosCli.partitions()
904 try:
905 if partitions :
906 parsedPartitions = json.loads( partitions )
907 main.log.warn( json.dumps( parsedPartitions,
908 sort_keys=True,
909 indent=4,
910 separators=( ',', ': ' ) ) )
911 # TODO check for a leader in all paritions
912 # TODO check for consistency among nodes
913 else:
914 main.log.error( "partitions() returned None" )
915 except ( ValueError, TypeError ):
916 main.log.exception( "Error parsing partitions" )
917 main.log.error( repr( partitions ) )
918 pendingMap = onosCli.pendingMap()
919 try:
920 if pendingMap :
921 parsedPending = json.loads( pendingMap )
922 main.log.warn( json.dumps( parsedPending,
923 sort_keys=True,
924 indent=4,
925 separators=( ',', ': ' ) ) )
926 # TODO check something here?
927 else:
928 main.log.error( "pendingMap() returned None" )
929 except ( ValueError, TypeError ):
930 main.log.exception( "Error parsing pending map" )
931 main.log.error( repr( pendingMap ) )
932
    def CASE4( self, main ):
        """
        Ping across added host intents.

        Verifies dataplane connectivity for the host intents added in CASE3:
          1. Poll intent state (up to ~40s) until all intents are INSTALLED.
          2. Ping each h<i> -> h<i+10> pair ( i in 8..17 ).
          3. Check leadership of the intent work-partition topics.
          4. Dump partitions and the pending map for debugging.
          5. If intents were not all INSTALLED, wait 60s and retry the
             state dump and the pings once more.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # Use the first active node's CLI for all single-node queries below
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until every intent reports INSTALLED, or ~40 seconds elapse
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host pairs were wired up by the intents added in CASE3: h8<->h18, ...
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            # Dump the intents to help diagnose which ones failed to install
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        # NOTE(review): this dumps per-node leaders only on SUCCESS
        #       ( topicCheck truthy ) -- confirm that is intentional
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If intents never converged, give the cluster one more minute and
        # repeat the state dump and the ping sweep.
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1214
    def CASE5( self, main ):
        """
        Reading state of ONOS.

        Pulls the cluster state ( mastership, intents, flows, OF tables,
        topology ) from every active ONOS node, checks the views are
        consistent with each other, snapshots them into the module-level
        globals ``mastershipState`` / ``intentState`` / ``flowState`` /
        ``flows`` for later comparison after the failure case, starts
        long-running background pings, and compares the ONOS topology
        against Mininet's view.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents,flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        # Snapshot global; later cases compare against this baseline
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query all active nodes in parallel, one thread per node
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Sanity-check each node's raw response before comparing
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes must report the identical mastership string
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Inconsistent: pretty-print every node's view for debugging
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            # Consistent and error-free: save as the baseline snapshot
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        # Snapshot global for later comparison
        global intentState
        intentState = []
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # NOTE(review): this sorts the raw JSON *strings* character-wise,
        #       not the parsed intent lists -- confirm that is intended
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        # Snapshot global for later comparison
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep(30)
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[i] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        # Flow contents differ per node, so only the COUNT is compared
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        # Snapshot global: raw OpenFlow tables from every Mininet switch
        global flows
        flows = []
        # Topology has switches s1..s28
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        # Background pings keep dataplane traffic flowing during the
        # upcoming failure case; endpoints come from the params file.
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source1' ],
            target=main.params[ 'PING' ][ 'target1' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source2' ],
            target=main.params[ 'PING' ][ 'target2' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source3' ],
            target=main.params[ 'PING' ][ 'target3' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source4' ],
            target=main.params[ 'PING' ][ 'target4' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source5' ],
            target=main.params[ 'PING' ][ 'target5' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source6' ],
            target=main.params[ 'PING' ][ 'target6' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source7' ],
            target=main.params[ 'PING' ][ 'target7' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source8' ],
            target=main.params[ 'PING' ][ 'target8' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source9' ],
            target=main.params[ 'PING' ][ 'target9' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source10' ],
            target=main.params[ 'PING' ][ 'target10' ],
            pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
        # Gather devices/hosts/ports/links/clusters from every active node,
        # each collection fanned out across per-node threads.
        devices = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].devices,
                             name="devices-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            devices.append( t.result )
        hosts = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].hosts,
                             name="hosts-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            try:
                hosts.append( json.loads( t.result ) )
            except ( ValueError, TypeError ):
                # FIXME: better handling of this, print which node
                # Maybe use thread name?
                main.log.exception( "Error parsing json output of hosts" )
                main.log.warn( repr( t.result ) )
                hosts.append( None )

        ports = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].ports,
                             name="ports-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ports.append( t.result )
        links = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].links,
                             name="links-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            links.append( t.result )
        clusters = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].clusters,
                             name="clusters-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            clusters.append( t.result )
        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Host view is consistent across ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else: # hosts not consistent
                    main.log.error( "hosts from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Each host has an IP address" )
        ipResult = main.TRUE
        for controller in range( 0, len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ]:
                for host in hosts[ controller ]:
                    if not host.get( 'ipAddresses', [ ] ):
                        main.log.error( "Error with host ips on controller" +
                                        controllerStr + ": " + str( host ) )
                        ipResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=ipResult,
            onpass="The ips of the hosts aren't empty",
            onfail="The ip of at least one host is missing" )

        # Strongly connected clusters of devices
        main.step( "Cluster view is consistent across ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else: # clusters not consistent
                    main.log.error( "clusters from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE

            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        # there should always only be one cluster
        main.step( "Cluster view correct across ONOS nodes" )
        # NOTE(review): clusterResults is only assigned in the except branch
        #       and the numClusters == 1 branch; it is unused afterwards, but
        #       any future use would NameError when numClusters != 1
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        main.step( "Comparing ONOS topology to MN" )
        devicesResults = main.TRUE
        linksResults = main.TRUE
        hostsResults = main.TRUE
        mnSwitches = main.Mininet1.getSwitches()
        mnLinks = main.Mininet1.getLinks()
        mnHosts = main.Mininet1.getHosts()
        # NOTE(review): `controller` here is a node INDEX from activeNodes,
        #       yet it is used both to index the result lists (which are
        #       positional) and to index activeNodes again -- this only
        #       lines up when activeNodes == [ 0, 1, ..., n-1 ]; verify
        for controller in main.activeNodes:
            controllerStr = str( main.activeNodes[controller] + 1 )
            if devices[ controller ] and ports[ controller ] and\
                    "Error" not in devices[ controller ] and\
                    "Error" not in ports[ controller ]:
                currentDevicesResult = main.Mininet1.compareSwitches(
                    mnSwitches,
                    json.loads( devices[ controller ] ),
                    json.loads( ports[ controller ] ) )
            else:
                currentDevicesResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentDevicesResult,
                                     onpass="ONOS" + controllerStr +
                                     " Switches view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " Switches view is incorrect" )
            if links[ controller ] and "Error" not in links[ controller ]:
                currentLinksResult = main.Mininet1.compareLinks(
                    mnSwitches, mnLinks,
                    json.loads( links[ controller ] ) )
            else:
                currentLinksResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentLinksResult,
                                     onpass="ONOS" + controllerStr +
                                     " links view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " links view is incorrect" )

            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                currentHostsResult = main.Mininet1.compareHosts(
                    mnHosts,
                    hosts[ controller ] )
            else:
                currentHostsResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentHostsResult,
                                     onpass="ONOS" + controllerStr +
                                     " hosts exist in Mininet",
                                     onfail="ONOS" + controllerStr +
                                     " hosts don't match Mininet" )

            devicesResults = devicesResults and currentDevicesResult
            linksResults = linksResults and currentLinksResult
            hostsResults = hostsResults and currentHostsResult

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )
1777 onfail="Hosts are incorrect" )
1778
1779 def CASE61( self, main ):
1780 """
1781 The Failure case.
1782 """
1783 import math
1784 assert main.numCtrls, "main.numCtrls not defined"
1785 assert main, "main not defined"
1786 assert utilities.assert_equals, "utilities.assert_equals not defined"
1787 assert main.CLIs, "main.CLIs not defined"
1788 assert main.nodes, "main.nodes not defined"
1789 main.case( "Partition ONOS nodes into two distinct partitions" )
1790
1791 main.step( "Checking ONOS Logs for errors" )
1792 for node in main.nodes:
1793 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1794 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1795
Jon Halld2871c22016-07-26 11:01:14 -07001796 main.log.debug( main.CLIs[0].roles( jsonFormat=False ) )
1797
Jon Hall6e709752016-02-01 13:38:46 -08001798 n = len( main.nodes ) # Number of nodes
1799 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1800 main.partition = [ 0 ] # ONOS node to partition, listed by index in main.nodes
1801 if n > 3:
1802 main.partition.append( p - 1 )
1803 # NOTE: This only works for cluster sizes of 3,5, or 7.
1804
1805 main.step( "Partitioning ONOS nodes" )
1806 nodeList = [ str( i + 1 ) for i in main.partition ]
1807 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1808 partitionResults = main.TRUE
1809 for i in range( 0, n ):
1810 this = main.nodes[i]
1811 if i not in main.partition:
1812 for j in main.partition:
1813 foe = main.nodes[j]
1814 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1815 #CMD HERE
1816 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1817 this.handle.sendline( cmdStr )
1818 this.handle.expect( "\$" )
1819 main.log.debug( this.handle.before )
1820 else:
1821 for j in range( 0, n ):
1822 if j not in main.partition:
1823 foe = main.nodes[j]
1824 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1825 #CMD HERE
1826 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1827 this.handle.sendline( cmdStr )
1828 this.handle.expect( "\$" )
1829 main.log.debug( this.handle.before )
1830 main.activeNodes.remove( i )
1831 # NOTE: When dynamic clustering is finished, we need to start checking
1832 # main.partion nodes still work when partitioned
1833 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1834 onpass="Firewall rules set successfully",
1835 onfail="Error setting firewall rules" )
1836
Jon Hall6509dbf2016-06-21 17:01:17 -07001837 main.step( "Sleeping 60 seconds" )
Jon Hall6e709752016-02-01 13:38:46 -08001838 time.sleep( 60 )
1839
    def CASE62( self, main ):
        """
        Healing Partition

        Flushes the iptables rules installed by CASE61 on every node,
        restores the partitioned node indexes to main.activeNodes, and
        verifies that all active ONOS nodes come back healthy.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.partition must have been populated by CASE61
        assert main.partition, "main.partition not defined"
        main.case( "Healing Partition" )

        main.step( "Deleteing firewall rules" )
        # NOTE(review): healResults is never updated after the iptables
        # commands run, so this step cannot currently fail
        healResults = main.TRUE
        for node in main.nodes:
            # -F flushes ALL rules, not just the ones added by CASE61
            cmdStr = "sudo iptables -F"
            node.handle.sendline( cmdStr )
            node.handle.expect( "\$" )
            main.log.debug( node.handle.before )
        utilities.assert_equals( expect=main.TRUE, actual=healResults,
                                 onpass="Firewall rules removed",
                                 onfail="Error removing firewall rules" )

        # Re-add the previously partitioned nodes to the active set
        for node in main.partition:
            main.activeNodes.append( node )
        main.activeNodes.sort()
        try:
            # Duplicates here mean CASE62 ran twice without CASE61 in between
            assert list( set( main.activeNodes ) ) == main.activeNodes,\
                "List of active nodes has duplicates, this likely indicates something was run out of order"
        except AssertionError:
            main.log.exception( "" )
            main.cleanup()
            main.exit()

        main.step( "Checking ONOS nodes" )
        # Retry: the healed nodes may need time to rejoin the cluster
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       sleep=15,
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump inactive karaf components for debugging, then abort the test
            for i in main.activeNodes:
                cli = main.CLIs[i]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()
1895
Jon Hall6e709752016-02-01 13:38:46 -08001896 def CASE7( self, main ):
1897 """
1898 Check state after ONOS failure
1899 """
1900 import json
1901 assert main.numCtrls, "main.numCtrls not defined"
1902 assert main, "main not defined"
1903 assert utilities.assert_equals, "utilities.assert_equals not defined"
1904 assert main.CLIs, "main.CLIs not defined"
1905 assert main.nodes, "main.nodes not defined"
1906 try:
1907 main.partition
1908 except AttributeError:
1909 main.partition = []
1910
1911 main.case( "Running ONOS Constant State Tests" )
1912
1913 main.step( "Check that each switch has a master" )
1914 # Assert that each device has a master
1915 rolesNotNull = main.TRUE
1916 threads = []
1917 for i in main.activeNodes:
1918 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1919 name="rolesNotNull-" + str( i ),
1920 args=[ ] )
1921 threads.append( t )
1922 t.start()
1923
1924 for t in threads:
1925 t.join()
1926 rolesNotNull = rolesNotNull and t.result
1927 utilities.assert_equals(
1928 expect=main.TRUE,
1929 actual=rolesNotNull,
1930 onpass="Each device has a master",
1931 onfail="Some devices don't have a master assigned" )
1932
1933 main.step( "Read device roles from ONOS" )
1934 ONOSMastership = []
1935 mastershipCheck = main.FALSE
1936 consistentMastership = True
1937 rolesResults = True
1938 threads = []
1939 for i in main.activeNodes:
1940 t = main.Thread( target=main.CLIs[i].roles,
1941 name="roles-" + str( i ),
1942 args=[] )
1943 threads.append( t )
1944 t.start()
1945
1946 for t in threads:
1947 t.join()
1948 ONOSMastership.append( t.result )
1949
1950 for i in range( len( ONOSMastership ) ):
1951 node = str( main.activeNodes[i] + 1 )
1952 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1953 main.log.error( "Error in getting ONOS" + node + " roles" )
1954 main.log.warn( "ONOS" + node + " mastership response: " +
1955 repr( ONOSMastership[i] ) )
1956 rolesResults = False
1957 utilities.assert_equals(
1958 expect=True,
1959 actual=rolesResults,
1960 onpass="No error in reading roles output",
1961 onfail="Error in reading roles from ONOS" )
1962
1963 main.step( "Check for consistency in roles from each controller" )
1964 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1965 main.log.info(
1966 "Switch roles are consistent across all ONOS nodes" )
1967 else:
1968 consistentMastership = False
1969 utilities.assert_equals(
1970 expect=True,
1971 actual=consistentMastership,
1972 onpass="Switch roles are consistent across all ONOS nodes",
1973 onfail="ONOS nodes have different views of switch roles" )
1974
1975 if rolesResults and not consistentMastership:
1976 for i in range( len( ONOSMastership ) ):
1977 node = str( main.activeNodes[i] + 1 )
1978 main.log.warn( "ONOS" + node + " roles: ",
1979 json.dumps( json.loads( ONOSMastership[ i ] ),
1980 sort_keys=True,
1981 indent=4,
1982 separators=( ',', ': ' ) ) )
1983
1984 # NOTE: we expect mastership to change on controller failure
1985
1986 main.step( "Get the intents and compare across all nodes" )
1987 ONOSIntents = []
1988 intentCheck = main.FALSE
1989 consistentIntents = True
1990 intentsResults = True
1991 threads = []
1992 for i in main.activeNodes:
1993 t = main.Thread( target=main.CLIs[i].intents,
1994 name="intents-" + str( i ),
1995 args=[],
1996 kwargs={ 'jsonFormat': True } )
1997 threads.append( t )
1998 t.start()
1999
2000 for t in threads:
2001 t.join()
2002 ONOSIntents.append( t.result )
2003
2004 for i in range( len( ONOSIntents) ):
2005 node = str( main.activeNodes[i] + 1 )
2006 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
2007 main.log.error( "Error in getting ONOS" + node + " intents" )
2008 main.log.warn( "ONOS" + node + " intents response: " +
2009 repr( ONOSIntents[ i ] ) )
2010 intentsResults = False
2011 utilities.assert_equals(
2012 expect=True,
2013 actual=intentsResults,
2014 onpass="No error in reading intents output",
2015 onfail="Error in reading intents from ONOS" )
2016
2017 main.step( "Check for consistency in Intents from each controller" )
2018 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2019 main.log.info( "Intents are consistent across all ONOS " +
2020 "nodes" )
2021 else:
2022 consistentIntents = False
2023
2024 # Try to make it easy to figure out what is happening
2025 #
2026 # Intent ONOS1 ONOS2 ...
2027 # 0x01 INSTALLED INSTALLING
2028 # ... ... ...
2029 # ... ... ...
2030 title = " ID"
2031 for n in main.activeNodes:
2032 title += " " * 10 + "ONOS" + str( n + 1 )
2033 main.log.warn( title )
2034 # get all intent keys in the cluster
2035 keys = []
2036 for nodeStr in ONOSIntents:
2037 node = json.loads( nodeStr )
2038 for intent in node:
2039 keys.append( intent.get( 'id' ) )
2040 keys = set( keys )
2041 for key in keys:
2042 row = "%-13s" % key
2043 for nodeStr in ONOSIntents:
2044 node = json.loads( nodeStr )
2045 for intent in node:
2046 if intent.get( 'id' ) == key:
2047 row += "%-15s" % intent.get( 'state' )
2048 main.log.warn( row )
2049 # End table view
2050
2051 utilities.assert_equals(
2052 expect=True,
2053 actual=consistentIntents,
2054 onpass="Intents are consistent across all ONOS nodes",
2055 onfail="ONOS nodes have different views of intents" )
2056 intentStates = []
2057 for node in ONOSIntents: # Iter through ONOS nodes
2058 nodeStates = []
2059 # Iter through intents of a node
2060 try:
2061 for intent in json.loads( node ):
2062 nodeStates.append( intent[ 'state' ] )
2063 except ( ValueError, TypeError ):
2064 main.log.exception( "Error in parsing intents" )
2065 main.log.error( repr( node ) )
2066 intentStates.append( nodeStates )
2067 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2068 main.log.info( dict( out ) )
2069
2070 if intentsResults and not consistentIntents:
2071 for i in range( len( main.activeNodes ) ):
2072 node = str( main.activeNodes[i] + 1 )
2073 main.log.warn( "ONOS" + node + " intents: " )
2074 main.log.warn( json.dumps(
2075 json.loads( ONOSIntents[ i ] ),
2076 sort_keys=True,
2077 indent=4,
2078 separators=( ',', ': ' ) ) )
2079 elif intentsResults and consistentIntents:
2080 intentCheck = main.TRUE
2081
2082 # NOTE: Store has no durability, so intents are lost across system
2083 # restarts
2084 main.step( "Compare current intents with intents before the failure" )
2085 # NOTE: this requires case 5 to pass for intentState to be set.
2086 # maybe we should stop the test if that fails?
2087 sameIntents = main.FALSE
2088 try:
2089 intentState
2090 except NameError:
2091 main.log.warn( "No previous intent state was saved" )
2092 else:
2093 if intentState and intentState == ONOSIntents[ 0 ]:
2094 sameIntents = main.TRUE
2095 main.log.info( "Intents are consistent with before failure" )
2096 # TODO: possibly the states have changed? we may need to figure out
2097 # what the acceptable states are
2098 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2099 sameIntents = main.TRUE
2100 try:
2101 before = json.loads( intentState )
2102 after = json.loads( ONOSIntents[ 0 ] )
2103 for intent in before:
2104 if intent not in after:
2105 sameIntents = main.FALSE
2106 main.log.debug( "Intent is not currently in ONOS " +
2107 "(at least in the same form):" )
2108 main.log.debug( json.dumps( intent ) )
2109 except ( ValueError, TypeError ):
2110 main.log.exception( "Exception printing intents" )
2111 main.log.debug( repr( ONOSIntents[0] ) )
2112 main.log.debug( repr( intentState ) )
2113 if sameIntents == main.FALSE:
2114 try:
2115 main.log.debug( "ONOS intents before: " )
2116 main.log.debug( json.dumps( json.loads( intentState ),
2117 sort_keys=True, indent=4,
2118 separators=( ',', ': ' ) ) )
2119 main.log.debug( "Current ONOS intents: " )
2120 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2121 sort_keys=True, indent=4,
2122 separators=( ',', ': ' ) ) )
2123 except ( ValueError, TypeError ):
2124 main.log.exception( "Exception printing intents" )
2125 main.log.debug( repr( ONOSIntents[0] ) )
2126 main.log.debug( repr( intentState ) )
2127 utilities.assert_equals(
2128 expect=main.TRUE,
2129 actual=sameIntents,
2130 onpass="Intents are consistent with before failure",
2131 onfail="The Intents changed during failure" )
2132 intentCheck = intentCheck and sameIntents
2133
2134 main.step( "Get the OF Table entries and compare to before " +
2135 "component failure" )
2136 FlowTables = main.TRUE
2137 for i in range( 28 ):
2138 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2139 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
Jon Hall41d39f12016-04-11 22:54:35 -07002140 curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
2141 FlowTables = FlowTables and curSwitch
2142 if curSwitch == main.FALSE:
Jon Hall6e709752016-02-01 13:38:46 -08002143 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2144 utilities.assert_equals(
2145 expect=main.TRUE,
2146 actual=FlowTables,
2147 onpass="No changes were found in the flow tables",
2148 onfail="Changes were found in the flow tables" )
2149
2150 main.Mininet2.pingLongKill()
2151 '''
2152 main.step( "Check the continuous pings to ensure that no packets " +
2153 "were dropped during component failure" )
2154 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2155 main.params[ 'TESTONIP' ] )
2156 LossInPings = main.FALSE
2157 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2158 for i in range( 8, 18 ):
2159 main.log.info(
2160 "Checking for a loss in pings along flow from s" +
2161 str( i ) )
2162 LossInPings = main.Mininet2.checkForLoss(
2163 "/tmp/ping.h" +
2164 str( i ) ) or LossInPings
2165 if LossInPings == main.TRUE:
2166 main.log.info( "Loss in ping detected" )
2167 elif LossInPings == main.ERROR:
2168 main.log.info( "There are multiple mininet process running" )
2169 elif LossInPings == main.FALSE:
2170 main.log.info( "No Loss in the pings" )
2171 main.log.info( "No loss of dataplane connectivity" )
2172 utilities.assert_equals(
2173 expect=main.FALSE,
2174 actual=LossInPings,
2175 onpass="No Loss of connectivity",
2176 onfail="Loss of dataplane connectivity detected" )
2177 '''
2178
2179 main.step( "Leadership Election is still functional" )
2180 # Test of LeadershipElection
2181 leaderList = []
2182
2183 partitioned = []
2184 for i in main.partition:
2185 partitioned.append( main.nodes[i].ip_address )
2186 leaderResult = main.TRUE
2187
2188 for i in main.activeNodes:
2189 cli = main.CLIs[i]
2190 leaderN = cli.electionTestLeader()
2191 leaderList.append( leaderN )
2192 if leaderN == main.FALSE:
2193 # error in response
2194 main.log.error( "Something is wrong with " +
2195 "electionTestLeader function, check the" +
2196 " error logs" )
2197 leaderResult = main.FALSE
2198 elif leaderN is None:
2199 main.log.error( cli.name +
2200 " shows no leader for the election-app was" +
2201 " elected after the old one died" )
2202 leaderResult = main.FALSE
2203 elif leaderN in partitioned:
2204 main.log.error( cli.name + " shows " + str( leaderN ) +
2205 " as leader for the election-app, but it " +
2206 "was partitioned" )
2207 leaderResult = main.FALSE
2208 if len( set( leaderList ) ) != 1:
2209 leaderResult = main.FALSE
2210 main.log.error(
2211 "Inconsistent view of leader for the election test app" )
2212 # TODO: print the list
2213 utilities.assert_equals(
2214 expect=main.TRUE,
2215 actual=leaderResult,
2216 onpass="Leadership election passed",
2217 onfail="Something went wrong with Leadership election" )
2218
    def CASE8( self, main ):
        """
        Compare topo

        Repeatedly pulls devices, hosts, ports, links and clusters from every
        active ONOS node (with retries) and compares them against the Mininet
        topology until they match or the retry budget runs out, then checks
        cross-node consistency, host attachment points, the SCC count, and
        overall node health. Aborts the test if the topologies never converge.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Fetch devices from every active node in parallel, with retries
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            # Fetch and parse hosts; a None entry marks an unparseable reply
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host should have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query failed there is nothing to compare; retry
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                   "Error" not in devices[ controller ] and\
                   "Error" not in ports[ controller ]:

                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ) as e:
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                # No hosts discovered is not counted as an attachment failure
                if zeroHosts is False:
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
            topoResult = ( devicesResults and linksResults
                           and hostsResults and ipResult and
                           hostAttachmentResults )
            utilities.assert_equals( expect=True,
                                     actual=topoResult,
                                     onpass="ONOS topology matches Mininet",
                                     onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
        clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # More than 2 polling rounds counts as a convergence failure
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
               " seconds for the test to make all the cli calls to fetch " +\
               "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )

        # Topology never converged: no point continuing the test run
        if not topoResult:
            main.cleanup()
            main.exit()
2640
Jon Hall6e709752016-02-01 13:38:46 -08002641 def CASE9( self, main ):
2642 """
2643 Link s3-s28 down
2644 """
2645 import time
2646 assert main.numCtrls, "main.numCtrls not defined"
2647 assert main, "main not defined"
2648 assert utilities.assert_equals, "utilities.assert_equals not defined"
2649 assert main.CLIs, "main.CLIs not defined"
2650 assert main.nodes, "main.nodes not defined"
2651 # NOTE: You should probably run a topology check after this
2652
2653 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2654
2655 description = "Turn off a link to ensure that Link Discovery " +\
2656 "is working properly"
2657 main.case( description )
2658
2659 main.step( "Kill Link between s3 and s28" )
2660 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2661 main.log.info( "Waiting " + str( linkSleep ) +
2662 " seconds for link down to be discovered" )
2663 time.sleep( linkSleep )
2664 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2665 onpass="Link down successful",
2666 onfail="Failed to bring link down" )
2667 # TODO do some sort of check here
2668
2669 def CASE10( self, main ):
2670 """
2671 Link s3-s28 up
2672 """
2673 import time
2674 assert main.numCtrls, "main.numCtrls not defined"
2675 assert main, "main not defined"
2676 assert utilities.assert_equals, "utilities.assert_equals not defined"
2677 assert main.CLIs, "main.CLIs not defined"
2678 assert main.nodes, "main.nodes not defined"
2679 # NOTE: You should probably run a topology check after this
2680
2681 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2682
2683 description = "Restore a link to ensure that Link Discovery is " + \
2684 "working properly"
2685 main.case( description )
2686
2687 main.step( "Bring link between s3 and s28 back up" )
2688 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2689 main.log.info( "Waiting " + str( linkSleep ) +
2690 " seconds for link up to be discovered" )
2691 time.sleep( linkSleep )
2692 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2693 onpass="Link up successful",
2694 onfail="Failed to bring link up" )
2695 # TODO do some sort of check here
2696
2697 def CASE11( self, main ):
2698 """
2699 Switch Down
2700 """
2701 # NOTE: You should probably run a topology check after this
2702 import time
2703 assert main.numCtrls, "main.numCtrls not defined"
2704 assert main, "main not defined"
2705 assert utilities.assert_equals, "utilities.assert_equals not defined"
2706 assert main.CLIs, "main.CLIs not defined"
2707 assert main.nodes, "main.nodes not defined"
2708
2709 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2710
2711 description = "Killing a switch to ensure it is discovered correctly"
2712 onosCli = main.CLIs[ main.activeNodes[0] ]
2713 main.case( description )
2714 switch = main.params[ 'kill' ][ 'switch' ]
2715 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2716
2717 # TODO: Make this switch parameterizable
2718 main.step( "Kill " + switch )
2719 main.log.info( "Deleting " + switch )
2720 main.Mininet1.delSwitch( switch )
2721 main.log.info( "Waiting " + str( switchSleep ) +
2722 " seconds for switch down to be discovered" )
2723 time.sleep( switchSleep )
2724 device = onosCli.getDevice( dpid=switchDPID )
2725 # Peek at the deleted switch
2726 main.log.warn( str( device ) )
2727 result = main.FALSE
2728 if device and device[ 'available' ] is False:
2729 result = main.TRUE
2730 utilities.assert_equals( expect=main.TRUE, actual=result,
2731 onpass="Kill switch successful",
2732 onfail="Failed to kill switch?" )
2733
2734 def CASE12( self, main ):
2735 """
2736 Switch Up
2737 """
2738 # NOTE: You should probably run a topology check after this
2739 import time
2740 assert main.numCtrls, "main.numCtrls not defined"
2741 assert main, "main not defined"
2742 assert utilities.assert_equals, "utilities.assert_equals not defined"
2743 assert main.CLIs, "main.CLIs not defined"
2744 assert main.nodes, "main.nodes not defined"
2745 assert ONOS1Port, "ONOS1Port not defined"
2746 assert ONOS2Port, "ONOS2Port not defined"
2747 assert ONOS3Port, "ONOS3Port not defined"
2748 assert ONOS4Port, "ONOS4Port not defined"
2749 assert ONOS5Port, "ONOS5Port not defined"
2750 assert ONOS6Port, "ONOS6Port not defined"
2751 assert ONOS7Port, "ONOS7Port not defined"
2752
2753 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2754 switch = main.params[ 'kill' ][ 'switch' ]
2755 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2756 links = main.params[ 'kill' ][ 'links' ].split()
2757 onosCli = main.CLIs[ main.activeNodes[0] ]
2758 description = "Adding a switch to ensure it is discovered correctly"
2759 main.case( description )
2760
2761 main.step( "Add back " + switch )
2762 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2763 for peer in links:
2764 main.Mininet1.addLink( switch, peer )
2765 ipList = [ node.ip_address for node in main.nodes ]
2766 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2767 main.log.info( "Waiting " + str( switchSleep ) +
2768 " seconds for switch up to be discovered" )
2769 time.sleep( switchSleep )
2770 device = onosCli.getDevice( dpid=switchDPID )
2771 # Peek at the deleted switch
2772 main.log.warn( str( device ) )
2773 result = main.FALSE
2774 if device and device[ 'available' ]:
2775 result = main.TRUE
2776 utilities.assert_equals( expect=main.TRUE, actual=result,
2777 onpass="add switch successful",
2778 onfail="Failed to add switch?" )
2779
2780 def CASE13( self, main ):
2781 """
2782 Clean up
2783 """
2784 import os
2785 import time
2786 assert main.numCtrls, "main.numCtrls not defined"
2787 assert main, "main not defined"
2788 assert utilities.assert_equals, "utilities.assert_equals not defined"
2789 assert main.CLIs, "main.CLIs not defined"
2790 assert main.nodes, "main.nodes not defined"
2791
2792 # printing colors to terminal
2793 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2794 'blue': '\033[94m', 'green': '\033[92m',
2795 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2796 main.case( "Test Cleanup" )
2797 main.step( "Killing tcpdumps" )
2798 main.Mininet2.stopTcpdump()
2799
2800 testname = main.TEST
2801 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2802 main.step( "Copying MN pcap and ONOS log files to test station" )
2803 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2804 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2805 # NOTE: MN Pcap file is being saved to logdir.
2806 # We scp this file as MN and TestON aren't necessarily the same vm
2807
2808 # FIXME: To be replaced with a Jenkin's post script
2809 # TODO: Load these from params
2810 # NOTE: must end in /
2811 logFolder = "/opt/onos/log/"
2812 logFiles = [ "karaf.log", "karaf.log.1" ]
2813 # NOTE: must end in /
2814 for f in logFiles:
2815 for node in main.nodes:
2816 dstName = main.logdir + "/" + node.name + "-" + f
2817 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2818 logFolder + f, dstName )
2819 # std*.log's
2820 # NOTE: must end in /
2821 logFolder = "/opt/onos/var/"
2822 logFiles = [ "stderr.log", "stdout.log" ]
2823 # NOTE: must end in /
2824 for f in logFiles:
2825 for node in main.nodes:
2826 dstName = main.logdir + "/" + node.name + "-" + f
2827 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2828 logFolder + f, dstName )
2829 else:
2830 main.log.debug( "skipping saving log files" )
2831
2832 main.step( "Stopping Mininet" )
2833 mnResult = main.Mininet1.stopNet()
2834 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2835 onpass="Mininet stopped",
2836 onfail="MN cleanup NOT successful" )
2837
2838 main.step( "Checking ONOS Logs for errors" )
2839 for node in main.nodes:
2840 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2841 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2842
2843 try:
2844 timerLog = open( main.logdir + "/Timers.csv", 'w')
2845 # Overwrite with empty line and close
2846 labels = "Gossip Intents"
2847 data = str( gossipTime )
2848 timerLog.write( labels + "\n" + data )
2849 timerLog.close()
2850 except NameError, e:
2851 main.log.exception(e)
2852
2853 def CASE14( self, main ):
2854 """
2855 start election app on all onos nodes
2856 """
2857 assert main.numCtrls, "main.numCtrls not defined"
2858 assert main, "main not defined"
2859 assert utilities.assert_equals, "utilities.assert_equals not defined"
2860 assert main.CLIs, "main.CLIs not defined"
2861 assert main.nodes, "main.nodes not defined"
2862
2863 main.case("Start Leadership Election app")
2864 main.step( "Install leadership election app" )
2865 onosCli = main.CLIs[ main.activeNodes[0] ]
2866 appResult = onosCli.activateApp( "org.onosproject.election" )
2867 utilities.assert_equals(
2868 expect=main.TRUE,
2869 actual=appResult,
2870 onpass="Election app installed",
2871 onfail="Something went wrong with installing Leadership election" )
2872
2873 main.step( "Run for election on each node" )
Jon Hall6e709752016-02-01 13:38:46 -08002874 for i in main.activeNodes:
2875 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002876 time.sleep(5)
2877 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2878 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall6e709752016-02-01 13:38:46 -08002879 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002880 expect=True,
2881 actual=sameResult,
2882 onpass="All nodes see the same leaderboards",
2883 onfail="Inconsistent leaderboards" )
Jon Hall6e709752016-02-01 13:38:46 -08002884
Jon Hall25463a82016-04-13 14:03:52 -07002885 if sameResult:
2886 leader = leaders[ 0 ][ 0 ]
2887 if main.nodes[main.activeNodes[0]].ip_address in leader:
2888 correctLeader = True
2889 else:
2890 correctLeader = False
2891 main.step( "First node was elected leader" )
2892 utilities.assert_equals(
2893 expect=True,
2894 actual=correctLeader,
2895 onpass="Correct leader was elected",
2896 onfail="Incorrect leader" )
Jon Hall6e709752016-02-01 13:38:46 -08002897
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app the remaining steps cannot run
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All boards agree: the shared board's first entry is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            # Inconsistent boards: no single leader can be named
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader by matching its IP against the nodes
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement: runs only if the loop never breaks
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means a node reports no leader; only acceptable when
            # a single-node cluster's sole candidate withdrew
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            # Index 0 was the withdrawn leader, so the entry at index 2 of
            # the old board should now have been promoted to leader
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 candidates: cannot predict the successor
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this wait
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                            str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3071
3072 def CASE16( self, main ):
3073 """
3074 Install Distributed Primitives app
3075 """
3076 import time
3077 assert main.numCtrls, "main.numCtrls not defined"
3078 assert main, "main not defined"
3079 assert utilities.assert_equals, "utilities.assert_equals not defined"
3080 assert main.CLIs, "main.CLIs not defined"
3081 assert main.nodes, "main.nodes not defined"
3082
3083 # Variables for the distributed primitives tests
3084 global pCounterName
Jon Hall6e709752016-02-01 13:38:46 -08003085 global pCounterValue
Jon Hall6e709752016-02-01 13:38:46 -08003086 global onosSet
3087 global onosSetName
3088 pCounterName = "TestON-Partitions"
Jon Hall6e709752016-02-01 13:38:46 -08003089 pCounterValue = 0
Jon Hall6e709752016-02-01 13:38:46 -08003090 onosSet = set([])
3091 onosSetName = "TestON-set"
3092
3093 description = "Install Primitives app"
3094 main.case( description )
3095 main.step( "Install Primitives app" )
3096 appName = "org.onosproject.distributedprimitives"
3097 node = main.activeNodes[0]
3098 appResults = main.CLIs[node].activateApp( appName )
3099 utilities.assert_equals( expect=main.TRUE,
3100 actual=appResults,
3101 onpass="Primitives app activated",
3102 onfail="Primitives app not activated" )
3103 time.sleep( 5 ) # To allow all nodes to activate
3104
3105 def CASE17( self, main ):
3106 """
3107 Check for basic functionality with distributed primitives
3108 """
3109 # Make sure variables are defined/set
3110 assert main.numCtrls, "main.numCtrls not defined"
3111 assert main, "main not defined"
3112 assert utilities.assert_equals, "utilities.assert_equals not defined"
3113 assert main.CLIs, "main.CLIs not defined"
3114 assert main.nodes, "main.nodes not defined"
3115 assert pCounterName, "pCounterName not defined"
Jon Hall6e709752016-02-01 13:38:46 -08003116 assert onosSetName, "onosSetName not defined"
3117 # NOTE: assert fails if value is 0/None/Empty/False
3118 try:
3119 pCounterValue
3120 except NameError:
3121 main.log.error( "pCounterValue not defined, setting to 0" )
3122 pCounterValue = 0
3123 try:
Jon Hall6e709752016-02-01 13:38:46 -08003124 onosSet
3125 except NameError:
3126 main.log.error( "onosSet not defined, setting to empty Set" )
3127 onosSet = set([])
3128 # Variables for the distributed primitives tests. These are local only
3129 addValue = "a"
3130 addAllValue = "a b c d e f"
3131 retainValue = "c d e f"
3132
3133 description = "Check for basic functionality with distributed " +\
3134 "primitives"
3135 main.case( description )
3136 main.caseExplanation = "Test the methods of the distributed " +\
3137 "primitives (counters and sets) throught the cli"
3138 # DISTRIBUTED ATOMIC COUNTERS
3139 # Partitioned counters
3140 main.step( "Increment then get a default counter on each node" )
3141 pCounters = []
3142 threads = []
3143 addedPValues = []
3144 for i in main.activeNodes:
3145 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3146 name="counterAddAndGet-" + str( i ),
3147 args=[ pCounterName ] )
3148 pCounterValue += 1
3149 addedPValues.append( pCounterValue )
3150 threads.append( t )
3151 t.start()
3152
3153 for t in threads:
3154 t.join()
3155 pCounters.append( t.result )
3156 # Check that counter incremented numController times
3157 pCounterResults = True
3158 for i in addedPValues:
3159 tmpResult = i in pCounters
3160 pCounterResults = pCounterResults and tmpResult
3161 if not tmpResult:
3162 main.log.error( str( i ) + " is not in partitioned "
3163 "counter incremented results" )
3164 utilities.assert_equals( expect=True,
3165 actual=pCounterResults,
3166 onpass="Default counter incremented",
3167 onfail="Error incrementing default" +
3168 " counter" )
3169
3170 main.step( "Get then Increment a default counter on each node" )
3171 pCounters = []
3172 threads = []
3173 addedPValues = []
3174 for i in main.activeNodes:
3175 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3176 name="counterGetAndAdd-" + str( i ),
3177 args=[ pCounterName ] )
3178 addedPValues.append( pCounterValue )
3179 pCounterValue += 1
3180 threads.append( t )
3181 t.start()
3182
3183 for t in threads:
3184 t.join()
3185 pCounters.append( t.result )
3186 # Check that counter incremented numController times
3187 pCounterResults = True
3188 for i in addedPValues:
3189 tmpResult = i in pCounters
3190 pCounterResults = pCounterResults and tmpResult
3191 if not tmpResult:
3192 main.log.error( str( i ) + " is not in partitioned "
3193 "counter incremented results" )
3194 utilities.assert_equals( expect=True,
3195 actual=pCounterResults,
3196 onpass="Default counter incremented",
3197 onfail="Error incrementing default" +
3198 " counter" )
3199
3200 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003201 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003202 utilities.assert_equals( expect=main.TRUE,
3203 actual=incrementCheck,
3204 onpass="Added counters are correct",
3205 onfail="Added counters are incorrect" )
3206
3207 main.step( "Add -8 to then get a default counter on each node" )
3208 pCounters = []
3209 threads = []
3210 addedPValues = []
3211 for i in main.activeNodes:
3212 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3213 name="counterIncrement-" + str( i ),
3214 args=[ pCounterName ],
3215 kwargs={ "delta": -8 } )
3216 pCounterValue += -8
3217 addedPValues.append( pCounterValue )
3218 threads.append( t )
3219 t.start()
3220
3221 for t in threads:
3222 t.join()
3223 pCounters.append( t.result )
3224 # Check that counter incremented numController times
3225 pCounterResults = True
3226 for i in addedPValues:
3227 tmpResult = i in pCounters
3228 pCounterResults = pCounterResults and tmpResult
3229 if not tmpResult:
3230 main.log.error( str( i ) + " is not in partitioned "
3231 "counter incremented results" )
3232 utilities.assert_equals( expect=True,
3233 actual=pCounterResults,
3234 onpass="Default counter incremented",
3235 onfail="Error incrementing default" +
3236 " counter" )
3237
3238 main.step( "Add 5 to then get a default counter on each node" )
3239 pCounters = []
3240 threads = []
3241 addedPValues = []
3242 for i in main.activeNodes:
3243 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3244 name="counterIncrement-" + str( i ),
3245 args=[ pCounterName ],
3246 kwargs={ "delta": 5 } )
3247 pCounterValue += 5
3248 addedPValues.append( pCounterValue )
3249 threads.append( t )
3250 t.start()
3251
3252 for t in threads:
3253 t.join()
3254 pCounters.append( t.result )
3255 # Check that counter incremented numController times
3256 pCounterResults = True
3257 for i in addedPValues:
3258 tmpResult = i in pCounters
3259 pCounterResults = pCounterResults and tmpResult
3260 if not tmpResult:
3261 main.log.error( str( i ) + " is not in partitioned "
3262 "counter incremented results" )
3263 utilities.assert_equals( expect=True,
3264 actual=pCounterResults,
3265 onpass="Default counter incremented",
3266 onfail="Error incrementing default" +
3267 " counter" )
3268
3269 main.step( "Get then add 5 to a default counter on each node" )
3270 pCounters = []
3271 threads = []
3272 addedPValues = []
3273 for i in main.activeNodes:
3274 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3275 name="counterIncrement-" + str( i ),
3276 args=[ pCounterName ],
3277 kwargs={ "delta": 5 } )
3278 addedPValues.append( pCounterValue )
3279 pCounterValue += 5
3280 threads.append( t )
3281 t.start()
3282
3283 for t in threads:
3284 t.join()
3285 pCounters.append( t.result )
3286 # Check that counter incremented numController times
3287 pCounterResults = True
3288 for i in addedPValues:
3289 tmpResult = i in pCounters
3290 pCounterResults = pCounterResults and tmpResult
3291 if not tmpResult:
3292 main.log.error( str( i ) + " is not in partitioned "
3293 "counter incremented results" )
3294 utilities.assert_equals( expect=True,
3295 actual=pCounterResults,
3296 onpass="Default counter incremented",
3297 onfail="Error incrementing default" +
3298 " counter" )
3299
3300 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003301 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003302 utilities.assert_equals( expect=main.TRUE,
3303 actual=incrementCheck,
3304 onpass="Added counters are correct",
3305 onfail="Added counters are incorrect" )
3306
Jon Hall6e709752016-02-01 13:38:46 -08003307 # DISTRIBUTED SETS
3308 main.step( "Distributed Set get" )
3309 size = len( onosSet )
3310 getResponses = []
3311 threads = []
3312 for i in main.activeNodes:
3313 t = main.Thread( target=main.CLIs[i].setTestGet,
3314 name="setTestGet-" + str( i ),
3315 args=[ onosSetName ] )
3316 threads.append( t )
3317 t.start()
3318 for t in threads:
3319 t.join()
3320 getResponses.append( t.result )
3321
3322 getResults = main.TRUE
3323 for i in range( len( main.activeNodes ) ):
3324 node = str( main.activeNodes[i] + 1 )
3325 if isinstance( getResponses[ i ], list):
3326 current = set( getResponses[ i ] )
3327 if len( current ) == len( getResponses[ i ] ):
3328 # no repeats
3329 if onosSet != current:
3330 main.log.error( "ONOS" + node +
3331 " has incorrect view" +
3332 " of set " + onosSetName + ":\n" +
3333 str( getResponses[ i ] ) )
3334 main.log.debug( "Expected: " + str( onosSet ) )
3335 main.log.debug( "Actual: " + str( current ) )
3336 getResults = main.FALSE
3337 else:
3338 # error, set is not a set
3339 main.log.error( "ONOS" + node +
3340 " has repeat elements in" +
3341 " set " + onosSetName + ":\n" +
3342 str( getResponses[ i ] ) )
3343 getResults = main.FALSE
3344 elif getResponses[ i ] == main.ERROR:
3345 getResults = main.FALSE
3346 utilities.assert_equals( expect=main.TRUE,
3347 actual=getResults,
3348 onpass="Set elements are correct",
3349 onfail="Set elements are incorrect" )
3350
3351 main.step( "Distributed Set size" )
3352 sizeResponses = []
3353 threads = []
3354 for i in main.activeNodes:
3355 t = main.Thread( target=main.CLIs[i].setTestSize,
3356 name="setTestSize-" + str( i ),
3357 args=[ onosSetName ] )
3358 threads.append( t )
3359 t.start()
3360 for t in threads:
3361 t.join()
3362 sizeResponses.append( t.result )
3363
3364 sizeResults = main.TRUE
3365 for i in range( len( main.activeNodes ) ):
3366 node = str( main.activeNodes[i] + 1 )
3367 if size != sizeResponses[ i ]:
3368 sizeResults = main.FALSE
3369 main.log.error( "ONOS" + node +
3370 " expected a size of " + str( size ) +
3371 " for set " + onosSetName +
3372 " but got " + str( sizeResponses[ i ] ) )
3373 utilities.assert_equals( expect=main.TRUE,
3374 actual=sizeResults,
3375 onpass="Set sizes are correct",
3376 onfail="Set sizes are incorrect" )
3377
3378 main.step( "Distributed Set add()" )
3379 onosSet.add( addValue )
3380 addResponses = []
3381 threads = []
3382 for i in main.activeNodes:
3383 t = main.Thread( target=main.CLIs[i].setTestAdd,
3384 name="setTestAdd-" + str( i ),
3385 args=[ onosSetName, addValue ] )
3386 threads.append( t )
3387 t.start()
3388 for t in threads:
3389 t.join()
3390 addResponses.append( t.result )
3391
3392 # main.TRUE = successfully changed the set
3393 # main.FALSE = action resulted in no change in set
3394 # main.ERROR - Some error in executing the function
3395 addResults = main.TRUE
3396 for i in range( len( main.activeNodes ) ):
3397 if addResponses[ i ] == main.TRUE:
3398 # All is well
3399 pass
3400 elif addResponses[ i ] == main.FALSE:
3401 # Already in set, probably fine
3402 pass
3403 elif addResponses[ i ] == main.ERROR:
3404 # Error in execution
3405 addResults = main.FALSE
3406 else:
3407 # unexpected result
3408 addResults = main.FALSE
3409 if addResults != main.TRUE:
3410 main.log.error( "Error executing set add" )
3411
3412 # Check if set is still correct
3413 size = len( onosSet )
3414 getResponses = []
3415 threads = []
3416 for i in main.activeNodes:
3417 t = main.Thread( target=main.CLIs[i].setTestGet,
3418 name="setTestGet-" + str( i ),
3419 args=[ onosSetName ] )
3420 threads.append( t )
3421 t.start()
3422 for t in threads:
3423 t.join()
3424 getResponses.append( t.result )
3425 getResults = main.TRUE
3426 for i in range( len( main.activeNodes ) ):
3427 node = str( main.activeNodes[i] + 1 )
3428 if isinstance( getResponses[ i ], list):
3429 current = set( getResponses[ i ] )
3430 if len( current ) == len( getResponses[ i ] ):
3431 # no repeats
3432 if onosSet != current:
3433 main.log.error( "ONOS" + node + " has incorrect view" +
3434 " of set " + onosSetName + ":\n" +
3435 str( getResponses[ i ] ) )
3436 main.log.debug( "Expected: " + str( onosSet ) )
3437 main.log.debug( "Actual: " + str( current ) )
3438 getResults = main.FALSE
3439 else:
3440 # error, set is not a set
3441 main.log.error( "ONOS" + node + " has repeat elements in" +
3442 " set " + onosSetName + ":\n" +
3443 str( getResponses[ i ] ) )
3444 getResults = main.FALSE
3445 elif getResponses[ i ] == main.ERROR:
3446 getResults = main.FALSE
3447 sizeResponses = []
3448 threads = []
3449 for i in main.activeNodes:
3450 t = main.Thread( target=main.CLIs[i].setTestSize,
3451 name="setTestSize-" + str( i ),
3452 args=[ onosSetName ] )
3453 threads.append( t )
3454 t.start()
3455 for t in threads:
3456 t.join()
3457 sizeResponses.append( t.result )
3458 sizeResults = main.TRUE
3459 for i in range( len( main.activeNodes ) ):
3460 node = str( main.activeNodes[i] + 1 )
3461 if size != sizeResponses[ i ]:
3462 sizeResults = main.FALSE
3463 main.log.error( "ONOS" + node +
3464 " expected a size of " + str( size ) +
3465 " for set " + onosSetName +
3466 " but got " + str( sizeResponses[ i ] ) )
3467 addResults = addResults and getResults and sizeResults
3468 utilities.assert_equals( expect=main.TRUE,
3469 actual=addResults,
3470 onpass="Set add correct",
3471 onfail="Set add was incorrect" )
3472
        main.step( "Distributed Set addAll()" )
        # Add every (space-separated) element of addAllValue to the local
        # reference set, then ask every active node to do the same in parallel
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        # Every node should report exactly the elements in the reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify each node's reported size matches the expected size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3569
        main.step( "Distributed Set contains()" )
        # Ask every active node whether addValue is in the set; setTestGet
        # with "values" returns a tuple whose second element is the answer
        containsResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setContains-" + str( i ),
                             args=[ onosSetName ],
                             kwargs={ "values": addValue } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            # NOTE: This is the tuple
            containsResponses.append( t.result )

        containsResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if containsResponses[ i ] == main.ERROR:
                containsResults = main.FALSE
            else:
                # Second element of the tuple is the contains result
                containsResults = containsResults and\
                                  containsResponses[ i ][ 1 ]
        utilities.assert_equals( expect=main.TRUE,
                                 actual=containsResults,
                                 onpass="Set contains is functional",
                                 onfail="Set contains failed" )
3596
3597 main.step( "Distributed Set containsAll()" )
3598 containsAllResponses = []
3599 threads = []
3600 for i in main.activeNodes:
3601 t = main.Thread( target=main.CLIs[i].setTestGet,
3602 name="setContainsAll-" + str( i ),
3603 args=[ onosSetName ],
3604 kwargs={ "values": addAllValue } )
3605 threads.append( t )
3606 t.start()
3607 for t in threads:
3608 t.join()
3609 # NOTE: This is the tuple
3610 containsAllResponses.append( t.result )
3611
3612 containsAllResults = main.TRUE
3613 for i in range( len( main.activeNodes ) ):
3614 if containsResponses[ i ] == main.ERROR:
3615 containsResults = main.FALSE
3616 else:
3617 containsResults = containsResults and\
3618 containsResponses[ i ][ 1 ]
3619 utilities.assert_equals( expect=main.TRUE,
3620 actual=containsAllResults,
3621 onpass="Set containsAll is functional",
3622 onfail="Set containsAll failed" )
3623
        main.step( "Distributed Set remove()" )
        # Remove addValue from the local reference set, then ask every active
        # node to remove it from the distributed set in parallel
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        # Every node should report exactly the elements in the reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify each node's reported size matches the expected size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )
3720
3721 main.step( "Distributed Set removeAll()" )
3722 onosSet.difference_update( addAllValue.split() )
3723 removeAllResponses = []
3724 threads = []
3725 try:
3726 for i in main.activeNodes:
3727 t = main.Thread( target=main.CLIs[i].setTestRemove,
3728 name="setTestRemoveAll-" + str( i ),
3729 args=[ onosSetName, addAllValue ] )
3730 threads.append( t )
3731 t.start()
3732 for t in threads:
3733 t.join()
3734 removeAllResponses.append( t.result )
3735 except Exception, e:
3736 main.log.exception(e)
3737
3738 # main.TRUE = successfully changed the set
3739 # main.FALSE = action resulted in no change in set
3740 # main.ERROR - Some error in executing the function
3741 removeAllResults = main.TRUE
3742 for i in range( len( main.activeNodes ) ):
3743 if removeAllResponses[ i ] == main.TRUE:
3744 # All is well
3745 pass
3746 elif removeAllResponses[ i ] == main.FALSE:
3747 # not in set, probably fine
3748 pass
3749 elif removeAllResponses[ i ] == main.ERROR:
3750 # Error in execution
3751 removeAllResults = main.FALSE
3752 else:
3753 # unexpected result
3754 removeAllResults = main.FALSE
3755 if removeAllResults != main.TRUE:
3756 main.log.error( "Error executing set removeAll" )
3757
3758 # Check if set is still correct
3759 size = len( onosSet )
3760 getResponses = []
3761 threads = []
3762 for i in main.activeNodes:
3763 t = main.Thread( target=main.CLIs[i].setTestGet,
3764 name="setTestGet-" + str( i ),
3765 args=[ onosSetName ] )
3766 threads.append( t )
3767 t.start()
3768 for t in threads:
3769 t.join()
3770 getResponses.append( t.result )
3771 getResults = main.TRUE
3772 for i in range( len( main.activeNodes ) ):
3773 node = str( main.activeNodes[i] + 1 )
3774 if isinstance( getResponses[ i ], list):
3775 current = set( getResponses[ i ] )
3776 if len( current ) == len( getResponses[ i ] ):
3777 # no repeats
3778 if onosSet != current:
3779 main.log.error( "ONOS" + node +
3780 " has incorrect view" +
3781 " of set " + onosSetName + ":\n" +
3782 str( getResponses[ i ] ) )
3783 main.log.debug( "Expected: " + str( onosSet ) )
3784 main.log.debug( "Actual: " + str( current ) )
3785 getResults = main.FALSE
3786 else:
3787 # error, set is not a set
3788 main.log.error( "ONOS" + node +
3789 " has repeat elements in" +
3790 " set " + onosSetName + ":\n" +
3791 str( getResponses[ i ] ) )
3792 getResults = main.FALSE
3793 elif getResponses[ i ] == main.ERROR:
3794 getResults = main.FALSE
3795 sizeResponses = []
3796 threads = []
3797 for i in main.activeNodes:
3798 t = main.Thread( target=main.CLIs[i].setTestSize,
3799 name="setTestSize-" + str( i ),
3800 args=[ onosSetName ] )
3801 threads.append( t )
3802 t.start()
3803 for t in threads:
3804 t.join()
3805 sizeResponses.append( t.result )
3806 sizeResults = main.TRUE
3807 for i in range( len( main.activeNodes ) ):
3808 node = str( main.activeNodes[i] + 1 )
3809 if size != sizeResponses[ i ]:
3810 sizeResults = main.FALSE
3811 main.log.error( "ONOS" + node +
3812 " expected a size of " + str( size ) +
3813 " for set " + onosSetName +
3814 " but got " + str( sizeResponses[ i ] ) )
3815 removeAllResults = removeAllResults and getResults and sizeResults
3816 utilities.assert_equals( expect=main.TRUE,
3817 actual=removeAllResults,
3818 onpass="Set removeAll correct",
3819 onfail="Set removeAll was incorrect" )
3820
        main.step( "Distributed Set addAll()" )
        # Re-populate the set after removeAll: add all addAllValue elements
        # again on every active node in parallel
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        # Every node should report exactly the elements in the reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify each node's reported size matches the expected size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
3917
        main.step( "Distributed Set clear()" )
        # Empty the local reference set, then clear the distributed set on
        # every active node in parallel (setTestRemove with clear=True)
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " "], # Values doesn't matter
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        # Every node should now report an empty set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify each node's reported size matches the expected size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )
4015
        main.step( "Distributed Set addAll()" )
        # Re-populate the set after clear: add all addAllValue elements again
        # on every active node in parallel
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        # Every node should report exactly the elements in the reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify each node's reported size matches the expected size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )
4112
        main.step( "Distributed Set retain()" )
        # Keep only the elements of retainValue (set intersection) locally,
        # then do the same on every active node in parallel
        # (setTestRemove with retain=True)
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        # Every node should report exactly the elements in the reference set
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        # Also verify each node's reported size matches the expected size
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
4209
4210 # Transactional maps
4211 main.step( "Partitioned Transactional maps put" )
4212 tMapValue = "Testing"
4213 numKeys = 100
4214 putResult = True
4215 node = main.activeNodes[0]
4216 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4217 if putResponses and len( putResponses ) == 100:
4218 for i in putResponses:
4219 if putResponses[ i ][ 'value' ] != tMapValue:
4220 putResult = False
4221 else:
4222 putResult = False
4223 if not putResult:
4224 main.log.debug( "Put response values: " + str( putResponses ) )
4225 utilities.assert_equals( expect=True,
4226 actual=putResult,
4227 onpass="Partitioned Transactional Map put successful",
4228 onfail="Partitioned Transactional Map put values are incorrect" )
4229
        main.step( "Partitioned Transactional maps get" )
        # FIXME: is this sleep needed?
        time.sleep( 5 )

        getCheck = True
        # For every key written in the put step, read it back from every
        # active node in parallel and check all nodes return the put value
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )