blob: a48a46036da75d4f55aa68efcc92eb49d554690b [file] [log] [blame]
"""
Description: This test is to determine if ONOS can handle
    a full network partition

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
26
27
28class HAfullNetPartition:
29
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
51 import imp
52 import pexpect
53 import time
Jon Halla440e872016-03-31 15:15:50 -070054 import json
Jon Hall6e709752016-02-01 13:38:46 -080055 main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
56 "initialization" )
57 main.case( "Setting up test environment" )
58 main.caseExplanation = "Setup the test environment including " +\
59 "installing ONOS, starting Mininet and ONOS" +\
60 "cli sessions."
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
69 main.numCtrls = int( main.params[ 'num_controllers' ] )
70 if main.ONOSbench.maxNodes:
71 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
74 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070081 # These are for csv plotting in jenkins
82 global labels
83 global data
84 labels = []
85 data = []
Jon Hall6e709752016-02-01 13:38:46 -080086
87 # FIXME: just get controller port from params?
88 # TODO: do we really need all these?
89 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
90 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
91 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
92 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
93 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
94 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
95 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
96
97 try:
Jon Hall53c5e662016-04-13 16:06:56 -070098 from tests.HA.dependencies.HA import HA
Jon Hall41d39f12016-04-11 22:54:35 -070099 main.HA = HA()
Jon Hall6e709752016-02-01 13:38:46 -0800100 except Exception as e:
101 main.log.exception( e )
102 main.cleanup()
103 main.exit()
104
105 main.CLIs = []
106 main.nodes = []
107 ipList = []
108 for i in range( 1, main.numCtrls + 1 ):
109 try:
110 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
111 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
112 ipList.append( main.nodes[ -1 ].ip_address )
113 except AttributeError:
114 break
115
116 main.step( "Create cell file" )
117 cellAppString = main.params[ 'ENV' ][ 'appString' ]
118 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
119 main.Mininet1.ip_address,
120 cellAppString, ipList )
121 main.step( "Applying cell variable to environment" )
122 cellResult = main.ONOSbench.setCell( cellName )
123 verifyResult = main.ONOSbench.verifyCell()
124
125 # FIXME:this is short term fix
126 main.log.info( "Removing raft logs" )
127 main.ONOSbench.onosRemoveRaftLogs()
128
129 main.log.info( "Uninstalling ONOS" )
130 for node in main.nodes:
131 main.ONOSbench.onosUninstall( node.ip_address )
132
133 # Make sure ONOS is DEAD
134 main.log.info( "Killing any ONOS processes" )
135 killResults = main.TRUE
136 for node in main.nodes:
137 killed = main.ONOSbench.onosKill( node.ip_address )
138 killResults = killResults and killed
139
140 cleanInstallResult = main.TRUE
141 gitPullResult = main.TRUE
142
143 main.step( "Starting Mininet" )
144 # scp topo file to mininet
145 # TODO: move to params?
146 topoName = "obelisk.py"
147 filePath = main.ONOSbench.home + "/tools/test/topos/"
148 main.ONOSbench.scp( main.Mininet1,
149 filePath + topoName,
150 main.Mininet1.home,
151 direction="to" )
152 mnResult = main.Mininet1.startNet( )
153 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
154 onpass="Mininet Started",
155 onfail="Error starting Mininet" )
156
157 main.step( "Git checkout and pull " + gitBranch )
158 if PULLCODE:
159 main.ONOSbench.gitCheckout( gitBranch )
160 gitPullResult = main.ONOSbench.gitPull()
161 # values of 1 or 3 are good
162 utilities.assert_lesser( expect=0, actual=gitPullResult,
163 onpass="Git pull successful",
164 onfail="Git pull failed" )
165 main.ONOSbench.getVersion( report=True )
166
167 main.step( "Using mvn clean install" )
168 cleanInstallResult = main.TRUE
169 if PULLCODE and gitPullResult == main.TRUE:
170 cleanInstallResult = main.ONOSbench.cleanInstall()
171 else:
172 main.log.warn( "Did not pull new code so skipping mvn " +
173 "clean install" )
174 utilities.assert_equals( expect=main.TRUE,
175 actual=cleanInstallResult,
176 onpass="MCI successful",
177 onfail="MCI failed" )
178 # GRAPHS
179 # NOTE: important params here:
180 # job = name of Jenkins job
181 # Plot Name = Plot-HA, only can be used if multiple plots
182 # index = The number of the graph under plot name
183 job = "HAfullNetPartition"
184 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700185 index = "1"
Jon Hall6e709752016-02-01 13:38:46 -0800186 graphs = '<ac:structured-macro ac:name="html">\n'
187 graphs += '<ac:plain-text-body><![CDATA[\n'
188 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
189 '/plot/' + plotName + '/getPlot?index=' + index +\
190 '&width=500&height=300"' +\
191 'noborder="0" width="500" height="300" scrolling="yes" ' +\
192 'seamless="seamless"></iframe>\n'
193 graphs += ']]></ac:plain-text-body>\n'
194 graphs += '</ac:structured-macro>\n'
195 main.log.wiki(graphs)
196
197 main.step( "Creating ONOS package" )
198 # copy gen-partions file to ONOS
199 # NOTE: this assumes TestON and ONOS are on the same machine
Jon Hall53c5e662016-04-13 16:06:56 -0700200 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
Jon Hall6e709752016-02-01 13:38:46 -0800201 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
202 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
203 main.ONOSbench.ip_address,
204 srcFile,
205 dstDir,
206 pwd=main.ONOSbench.pwd,
207 direction="from" )
208 packageResult = main.ONOSbench.onosPackage()
209 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
210 onpass="ONOS package successful",
211 onfail="ONOS package failed" )
212
213 main.step( "Installing ONOS package" )
214 onosInstallResult = main.TRUE
215 for node in main.nodes:
216 tmpResult = main.ONOSbench.onosInstall( options="-f",
217 node=node.ip_address )
218 onosInstallResult = onosInstallResult and tmpResult
219 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
220 onpass="ONOS install successful",
221 onfail="ONOS install failed" )
222 # clean up gen-partitions file
223 try:
224 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
225 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
226 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
227 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
228 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
229 str( main.ONOSbench.handle.before ) )
230 except ( pexpect.TIMEOUT, pexpect.EOF ):
231 main.log.exception( "ONOSbench: pexpect exception found:" +
232 main.ONOSbench.handle.before )
233 main.cleanup()
234 main.exit()
235
236 main.step( "Checking if ONOS is up yet" )
237 for i in range( 2 ):
238 onosIsupResult = main.TRUE
239 for node in main.nodes:
240 started = main.ONOSbench.isup( node.ip_address )
241 if not started:
242 main.log.error( node.name + " hasn't started" )
243 onosIsupResult = onosIsupResult and started
244 if onosIsupResult == main.TRUE:
245 break
246 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
247 onpass="ONOS startup successful",
248 onfail="ONOS startup failed" )
249
250 main.log.step( "Starting ONOS CLI sessions" )
251 cliResults = main.TRUE
252 threads = []
253 for i in range( main.numCtrls ):
254 t = main.Thread( target=main.CLIs[i].startOnosCli,
255 name="startOnosCli-" + str( i ),
256 args=[main.nodes[i].ip_address] )
257 threads.append( t )
258 t.start()
259
260 for t in threads:
261 t.join()
262 cliResults = cliResults and t.result
263 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
264 onpass="ONOS cli startup successful",
265 onfail="ONOS cli startup failed" )
266
267 # Create a list of active nodes for use when some nodes are stopped
268 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
269
270 if main.params[ 'tcpdump' ].lower() == "true":
271 main.step( "Start Packet Capture MN" )
272 main.Mininet2.startTcpdump(
273 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
274 + "-MN.pcap",
275 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
276 port=main.params[ 'MNtcpdump' ][ 'port' ] )
277
Jon Halla440e872016-03-31 15:15:50 -0700278 main.step( "Checking ONOS nodes" )
Jon Hall41d39f12016-04-11 22:54:35 -0700279 nodeResults = utilities.retry( main.HA.nodesCheck,
280 False,
281 args=[main.activeNodes],
282 attempts=5 )
Jon Halla440e872016-03-31 15:15:50 -0700283
Jon Hall41d39f12016-04-11 22:54:35 -0700284 utilities.assert_equals( expect=True, actual=nodeResults,
Jon Halla440e872016-03-31 15:15:50 -0700285 onpass="Nodes check successful",
286 onfail="Nodes check NOT successful" )
287
288 if not nodeResults:
Jon Hall7ac7bc32016-05-05 10:57:02 -0700289 for i in main.activeNodes:
290 cli = main.CLIs[i]
Jon Halla440e872016-03-31 15:15:50 -0700291 main.log.debug( "{} components not ACTIVE: \n{}".format(
292 cli.name,
293 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -0800294 main.log.error( "Failed to start ONOS, stopping test" )
295 main.cleanup()
296 main.exit()
297
Jon Hall172b7ba2016-04-07 18:12:20 -0700298 main.step( "Activate apps defined in the params file" )
299 # get data from the params
300 apps = main.params.get( 'apps' )
301 if apps:
302 apps = apps.split(',')
303 main.log.warn( apps )
304 activateResult = True
305 for app in apps:
306 main.CLIs[ 0 ].app( app, "Activate" )
307 # TODO: check this worked
308 time.sleep( 10 ) # wait for apps to activate
309 for app in apps:
310 state = main.CLIs[ 0 ].appStatus( app )
311 if state == "ACTIVE":
312 activateResult = activeResult and True
313 else:
314 main.log.error( "{} is in {} state".format( app, state ) )
315 activeResult = False
316 utilities.assert_equals( expect=True,
317 actual=activateResult,
318 onpass="Successfully activated apps",
319 onfail="Failed to activate apps" )
320 else:
321 main.log.warn( "No apps were specified to be loaded after startup" )
322
323 main.step( "Set ONOS configurations" )
324 config = main.params.get( 'ONOS_Configuration' )
325 if config:
326 main.log.debug( config )
327 checkResult = main.TRUE
328 for component in config:
329 for setting in config[component]:
330 value = config[component][setting]
331 check = main.CLIs[ 0 ].setCfg( component, setting, value )
332 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
333 checkResult = check and checkResult
334 utilities.assert_equals( expect=main.TRUE,
335 actual=checkResult,
336 onpass="Successfully set config",
337 onfail="Failed to set config" )
338 else:
339 main.log.warn( "No configurations were specified to be changed after startup" )
340
Jon Hall9d2dcad2016-04-08 10:15:20 -0700341 main.step( "App Ids check" )
342 appCheck = main.TRUE
343 threads = []
344 for i in main.activeNodes:
345 t = main.Thread( target=main.CLIs[i].appToIDCheck,
346 name="appToIDCheck-" + str( i ),
347 args=[] )
348 threads.append( t )
349 t.start()
350
351 for t in threads:
352 t.join()
353 appCheck = appCheck and t.result
354 if appCheck != main.TRUE:
355 node = main.activeNodes[0]
356 main.log.warn( main.CLIs[node].apps() )
357 main.log.warn( main.CLIs[node].appIDs() )
358 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
359 onpass="App Ids seem to be correct",
360 onfail="Something is wrong with app Ids" )
361
Jon Hall6e709752016-02-01 13:38:46 -0800362 def CASE2( self, main ):
363 """
364 Assign devices to controllers
365 """
366 import re
367 assert main.numCtrls, "main.numCtrls not defined"
368 assert main, "main not defined"
369 assert utilities.assert_equals, "utilities.assert_equals not defined"
370 assert main.CLIs, "main.CLIs not defined"
371 assert main.nodes, "main.nodes not defined"
372 assert ONOS1Port, "ONOS1Port not defined"
373 assert ONOS2Port, "ONOS2Port not defined"
374 assert ONOS3Port, "ONOS3Port not defined"
375 assert ONOS4Port, "ONOS4Port not defined"
376 assert ONOS5Port, "ONOS5Port not defined"
377 assert ONOS6Port, "ONOS6Port not defined"
378 assert ONOS7Port, "ONOS7Port not defined"
379
380 main.case( "Assigning devices to controllers" )
381 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
382 "and check that an ONOS node becomes the " +\
383 "master of the device."
384 main.step( "Assign switches to controllers" )
385
386 ipList = []
387 for i in range( main.numCtrls ):
388 ipList.append( main.nodes[ i ].ip_address )
389 swList = []
390 for i in range( 1, 29 ):
391 swList.append( "s" + str( i ) )
392 main.Mininet1.assignSwController( sw=swList, ip=ipList )
393
394 mastershipCheck = main.TRUE
395 for i in range( 1, 29 ):
396 response = main.Mininet1.getSwController( "s" + str( i ) )
397 try:
398 main.log.info( str( response ) )
399 except Exception:
400 main.log.info( repr( response ) )
401 for node in main.nodes:
402 if re.search( "tcp:" + node.ip_address, response ):
403 mastershipCheck = mastershipCheck and main.TRUE
404 else:
405 main.log.error( "Error, node " + node.ip_address + " is " +
406 "not in the list of controllers s" +
407 str( i ) + " is connecting to." )
408 mastershipCheck = main.FALSE
409 utilities.assert_equals(
410 expect=main.TRUE,
411 actual=mastershipCheck,
412 onpass="Switch mastership assigned correctly",
413 onfail="Switches not assigned correctly to controllers" )
414
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually pins each of the 28 obelisk switches to a specific ONOS
        node via 'device-role', then re-reads the roles to confirm the
        reassignment took effect.
        """
        import time
        # Sanity-check that CASE1 populated everything this case relies on
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All CLI calls go through the first active node's CLI
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster (the "% main.numCtrls" wraps the target node index)
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # the string argument to getDevice is the dpid suffix of the
                # obelisk topology switch
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice may return None (AttributeError on .get) or an empty
            # id (AssertionError); dump the device view for debugging
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment: the requested IP should now be the master
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
536
    def CASE3( self, main ):
        """
        Assign intents

        Uses reactive forwarding + pingall to discover hosts, installs
        host-to-host intents between h8-h17 and h18-h27, then verifies the
        intents reach INSTALLED state on every active node (anti-entropy).
        """
        import time
        import json
        # Sanity-check that CASE1 populated everything this case relies on
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        # Run appToIDCheck on every active node in parallel
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            # one retry: first pingall can fail while fwd flows are installing
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        # Pair hosts h8..h17 with h18..h27; MACs in this topology encode the
        # host number in the last octet
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                # spread intent submissions round-robin across active nodes
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        # Diagnostic dump: leadership topics for all intent partitions
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        # Poll (up to 100s) until every active node reports the same intent
        # IDs, all in INSTALLED state
        for j in range( 100 ):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep( 1 )
        if not intentStop:
            intentStop = time.time()
        # gossipTime is global so later cases / csv plotting can report it
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        # NOTE(review): pendingMap here is the raw JSON string; the "key"
        # substring test presumably detects a non-empty pending map — confirm
        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
932
    def CASE4( self, main ):
        """
        Ping across added host intents

        Polls intent state (up to ~40 seconds) until every intent reports
        INSTALLED, then pings each h<i> -> h<i+10> host pair ( i in 8..17 )
        through Mininet and checks the leadership, partitions, and
        pending-map state of the cluster for debugging.  If the intents
        never reached INSTALLED, waits 60 more seconds, dumps state again,
        and retries the pings once.
        """
        import json
        import time
        # Sanity-check that the test framework handed us a usable environment
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # All CLI queries in this case go through the first active node
        onosCli = main.CLIs[ main.activeNodes[0] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll roughly once a second, up to 40 times, until all intents
        # are INSTALLED on the queried node
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host pairs h8<->h18 .. h17<->h27 were connected by the host intents
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                # The intent store is sharded into 14 partitions; each should
                # have a leader topic
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If intents never settled, give them another minute, dump state
        # again, and retry the pings once
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # If any topic lacked a leader, dump raw leaders from every node
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
            # Print flowrules
            node = main.activeNodes[0]
            main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
            main.step( "Wait a minute then ping again" )
            # the wait is above
            PingResult = main.TRUE
            for i in range( 8, 18 ):
                ping = main.Mininet1.pingHost( src="h" + str( i ),
                                               target="h" + str( i + 10 ) )
                PingResult = PingResult and ping
                if ping == main.FALSE:
                    main.log.warn( "Ping failed between h" + str( i ) +
                                   " and h" + str( i + 10 ) )
                elif ping == main.TRUE:
                    main.log.info( "Ping test passed!" )
                    # Don't set PingResult or you'd override failures
            if PingResult == main.FALSE:
                main.log.error(
                    "Intents have not been installed correctly, pings failed." )
                # TODO: pretty print
                main.log.warn( "ONOS1 intents: " )
                try:
                    tmpIntents = onosCli.intents()
                    main.log.warn( json.dumps( json.loads( tmpIntents ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( tmpIntents ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=PingResult,
                onpass="Intents have been installed correctly and pings work",
                onfail="Intents have not been installed correctly, pings failed." )
1214
1215 def CASE5( self, main ):
1216 """
1217 Reading state of ONOS
1218 """
1219 import json
1220 import time
1221 assert main.numCtrls, "main.numCtrls not defined"
1222 assert main, "main not defined"
1223 assert utilities.assert_equals, "utilities.assert_equals not defined"
1224 assert main.CLIs, "main.CLIs not defined"
1225 assert main.nodes, "main.nodes not defined"
1226
1227 main.case( "Setting up and gathering data for current state" )
1228 # The general idea for this test case is to pull the state of
1229 # ( intents,flows, topology,... ) from each ONOS node
1230 # We can then compare them with each other and also with past states
1231
1232 main.step( "Check that each switch has a master" )
1233 global mastershipState
1234 mastershipState = '[]'
1235
1236 # Assert that each device has a master
1237 rolesNotNull = main.TRUE
1238 threads = []
1239 for i in main.activeNodes:
1240 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1241 name="rolesNotNull-" + str( i ),
1242 args=[] )
1243 threads.append( t )
1244 t.start()
1245
1246 for t in threads:
1247 t.join()
1248 rolesNotNull = rolesNotNull and t.result
1249 utilities.assert_equals(
1250 expect=main.TRUE,
1251 actual=rolesNotNull,
1252 onpass="Each device has a master",
1253 onfail="Some devices don't have a master assigned" )
1254
1255 main.step( "Get the Mastership of each switch from each controller" )
1256 ONOSMastership = []
1257 mastershipCheck = main.FALSE
1258 consistentMastership = True
1259 rolesResults = True
1260 threads = []
1261 for i in main.activeNodes:
1262 t = main.Thread( target=main.CLIs[i].roles,
1263 name="roles-" + str( i ),
1264 args=[] )
1265 threads.append( t )
1266 t.start()
1267
1268 for t in threads:
1269 t.join()
1270 ONOSMastership.append( t.result )
1271
1272 for i in range( len( ONOSMastership ) ):
1273 node = str( main.activeNodes[i] + 1 )
1274 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1275 main.log.error( "Error in getting ONOS" + node + " roles" )
1276 main.log.warn( "ONOS" + node + " mastership response: " +
1277 repr( ONOSMastership[i] ) )
1278 rolesResults = False
1279 utilities.assert_equals(
1280 expect=True,
1281 actual=rolesResults,
1282 onpass="No error in reading roles output",
1283 onfail="Error in reading roles from ONOS" )
1284
1285 main.step( "Check for consistency in roles from each controller" )
1286 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1287 main.log.info(
1288 "Switch roles are consistent across all ONOS nodes" )
1289 else:
1290 consistentMastership = False
1291 utilities.assert_equals(
1292 expect=True,
1293 actual=consistentMastership,
1294 onpass="Switch roles are consistent across all ONOS nodes",
1295 onfail="ONOS nodes have different views of switch roles" )
1296
1297 if rolesResults and not consistentMastership:
1298 for i in range( len( main.activeNodes ) ):
1299 node = str( main.activeNodes[i] + 1 )
1300 try:
1301 main.log.warn(
1302 "ONOS" + node + " roles: ",
1303 json.dumps(
1304 json.loads( ONOSMastership[ i ] ),
1305 sort_keys=True,
1306 indent=4,
1307 separators=( ',', ': ' ) ) )
1308 except ( ValueError, TypeError ):
1309 main.log.warn( repr( ONOSMastership[ i ] ) )
1310 elif rolesResults and consistentMastership:
1311 mastershipCheck = main.TRUE
1312 mastershipState = ONOSMastership[ 0 ]
1313
1314 main.step( "Get the intents from each controller" )
1315 global intentState
1316 intentState = []
1317 ONOSIntents = []
1318 intentCheck = main.FALSE
1319 consistentIntents = True
1320 intentsResults = True
1321 threads = []
1322 for i in main.activeNodes:
1323 t = main.Thread( target=main.CLIs[i].intents,
1324 name="intents-" + str( i ),
1325 args=[],
1326 kwargs={ 'jsonFormat': True } )
1327 threads.append( t )
1328 t.start()
1329
1330 for t in threads:
1331 t.join()
1332 ONOSIntents.append( t.result )
1333
1334 for i in range( len( ONOSIntents ) ):
1335 node = str( main.activeNodes[i] + 1 )
1336 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1337 main.log.error( "Error in getting ONOS" + node + " intents" )
1338 main.log.warn( "ONOS" + node + " intents response: " +
1339 repr( ONOSIntents[ i ] ) )
1340 intentsResults = False
1341 utilities.assert_equals(
1342 expect=True,
1343 actual=intentsResults,
1344 onpass="No error in reading intents output",
1345 onfail="Error in reading intents from ONOS" )
1346
1347 main.step( "Check for consistency in Intents from each controller" )
1348 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1349 main.log.info( "Intents are consistent across all ONOS " +
1350 "nodes" )
1351 else:
1352 consistentIntents = False
1353 main.log.error( "Intents not consistent" )
1354 utilities.assert_equals(
1355 expect=True,
1356 actual=consistentIntents,
1357 onpass="Intents are consistent across all ONOS nodes",
1358 onfail="ONOS nodes have different views of intents" )
1359
1360 if intentsResults:
1361 # Try to make it easy to figure out what is happening
1362 #
1363 # Intent ONOS1 ONOS2 ...
1364 # 0x01 INSTALLED INSTALLING
1365 # ... ... ...
1366 # ... ... ...
1367 title = " Id"
1368 for n in main.activeNodes:
1369 title += " " * 10 + "ONOS" + str( n + 1 )
1370 main.log.warn( title )
1371 # get all intent keys in the cluster
1372 keys = []
Jon Halla440e872016-03-31 15:15:50 -07001373 try:
1374 # Get the set of all intent keys
Jon Hall6e709752016-02-01 13:38:46 -08001375 for nodeStr in ONOSIntents:
1376 node = json.loads( nodeStr )
1377 for intent in node:
Jon Halla440e872016-03-31 15:15:50 -07001378 keys.append( intent.get( 'id' ) )
1379 keys = set( keys )
1380 # For each intent key, print the state on each node
1381 for key in keys:
1382 row = "%-13s" % key
1383 for nodeStr in ONOSIntents:
1384 node = json.loads( nodeStr )
1385 for intent in node:
1386 if intent.get( 'id', "Error" ) == key:
1387 row += "%-15s" % intent.get( 'state' )
1388 main.log.warn( row )
1389 # End of intent state table
1390 except ValueError as e:
1391 main.log.exception( e )
1392 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
Jon Hall6e709752016-02-01 13:38:46 -08001393
1394 if intentsResults and not consistentIntents:
1395 # print the json objects
1396 n = str( main.activeNodes[-1] + 1 )
1397 main.log.debug( "ONOS" + n + " intents: " )
1398 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1399 sort_keys=True,
1400 indent=4,
1401 separators=( ',', ': ' ) ) )
1402 for i in range( len( ONOSIntents ) ):
1403 node = str( main.activeNodes[i] + 1 )
1404 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1405 main.log.debug( "ONOS" + node + " intents: " )
1406 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1407 sort_keys=True,
1408 indent=4,
1409 separators=( ',', ': ' ) ) )
1410 else:
1411 main.log.debug( "ONOS" + node + " intents match ONOS" +
1412 n + " intents" )
1413 elif intentsResults and consistentIntents:
1414 intentCheck = main.TRUE
1415 intentState = ONOSIntents[ 0 ]
1416
1417 main.step( "Get the flows from each controller" )
1418 global flowState
1419 flowState = []
1420 ONOSFlows = []
1421 ONOSFlowsJson = []
1422 flowCheck = main.FALSE
1423 consistentFlows = True
1424 flowsResults = True
1425 threads = []
1426 for i in main.activeNodes:
1427 t = main.Thread( target=main.CLIs[i].flows,
1428 name="flows-" + str( i ),
1429 args=[],
1430 kwargs={ 'jsonFormat': True } )
1431 threads.append( t )
1432 t.start()
1433
1434 # NOTE: Flows command can take some time to run
1435 time.sleep(30)
1436 for t in threads:
1437 t.join()
1438 result = t.result
1439 ONOSFlows.append( result )
1440
1441 for i in range( len( ONOSFlows ) ):
1442 num = str( main.activeNodes[i] + 1 )
1443 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1444 main.log.error( "Error in getting ONOS" + num + " flows" )
1445 main.log.warn( "ONOS" + num + " flows response: " +
1446 repr( ONOSFlows[ i ] ) )
1447 flowsResults = False
1448 ONOSFlowsJson.append( None )
1449 else:
1450 try:
1451 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1452 except ( ValueError, TypeError ):
1453 # FIXME: change this to log.error?
1454 main.log.exception( "Error in parsing ONOS" + num +
1455 " response as json." )
1456 main.log.error( repr( ONOSFlows[ i ] ) )
1457 ONOSFlowsJson.append( None )
1458 flowsResults = False
1459 utilities.assert_equals(
1460 expect=True,
1461 actual=flowsResults,
1462 onpass="No error in reading flows output",
1463 onfail="Error in reading flows from ONOS" )
1464
1465 main.step( "Check for consistency in Flows from each controller" )
1466 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1467 if all( tmp ):
1468 main.log.info( "Flow count is consistent across all ONOS nodes" )
1469 else:
1470 consistentFlows = False
1471 utilities.assert_equals(
1472 expect=True,
1473 actual=consistentFlows,
1474 onpass="The flow count is consistent across all ONOS nodes",
1475 onfail="ONOS nodes have different flow counts" )
1476
1477 if flowsResults and not consistentFlows:
1478 for i in range( len( ONOSFlows ) ):
1479 node = str( main.activeNodes[i] + 1 )
1480 try:
1481 main.log.warn(
1482 "ONOS" + node + " flows: " +
1483 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1484 indent=4, separators=( ',', ': ' ) ) )
1485 except ( ValueError, TypeError ):
1486 main.log.warn( "ONOS" + node + " flows: " +
1487 repr( ONOSFlows[ i ] ) )
1488 elif flowsResults and consistentFlows:
1489 flowCheck = main.TRUE
1490 flowState = ONOSFlows[ 0 ]
1491
1492 main.step( "Get the OF Table entries" )
1493 global flows
1494 flows = []
1495 for i in range( 1, 29 ):
1496 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1497 if flowCheck == main.FALSE:
1498 for table in flows:
1499 main.log.warn( table )
1500 # TODO: Compare switch flow tables with ONOS flow tables
1501
1502 main.step( "Start continuous pings" )
1503 main.Mininet2.pingLong(
1504 src=main.params[ 'PING' ][ 'source1' ],
1505 target=main.params[ 'PING' ][ 'target1' ],
1506 pingTime=500 )
1507 main.Mininet2.pingLong(
1508 src=main.params[ 'PING' ][ 'source2' ],
1509 target=main.params[ 'PING' ][ 'target2' ],
1510 pingTime=500 )
1511 main.Mininet2.pingLong(
1512 src=main.params[ 'PING' ][ 'source3' ],
1513 target=main.params[ 'PING' ][ 'target3' ],
1514 pingTime=500 )
1515 main.Mininet2.pingLong(
1516 src=main.params[ 'PING' ][ 'source4' ],
1517 target=main.params[ 'PING' ][ 'target4' ],
1518 pingTime=500 )
1519 main.Mininet2.pingLong(
1520 src=main.params[ 'PING' ][ 'source5' ],
1521 target=main.params[ 'PING' ][ 'target5' ],
1522 pingTime=500 )
1523 main.Mininet2.pingLong(
1524 src=main.params[ 'PING' ][ 'source6' ],
1525 target=main.params[ 'PING' ][ 'target6' ],
1526 pingTime=500 )
1527 main.Mininet2.pingLong(
1528 src=main.params[ 'PING' ][ 'source7' ],
1529 target=main.params[ 'PING' ][ 'target7' ],
1530 pingTime=500 )
1531 main.Mininet2.pingLong(
1532 src=main.params[ 'PING' ][ 'source8' ],
1533 target=main.params[ 'PING' ][ 'target8' ],
1534 pingTime=500 )
1535 main.Mininet2.pingLong(
1536 src=main.params[ 'PING' ][ 'source9' ],
1537 target=main.params[ 'PING' ][ 'target9' ],
1538 pingTime=500 )
1539 main.Mininet2.pingLong(
1540 src=main.params[ 'PING' ][ 'source10' ],
1541 target=main.params[ 'PING' ][ 'target10' ],
1542 pingTime=500 )
1543
1544 main.step( "Collecting topology information from ONOS" )
1545 devices = []
1546 threads = []
1547 for i in main.activeNodes:
1548 t = main.Thread( target=main.CLIs[i].devices,
1549 name="devices-" + str( i ),
1550 args=[ ] )
1551 threads.append( t )
1552 t.start()
1553
1554 for t in threads:
1555 t.join()
1556 devices.append( t.result )
1557 hosts = []
1558 threads = []
1559 for i in main.activeNodes:
1560 t = main.Thread( target=main.CLIs[i].hosts,
1561 name="hosts-" + str( i ),
1562 args=[ ] )
1563 threads.append( t )
1564 t.start()
1565
1566 for t in threads:
1567 t.join()
1568 try:
1569 hosts.append( json.loads( t.result ) )
1570 except ( ValueError, TypeError ):
1571 # FIXME: better handling of this, print which node
1572 # Maybe use thread name?
1573 main.log.exception( "Error parsing json output of hosts" )
1574 main.log.warn( repr( t.result ) )
1575 hosts.append( None )
1576
1577 ports = []
1578 threads = []
1579 for i in main.activeNodes:
1580 t = main.Thread( target=main.CLIs[i].ports,
1581 name="ports-" + str( i ),
1582 args=[ ] )
1583 threads.append( t )
1584 t.start()
1585
1586 for t in threads:
1587 t.join()
1588 ports.append( t.result )
1589 links = []
1590 threads = []
1591 for i in main.activeNodes:
1592 t = main.Thread( target=main.CLIs[i].links,
1593 name="links-" + str( i ),
1594 args=[ ] )
1595 threads.append( t )
1596 t.start()
1597
1598 for t in threads:
1599 t.join()
1600 links.append( t.result )
1601 clusters = []
1602 threads = []
1603 for i in main.activeNodes:
1604 t = main.Thread( target=main.CLIs[i].clusters,
1605 name="clusters-" + str( i ),
1606 args=[ ] )
1607 threads.append( t )
1608 t.start()
1609
1610 for t in threads:
1611 t.join()
1612 clusters.append( t.result )
1613 # Compare json objects for hosts and dataplane clusters
1614
1615 # hosts
1616 main.step( "Host view is consistent across ONOS nodes" )
1617 consistentHostsResult = main.TRUE
1618 for controller in range( len( hosts ) ):
1619 controllerStr = str( main.activeNodes[controller] + 1 )
1620 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1621 if hosts[ controller ] == hosts[ 0 ]:
1622 continue
1623 else: # hosts not consistent
1624 main.log.error( "hosts from ONOS" +
1625 controllerStr +
1626 " is inconsistent with ONOS1" )
1627 main.log.warn( repr( hosts[ controller ] ) )
1628 consistentHostsResult = main.FALSE
1629
1630 else:
1631 main.log.error( "Error in getting ONOS hosts from ONOS" +
1632 controllerStr )
1633 consistentHostsResult = main.FALSE
1634 main.log.warn( "ONOS" + controllerStr +
1635 " hosts response: " +
1636 repr( hosts[ controller ] ) )
1637 utilities.assert_equals(
1638 expect=main.TRUE,
1639 actual=consistentHostsResult,
1640 onpass="Hosts view is consistent across all ONOS nodes",
1641 onfail="ONOS nodes have different views of hosts" )
1642
1643 main.step( "Each host has an IP address" )
1644 ipResult = main.TRUE
1645 for controller in range( 0, len( hosts ) ):
1646 controllerStr = str( main.activeNodes[controller] + 1 )
1647 if hosts[ controller ]:
1648 for host in hosts[ controller ]:
1649 if not host.get( 'ipAddresses', [ ] ):
1650 main.log.error( "Error with host ips on controller" +
1651 controllerStr + ": " + str( host ) )
1652 ipResult = main.FALSE
1653 utilities.assert_equals(
1654 expect=main.TRUE,
1655 actual=ipResult,
1656 onpass="The ips of the hosts aren't empty",
1657 onfail="The ip of at least one host is missing" )
1658
1659 # Strongly connected clusters of devices
1660 main.step( "Cluster view is consistent across ONOS nodes" )
1661 consistentClustersResult = main.TRUE
1662 for controller in range( len( clusters ) ):
1663 controllerStr = str( main.activeNodes[controller] + 1 )
1664 if "Error" not in clusters[ controller ]:
1665 if clusters[ controller ] == clusters[ 0 ]:
1666 continue
1667 else: # clusters not consistent
1668 main.log.error( "clusters from ONOS" + controllerStr +
1669 " is inconsistent with ONOS1" )
1670 consistentClustersResult = main.FALSE
1671
1672 else:
1673 main.log.error( "Error in getting dataplane clusters " +
1674 "from ONOS" + controllerStr )
1675 consistentClustersResult = main.FALSE
1676 main.log.warn( "ONOS" + controllerStr +
1677 " clusters response: " +
1678 repr( clusters[ controller ] ) )
1679 utilities.assert_equals(
1680 expect=main.TRUE,
1681 actual=consistentClustersResult,
1682 onpass="Clusters view is consistent across all ONOS nodes",
1683 onfail="ONOS nodes have different views of clusters" )
Jon Hall64948022016-05-12 13:38:50 -07001684 if not consistentClustersResult:
Jon Hall172b7ba2016-04-07 18:12:20 -07001685 main.log.debug( clusters )
Jon Hall64948022016-05-12 13:38:50 -07001686
Jon Hall6e709752016-02-01 13:38:46 -08001687 # there should always only be one cluster
1688 main.step( "Cluster view correct across ONOS nodes" )
1689 try:
1690 numClusters = len( json.loads( clusters[ 0 ] ) )
1691 except ( ValueError, TypeError ):
1692 main.log.exception( "Error parsing clusters[0]: " +
1693 repr( clusters[ 0 ] ) )
1694 numClusters = "ERROR"
1695 clusterResults = main.FALSE
1696 if numClusters == 1:
1697 clusterResults = main.TRUE
1698 utilities.assert_equals(
1699 expect=1,
1700 actual=numClusters,
1701 onpass="ONOS shows 1 SCC",
1702 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1703
1704 main.step( "Comparing ONOS topology to MN" )
1705 devicesResults = main.TRUE
1706 linksResults = main.TRUE
1707 hostsResults = main.TRUE
1708 mnSwitches = main.Mininet1.getSwitches()
1709 mnLinks = main.Mininet1.getLinks()
1710 mnHosts = main.Mininet1.getHosts()
1711 for controller in main.activeNodes:
1712 controllerStr = str( main.activeNodes[controller] + 1 )
1713 if devices[ controller ] and ports[ controller ] and\
1714 "Error" not in devices[ controller ] and\
1715 "Error" not in ports[ controller ]:
1716 currentDevicesResult = main.Mininet1.compareSwitches(
1717 mnSwitches,
1718 json.loads( devices[ controller ] ),
1719 json.loads( ports[ controller ] ) )
1720 else:
1721 currentDevicesResult = main.FALSE
1722 utilities.assert_equals( expect=main.TRUE,
1723 actual=currentDevicesResult,
1724 onpass="ONOS" + controllerStr +
1725 " Switches view is correct",
1726 onfail="ONOS" + controllerStr +
1727 " Switches view is incorrect" )
1728 if links[ controller ] and "Error" not in links[ controller ]:
1729 currentLinksResult = main.Mininet1.compareLinks(
1730 mnSwitches, mnLinks,
1731 json.loads( links[ controller ] ) )
1732 else:
1733 currentLinksResult = main.FALSE
1734 utilities.assert_equals( expect=main.TRUE,
1735 actual=currentLinksResult,
1736 onpass="ONOS" + controllerStr +
1737 " links view is correct",
1738 onfail="ONOS" + controllerStr +
1739 " links view is incorrect" )
1740
1741 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1742 currentHostsResult = main.Mininet1.compareHosts(
1743 mnHosts,
1744 hosts[ controller ] )
1745 else:
1746 currentHostsResult = main.FALSE
1747 utilities.assert_equals( expect=main.TRUE,
1748 actual=currentHostsResult,
1749 onpass="ONOS" + controllerStr +
1750 " hosts exist in Mininet",
1751 onfail="ONOS" + controllerStr +
1752 " hosts don't match Mininet" )
1753
1754 devicesResults = devicesResults and currentDevicesResult
1755 linksResults = linksResults and currentLinksResult
1756 hostsResults = hostsResults and currentHostsResult
1757
1758 main.step( "Device information is correct" )
1759 utilities.assert_equals(
1760 expect=main.TRUE,
1761 actual=devicesResults,
1762 onpass="Device information is correct",
1763 onfail="Device information is incorrect" )
1764
1765 main.step( "Links are correct" )
1766 utilities.assert_equals(
1767 expect=main.TRUE,
1768 actual=linksResults,
1769 onpass="Link are correct",
1770 onfail="Links are incorrect" )
1771
1772 main.step( "Hosts are correct" )
1773 utilities.assert_equals(
1774 expect=main.TRUE,
1775 actual=hostsResults,
1776 onpass="Hosts are correct",
1777 onfail="Hosts are incorrect" )
1778
1779 def CASE61( self, main ):
1780 """
1781 The Failure case.
1782 """
1783 import math
1784 assert main.numCtrls, "main.numCtrls not defined"
1785 assert main, "main not defined"
1786 assert utilities.assert_equals, "utilities.assert_equals not defined"
1787 assert main.CLIs, "main.CLIs not defined"
1788 assert main.nodes, "main.nodes not defined"
1789 main.case( "Partition ONOS nodes into two distinct partitions" )
1790
1791 main.step( "Checking ONOS Logs for errors" )
1792 for node in main.nodes:
1793 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1794 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1795
1796 n = len( main.nodes ) # Number of nodes
1797 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1798 main.partition = [ 0 ] # ONOS node to partition, listed by index in main.nodes
1799 if n > 3:
1800 main.partition.append( p - 1 )
1801 # NOTE: This only works for cluster sizes of 3,5, or 7.
1802
1803 main.step( "Partitioning ONOS nodes" )
1804 nodeList = [ str( i + 1 ) for i in main.partition ]
1805 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1806 partitionResults = main.TRUE
1807 for i in range( 0, n ):
1808 this = main.nodes[i]
1809 if i not in main.partition:
1810 for j in main.partition:
1811 foe = main.nodes[j]
1812 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1813 #CMD HERE
1814 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1815 this.handle.sendline( cmdStr )
1816 this.handle.expect( "\$" )
1817 main.log.debug( this.handle.before )
1818 else:
1819 for j in range( 0, n ):
1820 if j not in main.partition:
1821 foe = main.nodes[j]
1822 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1823 #CMD HERE
1824 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1825 this.handle.sendline( cmdStr )
1826 this.handle.expect( "\$" )
1827 main.log.debug( this.handle.before )
1828 main.activeNodes.remove( i )
1829 # NOTE: When dynamic clustering is finished, we need to start checking
1830 # main.partion nodes still work when partitioned
1831 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1832 onpass="Firewall rules set successfully",
1833 onfail="Error setting firewall rules" )
1834
1835 main.log.step( "Sleeping 60 seconds" )
1836 time.sleep( 60 )
1837
1838 def CASE62( self, main ):
1839 """
1840 Healing Partition
1841 """
1842 import time
1843 assert main.numCtrls, "main.numCtrls not defined"
1844 assert main, "main not defined"
1845 assert utilities.assert_equals, "utilities.assert_equals not defined"
1846 assert main.CLIs, "main.CLIs not defined"
1847 assert main.nodes, "main.nodes not defined"
1848 assert main.partition, "main.partition not defined"
1849 main.case( "Healing Partition" )
1850
1851 main.step( "Deleteing firewall rules" )
1852 healResults = main.TRUE
1853 for node in main.nodes:
1854 cmdStr = "sudo iptables -F"
1855 node.handle.sendline( cmdStr )
1856 node.handle.expect( "\$" )
1857 main.log.debug( node.handle.before )
1858 utilities.assert_equals( expect=main.TRUE, actual=healResults,
1859 onpass="Firewall rules removed",
1860 onfail="Error removing firewall rules" )
1861
1862 for node in main.partition:
1863 main.activeNodes.append( node )
1864 main.activeNodes.sort()
1865 try:
1866 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1867 "List of active nodes has duplicates, this likely indicates something was run out of order"
1868 except AssertionError:
1869 main.log.exception( "" )
1870 main.cleanup()
1871 main.exit()
1872
    def CASE7( self, main ):
        """
        Check state after ONOS failure

        Verifies control-plane state on the still-active nodes after the
        CASE61 partition: every switch has a master, device roles and
        intents are readable and consistent across active nodes, intents
        and switch flow tables are unchanged from before the failure, and
        the leadership-election app has a single leader that is not one of
        the partitioned nodes.

        Depends on earlier cases: `flows` and `intentState` are expected to
        have been saved by a previous case (CASE5) — TODO confirm; the test
        logs a warning if `intentState` was never saved.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # main.partition is created by CASE61; default to an empty list so
        # this case can still run if the partition case was skipped
        try:
            main.partition
        except AttributeError:
            main.partition = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        # Query every active node in parallel and AND the results together
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        # NOTE(review): mastershipCheck is assigned here but never used below
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Each node's response must be non-empty and free of "Error" text
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        # All nodes should return the same raw roles output as node 0
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        # On mismatch, dump each node's view for debugging
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                # NOTE(review): two positional args passed to log.warn —
                # confirm the logger actually prints the second argument
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        # Compare sorted responses so that ordering differences don't count
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent        ONOS1          ONOS2 ...
            # 0x01          INSTALLED      INSTALLING
            # ...           ...            ...
            # ...           ...            ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            # One table row per intent id, one state column per node
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        # intentState is a module/global saved by an earlier case — TODO confirm
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before failure" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same response length: fall back to element-wise comparison
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=sameIntents,
                onpass="Intents are consistent with before failure",
                onfail="The Intents changed during failure" )
            intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        # Hard-coded 28 switches — matches the obelisk topology used elsewhere
        # in this test (see CASE8 host mappings)
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            # `flows` is expected to be saved by an earlier case — TODO confirm
            curSwitch = main.Mininet1.flowTableComp( flows[i], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the nodes cut off by the partition; the leader must not be
        # one of them
        partitioned = []
        for i in main.partition:
            partitioned.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in partitioned:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was partitioned" )
                leaderResult = main.FALSE
        # All active nodes must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
2195
    def CASE8( self, main ):
        """
        Compare topo

        Repeatedly pulls devices, hosts, ports, links and clusters from
        every active ONOS node (looping for up to 60 seconds or at least 3
        attempts) and compares each node's view against the Mininet
        topology. Then checks cross-node consistency of hosts and clusters,
        expects exactly one strongly-connected cluster (SCC), verifies host
        attachment points against a hard-coded obelisk-topology mapping,
        and finally checks that the ONOS nodes themselves are healthy.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                               " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # --- Pull devices from each active node in parallel, with retry
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            # --- Pull hosts (parsed to JSON; None on parse failure)
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host must have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            # --- Pull ports
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            # --- Pull links
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            # --- Pull clusters (strongly-connected components)
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every query failed on every node there is nothing to compare
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            # Compare each node's view against the Mininet ground truth
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                   "Error" not in devices[ controller ] and\
                   "Error" not in ports[ controller ]:

                    # NOTE(review): on the except path currentDevicesResult
                    # may be left over from the previous iteration — confirm
                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ) as e:
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    # No hosts discovered counts as a pass here; attachment
                    # check below records it via zeroHosts
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                # NOTE(review): this overrides any attachment failures found
                # above whenever at least one host was discovered — confirm
                # this is the intended behavior
                if zeroHosts is False:
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
            topoResult = ( devicesResults and linksResults
                           and hostsResults and ipResult and
                           hostAttachmentResults )
        utilities.assert_equals( expect=True,
                                 actual=topoResult,
                                 onpass="ONOS topology matches Mininet",
                                 onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
        clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        # Roll every individual check into one overall topology verdict
        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # Also require convergence within 2 polling attempts
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
               " seconds for the test to make all the cli calls to fetch " +\
               "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.HA.nodesCheck,
                                       False,
                                       args=[main.activeNodes],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            # Dump any non-ACTIVE karaf components for debugging
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[i].name,
                    main.CLIs[i].sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -08002613
2614 def CASE9( self, main ):
2615 """
2616 Link s3-s28 down
2617 """
2618 import time
2619 assert main.numCtrls, "main.numCtrls not defined"
2620 assert main, "main not defined"
2621 assert utilities.assert_equals, "utilities.assert_equals not defined"
2622 assert main.CLIs, "main.CLIs not defined"
2623 assert main.nodes, "main.nodes not defined"
2624 # NOTE: You should probably run a topology check after this
2625
2626 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2627
2628 description = "Turn off a link to ensure that Link Discovery " +\
2629 "is working properly"
2630 main.case( description )
2631
2632 main.step( "Kill Link between s3 and s28" )
2633 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2634 main.log.info( "Waiting " + str( linkSleep ) +
2635 " seconds for link down to be discovered" )
2636 time.sleep( linkSleep )
2637 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2638 onpass="Link down successful",
2639 onfail="Failed to bring link down" )
2640 # TODO do some sort of check here
2641
2642 def CASE10( self, main ):
2643 """
2644 Link s3-s28 up
2645 """
2646 import time
2647 assert main.numCtrls, "main.numCtrls not defined"
2648 assert main, "main not defined"
2649 assert utilities.assert_equals, "utilities.assert_equals not defined"
2650 assert main.CLIs, "main.CLIs not defined"
2651 assert main.nodes, "main.nodes not defined"
2652 # NOTE: You should probably run a topology check after this
2653
2654 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2655
2656 description = "Restore a link to ensure that Link Discovery is " + \
2657 "working properly"
2658 main.case( description )
2659
2660 main.step( "Bring link between s3 and s28 back up" )
2661 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2662 main.log.info( "Waiting " + str( linkSleep ) +
2663 " seconds for link up to be discovered" )
2664 time.sleep( linkSleep )
2665 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2666 onpass="Link up successful",
2667 onfail="Failed to bring link up" )
2668 # TODO do some sort of check here
2669
2670 def CASE11( self, main ):
2671 """
2672 Switch Down
2673 """
2674 # NOTE: You should probably run a topology check after this
2675 import time
2676 assert main.numCtrls, "main.numCtrls not defined"
2677 assert main, "main not defined"
2678 assert utilities.assert_equals, "utilities.assert_equals not defined"
2679 assert main.CLIs, "main.CLIs not defined"
2680 assert main.nodes, "main.nodes not defined"
2681
2682 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2683
2684 description = "Killing a switch to ensure it is discovered correctly"
2685 onosCli = main.CLIs[ main.activeNodes[0] ]
2686 main.case( description )
2687 switch = main.params[ 'kill' ][ 'switch' ]
2688 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2689
2690 # TODO: Make this switch parameterizable
2691 main.step( "Kill " + switch )
2692 main.log.info( "Deleting " + switch )
2693 main.Mininet1.delSwitch( switch )
2694 main.log.info( "Waiting " + str( switchSleep ) +
2695 " seconds for switch down to be discovered" )
2696 time.sleep( switchSleep )
2697 device = onosCli.getDevice( dpid=switchDPID )
2698 # Peek at the deleted switch
2699 main.log.warn( str( device ) )
2700 result = main.FALSE
2701 if device and device[ 'available' ] is False:
2702 result = main.TRUE
2703 utilities.assert_equals( expect=main.TRUE, actual=result,
2704 onpass="Kill switch successful",
2705 onfail="Failed to kill switch?" )
2706
2707 def CASE12( self, main ):
2708 """
2709 Switch Up
2710 """
2711 # NOTE: You should probably run a topology check after this
2712 import time
2713 assert main.numCtrls, "main.numCtrls not defined"
2714 assert main, "main not defined"
2715 assert utilities.assert_equals, "utilities.assert_equals not defined"
2716 assert main.CLIs, "main.CLIs not defined"
2717 assert main.nodes, "main.nodes not defined"
2718 assert ONOS1Port, "ONOS1Port not defined"
2719 assert ONOS2Port, "ONOS2Port not defined"
2720 assert ONOS3Port, "ONOS3Port not defined"
2721 assert ONOS4Port, "ONOS4Port not defined"
2722 assert ONOS5Port, "ONOS5Port not defined"
2723 assert ONOS6Port, "ONOS6Port not defined"
2724 assert ONOS7Port, "ONOS7Port not defined"
2725
2726 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2727 switch = main.params[ 'kill' ][ 'switch' ]
2728 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2729 links = main.params[ 'kill' ][ 'links' ].split()
2730 onosCli = main.CLIs[ main.activeNodes[0] ]
2731 description = "Adding a switch to ensure it is discovered correctly"
2732 main.case( description )
2733
2734 main.step( "Add back " + switch )
2735 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2736 for peer in links:
2737 main.Mininet1.addLink( switch, peer )
2738 ipList = [ node.ip_address for node in main.nodes ]
2739 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2740 main.log.info( "Waiting " + str( switchSleep ) +
2741 " seconds for switch up to be discovered" )
2742 time.sleep( switchSleep )
2743 device = onosCli.getDevice( dpid=switchDPID )
2744 # Peek at the deleted switch
2745 main.log.warn( str( device ) )
2746 result = main.FALSE
2747 if device and device[ 'available' ]:
2748 result = main.TRUE
2749 utilities.assert_equals( expect=main.TRUE, actual=result,
2750 onpass="add switch successful",
2751 onfail="Failed to add switch?" )
2752
2753 def CASE13( self, main ):
2754 """
2755 Clean up
2756 """
2757 import os
2758 import time
2759 assert main.numCtrls, "main.numCtrls not defined"
2760 assert main, "main not defined"
2761 assert utilities.assert_equals, "utilities.assert_equals not defined"
2762 assert main.CLIs, "main.CLIs not defined"
2763 assert main.nodes, "main.nodes not defined"
2764
2765 # printing colors to terminal
2766 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2767 'blue': '\033[94m', 'green': '\033[92m',
2768 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2769 main.case( "Test Cleanup" )
2770 main.step( "Killing tcpdumps" )
2771 main.Mininet2.stopTcpdump()
2772
2773 testname = main.TEST
2774 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2775 main.step( "Copying MN pcap and ONOS log files to test station" )
2776 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2777 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2778 # NOTE: MN Pcap file is being saved to logdir.
2779 # We scp this file as MN and TestON aren't necessarily the same vm
2780
2781 # FIXME: To be replaced with a Jenkin's post script
2782 # TODO: Load these from params
2783 # NOTE: must end in /
2784 logFolder = "/opt/onos/log/"
2785 logFiles = [ "karaf.log", "karaf.log.1" ]
2786 # NOTE: must end in /
2787 for f in logFiles:
2788 for node in main.nodes:
2789 dstName = main.logdir + "/" + node.name + "-" + f
2790 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2791 logFolder + f, dstName )
2792 # std*.log's
2793 # NOTE: must end in /
2794 logFolder = "/opt/onos/var/"
2795 logFiles = [ "stderr.log", "stdout.log" ]
2796 # NOTE: must end in /
2797 for f in logFiles:
2798 for node in main.nodes:
2799 dstName = main.logdir + "/" + node.name + "-" + f
2800 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2801 logFolder + f, dstName )
2802 else:
2803 main.log.debug( "skipping saving log files" )
2804
2805 main.step( "Stopping Mininet" )
2806 mnResult = main.Mininet1.stopNet()
2807 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2808 onpass="Mininet stopped",
2809 onfail="MN cleanup NOT successful" )
2810
2811 main.step( "Checking ONOS Logs for errors" )
2812 for node in main.nodes:
2813 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2814 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2815
2816 try:
2817 timerLog = open( main.logdir + "/Timers.csv", 'w')
2818 # Overwrite with empty line and close
2819 labels = "Gossip Intents"
2820 data = str( gossipTime )
2821 timerLog.write( labels + "\n" + data )
2822 timerLog.close()
2823 except NameError, e:
2824 main.log.exception(e)
2825
2826 def CASE14( self, main ):
2827 """
2828 start election app on all onos nodes
2829 """
2830 assert main.numCtrls, "main.numCtrls not defined"
2831 assert main, "main not defined"
2832 assert utilities.assert_equals, "utilities.assert_equals not defined"
2833 assert main.CLIs, "main.CLIs not defined"
2834 assert main.nodes, "main.nodes not defined"
2835
2836 main.case("Start Leadership Election app")
2837 main.step( "Install leadership election app" )
2838 onosCli = main.CLIs[ main.activeNodes[0] ]
2839 appResult = onosCli.activateApp( "org.onosproject.election" )
2840 utilities.assert_equals(
2841 expect=main.TRUE,
2842 actual=appResult,
2843 onpass="Election app installed",
2844 onfail="Something went wrong with installing Leadership election" )
2845
2846 main.step( "Run for election on each node" )
Jon Hall6e709752016-02-01 13:38:46 -08002847 for i in main.activeNodes:
2848 main.CLIs[i].electionTestRun()
Jon Hall25463a82016-04-13 14:03:52 -07002849 time.sleep(5)
2850 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2851 sameResult, leaders = main.HA.consistentLeaderboards( activeCLIs )
Jon Hall6e709752016-02-01 13:38:46 -08002852 utilities.assert_equals(
Jon Hall25463a82016-04-13 14:03:52 -07002853 expect=True,
2854 actual=sameResult,
2855 onpass="All nodes see the same leaderboards",
2856 onfail="Inconsistent leaderboards" )
Jon Hall6e709752016-02-01 13:38:46 -08002857
Jon Hall25463a82016-04-13 14:03:52 -07002858 if sameResult:
2859 leader = leaders[ 0 ][ 0 ]
2860 if main.nodes[main.activeNodes[0]].ip_address in leader:
2861 correctLeader = True
2862 else:
2863 correctLeader = False
2864 main.step( "First node was elected leader" )
2865 utilities.assert_equals(
2866 expect=True,
2867 actual=correctLeader,
2868 onpass="Correct leader was elected",
2869 onfail="Incorrect leader" )
Jon Hall6e709752016-02-01 13:38:46 -08002870
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawl and later before withdrawl vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a canidate is not persistant

        # Leaderboard snapshots are lists of per-node candidate lists; index
        # [ 0 ][ 0 ] is the first node's view of the current leader.
        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leaders fron newLoeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        # 15.1: every active node joins the election
        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app the rest of this case cannot run
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        # 15.2: all nodes must agree on leader + candidate ordering
        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        # 15.3: withdraw the current leader from the election
        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement
            # runs only if the loop above never hit `break`
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        # 15.4: a different node should now hold leadership
        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = main.HA.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        # 15.5: the next-in-line candidate should have taken over
        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawl
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # fewer than 3 entries: no deterministic successor to check
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        # 15.6: re-enter the withdrawn node into the election
        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        # 15.7/15.8: the re-entered node should be last in the candidate list
        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Paremterize
        positionResult, reRunLeaders = main.HA.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[0]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3044
3045 def CASE16( self, main ):
3046 """
3047 Install Distributed Primitives app
3048 """
3049 import time
3050 assert main.numCtrls, "main.numCtrls not defined"
3051 assert main, "main not defined"
3052 assert utilities.assert_equals, "utilities.assert_equals not defined"
3053 assert main.CLIs, "main.CLIs not defined"
3054 assert main.nodes, "main.nodes not defined"
3055
3056 # Variables for the distributed primitives tests
3057 global pCounterName
Jon Hall6e709752016-02-01 13:38:46 -08003058 global pCounterValue
Jon Hall6e709752016-02-01 13:38:46 -08003059 global onosSet
3060 global onosSetName
3061 pCounterName = "TestON-Partitions"
Jon Hall6e709752016-02-01 13:38:46 -08003062 pCounterValue = 0
Jon Hall6e709752016-02-01 13:38:46 -08003063 onosSet = set([])
3064 onosSetName = "TestON-set"
3065
3066 description = "Install Primitives app"
3067 main.case( description )
3068 main.step( "Install Primitives app" )
3069 appName = "org.onosproject.distributedprimitives"
3070 node = main.activeNodes[0]
3071 appResults = main.CLIs[node].activateApp( appName )
3072 utilities.assert_equals( expect=main.TRUE,
3073 actual=appResults,
3074 onpass="Primitives app activated",
3075 onfail="Primitives app not activated" )
3076 time.sleep( 5 ) # To allow all nodes to activate
3077
3078 def CASE17( self, main ):
3079 """
3080 Check for basic functionality with distributed primitives
3081 """
3082 # Make sure variables are defined/set
3083 assert main.numCtrls, "main.numCtrls not defined"
3084 assert main, "main not defined"
3085 assert utilities.assert_equals, "utilities.assert_equals not defined"
3086 assert main.CLIs, "main.CLIs not defined"
3087 assert main.nodes, "main.nodes not defined"
3088 assert pCounterName, "pCounterName not defined"
Jon Hall6e709752016-02-01 13:38:46 -08003089 assert onosSetName, "onosSetName not defined"
3090 # NOTE: assert fails if value is 0/None/Empty/False
3091 try:
3092 pCounterValue
3093 except NameError:
3094 main.log.error( "pCounterValue not defined, setting to 0" )
3095 pCounterValue = 0
3096 try:
Jon Hall6e709752016-02-01 13:38:46 -08003097 onosSet
3098 except NameError:
3099 main.log.error( "onosSet not defined, setting to empty Set" )
3100 onosSet = set([])
3101 # Variables for the distributed primitives tests. These are local only
3102 addValue = "a"
3103 addAllValue = "a b c d e f"
3104 retainValue = "c d e f"
3105
3106 description = "Check for basic functionality with distributed " +\
3107 "primitives"
3108 main.case( description )
3109 main.caseExplanation = "Test the methods of the distributed " +\
3110 "primitives (counters and sets) throught the cli"
3111 # DISTRIBUTED ATOMIC COUNTERS
3112 # Partitioned counters
3113 main.step( "Increment then get a default counter on each node" )
3114 pCounters = []
3115 threads = []
3116 addedPValues = []
3117 for i in main.activeNodes:
3118 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3119 name="counterAddAndGet-" + str( i ),
3120 args=[ pCounterName ] )
3121 pCounterValue += 1
3122 addedPValues.append( pCounterValue )
3123 threads.append( t )
3124 t.start()
3125
3126 for t in threads:
3127 t.join()
3128 pCounters.append( t.result )
3129 # Check that counter incremented numController times
3130 pCounterResults = True
3131 for i in addedPValues:
3132 tmpResult = i in pCounters
3133 pCounterResults = pCounterResults and tmpResult
3134 if not tmpResult:
3135 main.log.error( str( i ) + " is not in partitioned "
3136 "counter incremented results" )
3137 utilities.assert_equals( expect=True,
3138 actual=pCounterResults,
3139 onpass="Default counter incremented",
3140 onfail="Error incrementing default" +
3141 " counter" )
3142
3143 main.step( "Get then Increment a default counter on each node" )
3144 pCounters = []
3145 threads = []
3146 addedPValues = []
3147 for i in main.activeNodes:
3148 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3149 name="counterGetAndAdd-" + str( i ),
3150 args=[ pCounterName ] )
3151 addedPValues.append( pCounterValue )
3152 pCounterValue += 1
3153 threads.append( t )
3154 t.start()
3155
3156 for t in threads:
3157 t.join()
3158 pCounters.append( t.result )
3159 # Check that counter incremented numController times
3160 pCounterResults = True
3161 for i in addedPValues:
3162 tmpResult = i in pCounters
3163 pCounterResults = pCounterResults and tmpResult
3164 if not tmpResult:
3165 main.log.error( str( i ) + " is not in partitioned "
3166 "counter incremented results" )
3167 utilities.assert_equals( expect=True,
3168 actual=pCounterResults,
3169 onpass="Default counter incremented",
3170 onfail="Error incrementing default" +
3171 " counter" )
3172
3173 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003174 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003175 utilities.assert_equals( expect=main.TRUE,
3176 actual=incrementCheck,
3177 onpass="Added counters are correct",
3178 onfail="Added counters are incorrect" )
3179
3180 main.step( "Add -8 to then get a default counter on each node" )
3181 pCounters = []
3182 threads = []
3183 addedPValues = []
3184 for i in main.activeNodes:
3185 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3186 name="counterIncrement-" + str( i ),
3187 args=[ pCounterName ],
3188 kwargs={ "delta": -8 } )
3189 pCounterValue += -8
3190 addedPValues.append( pCounterValue )
3191 threads.append( t )
3192 t.start()
3193
3194 for t in threads:
3195 t.join()
3196 pCounters.append( t.result )
3197 # Check that counter incremented numController times
3198 pCounterResults = True
3199 for i in addedPValues:
3200 tmpResult = i in pCounters
3201 pCounterResults = pCounterResults and tmpResult
3202 if not tmpResult:
3203 main.log.error( str( i ) + " is not in partitioned "
3204 "counter incremented results" )
3205 utilities.assert_equals( expect=True,
3206 actual=pCounterResults,
3207 onpass="Default counter incremented",
3208 onfail="Error incrementing default" +
3209 " counter" )
3210
3211 main.step( "Add 5 to then get a default counter on each node" )
3212 pCounters = []
3213 threads = []
3214 addedPValues = []
3215 for i in main.activeNodes:
3216 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3217 name="counterIncrement-" + str( i ),
3218 args=[ pCounterName ],
3219 kwargs={ "delta": 5 } )
3220 pCounterValue += 5
3221 addedPValues.append( pCounterValue )
3222 threads.append( t )
3223 t.start()
3224
3225 for t in threads:
3226 t.join()
3227 pCounters.append( t.result )
3228 # Check that counter incremented numController times
3229 pCounterResults = True
3230 for i in addedPValues:
3231 tmpResult = i in pCounters
3232 pCounterResults = pCounterResults and tmpResult
3233 if not tmpResult:
3234 main.log.error( str( i ) + " is not in partitioned "
3235 "counter incremented results" )
3236 utilities.assert_equals( expect=True,
3237 actual=pCounterResults,
3238 onpass="Default counter incremented",
3239 onfail="Error incrementing default" +
3240 " counter" )
3241
3242 main.step( "Get then add 5 to a default counter on each node" )
3243 pCounters = []
3244 threads = []
3245 addedPValues = []
3246 for i in main.activeNodes:
3247 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3248 name="counterIncrement-" + str( i ),
3249 args=[ pCounterName ],
3250 kwargs={ "delta": 5 } )
3251 addedPValues.append( pCounterValue )
3252 pCounterValue += 5
3253 threads.append( t )
3254 t.start()
3255
3256 for t in threads:
3257 t.join()
3258 pCounters.append( t.result )
3259 # Check that counter incremented numController times
3260 pCounterResults = True
3261 for i in addedPValues:
3262 tmpResult = i in pCounters
3263 pCounterResults = pCounterResults and tmpResult
3264 if not tmpResult:
3265 main.log.error( str( i ) + " is not in partitioned "
3266 "counter incremented results" )
3267 utilities.assert_equals( expect=True,
3268 actual=pCounterResults,
3269 onpass="Default counter incremented",
3270 onfail="Error incrementing default" +
3271 " counter" )
3272
3273 main.step( "Counters we added have the correct values" )
Jon Hall41d39f12016-04-11 22:54:35 -07003274 incrementCheck = main.HA.counterCheck( pCounterName, pCounterValue )
Jon Hall6e709752016-02-01 13:38:46 -08003275 utilities.assert_equals( expect=main.TRUE,
3276 actual=incrementCheck,
3277 onpass="Added counters are correct",
3278 onfail="Added counters are incorrect" )
3279
Jon Hall6e709752016-02-01 13:38:46 -08003280 # DISTRIBUTED SETS
3281 main.step( "Distributed Set get" )
3282 size = len( onosSet )
3283 getResponses = []
3284 threads = []
3285 for i in main.activeNodes:
3286 t = main.Thread( target=main.CLIs[i].setTestGet,
3287 name="setTestGet-" + str( i ),
3288 args=[ onosSetName ] )
3289 threads.append( t )
3290 t.start()
3291 for t in threads:
3292 t.join()
3293 getResponses.append( t.result )
3294
3295 getResults = main.TRUE
3296 for i in range( len( main.activeNodes ) ):
3297 node = str( main.activeNodes[i] + 1 )
3298 if isinstance( getResponses[ i ], list):
3299 current = set( getResponses[ i ] )
3300 if len( current ) == len( getResponses[ i ] ):
3301 # no repeats
3302 if onosSet != current:
3303 main.log.error( "ONOS" + node +
3304 " has incorrect view" +
3305 " of set " + onosSetName + ":\n" +
3306 str( getResponses[ i ] ) )
3307 main.log.debug( "Expected: " + str( onosSet ) )
3308 main.log.debug( "Actual: " + str( current ) )
3309 getResults = main.FALSE
3310 else:
3311 # error, set is not a set
3312 main.log.error( "ONOS" + node +
3313 " has repeat elements in" +
3314 " set " + onosSetName + ":\n" +
3315 str( getResponses[ i ] ) )
3316 getResults = main.FALSE
3317 elif getResponses[ i ] == main.ERROR:
3318 getResults = main.FALSE
3319 utilities.assert_equals( expect=main.TRUE,
3320 actual=getResults,
3321 onpass="Set elements are correct",
3322 onfail="Set elements are incorrect" )
3323
3324 main.step( "Distributed Set size" )
3325 sizeResponses = []
3326 threads = []
3327 for i in main.activeNodes:
3328 t = main.Thread( target=main.CLIs[i].setTestSize,
3329 name="setTestSize-" + str( i ),
3330 args=[ onosSetName ] )
3331 threads.append( t )
3332 t.start()
3333 for t in threads:
3334 t.join()
3335 sizeResponses.append( t.result )
3336
3337 sizeResults = main.TRUE
3338 for i in range( len( main.activeNodes ) ):
3339 node = str( main.activeNodes[i] + 1 )
3340 if size != sizeResponses[ i ]:
3341 sizeResults = main.FALSE
3342 main.log.error( "ONOS" + node +
3343 " expected a size of " + str( size ) +
3344 " for set " + onosSetName +
3345 " but got " + str( sizeResponses[ i ] ) )
3346 utilities.assert_equals( expect=main.TRUE,
3347 actual=sizeResults,
3348 onpass="Set sizes are correct",
3349 onfail="Set sizes are incorrect" )
3350
3351 main.step( "Distributed Set add()" )
3352 onosSet.add( addValue )
3353 addResponses = []
3354 threads = []
3355 for i in main.activeNodes:
3356 t = main.Thread( target=main.CLIs[i].setTestAdd,
3357 name="setTestAdd-" + str( i ),
3358 args=[ onosSetName, addValue ] )
3359 threads.append( t )
3360 t.start()
3361 for t in threads:
3362 t.join()
3363 addResponses.append( t.result )
3364
3365 # main.TRUE = successfully changed the set
3366 # main.FALSE = action resulted in no change in set
3367 # main.ERROR - Some error in executing the function
3368 addResults = main.TRUE
3369 for i in range( len( main.activeNodes ) ):
3370 if addResponses[ i ] == main.TRUE:
3371 # All is well
3372 pass
3373 elif addResponses[ i ] == main.FALSE:
3374 # Already in set, probably fine
3375 pass
3376 elif addResponses[ i ] == main.ERROR:
3377 # Error in execution
3378 addResults = main.FALSE
3379 else:
3380 # unexpected result
3381 addResults = main.FALSE
3382 if addResults != main.TRUE:
3383 main.log.error( "Error executing set add" )
3384
3385 # Check if set is still correct
3386 size = len( onosSet )
3387 getResponses = []
3388 threads = []
3389 for i in main.activeNodes:
3390 t = main.Thread( target=main.CLIs[i].setTestGet,
3391 name="setTestGet-" + str( i ),
3392 args=[ onosSetName ] )
3393 threads.append( t )
3394 t.start()
3395 for t in threads:
3396 t.join()
3397 getResponses.append( t.result )
3398 getResults = main.TRUE
3399 for i in range( len( main.activeNodes ) ):
3400 node = str( main.activeNodes[i] + 1 )
3401 if isinstance( getResponses[ i ], list):
3402 current = set( getResponses[ i ] )
3403 if len( current ) == len( getResponses[ i ] ):
3404 # no repeats
3405 if onosSet != current:
3406 main.log.error( "ONOS" + node + " has incorrect view" +
3407 " of set " + onosSetName + ":\n" +
3408 str( getResponses[ i ] ) )
3409 main.log.debug( "Expected: " + str( onosSet ) )
3410 main.log.debug( "Actual: " + str( current ) )
3411 getResults = main.FALSE
3412 else:
3413 # error, set is not a set
3414 main.log.error( "ONOS" + node + " has repeat elements in" +
3415 " set " + onosSetName + ":\n" +
3416 str( getResponses[ i ] ) )
3417 getResults = main.FALSE
3418 elif getResponses[ i ] == main.ERROR:
3419 getResults = main.FALSE
3420 sizeResponses = []
3421 threads = []
3422 for i in main.activeNodes:
3423 t = main.Thread( target=main.CLIs[i].setTestSize,
3424 name="setTestSize-" + str( i ),
3425 args=[ onosSetName ] )
3426 threads.append( t )
3427 t.start()
3428 for t in threads:
3429 t.join()
3430 sizeResponses.append( t.result )
3431 sizeResults = main.TRUE
3432 for i in range( len( main.activeNodes ) ):
3433 node = str( main.activeNodes[i] + 1 )
3434 if size != sizeResponses[ i ]:
3435 sizeResults = main.FALSE
3436 main.log.error( "ONOS" + node +
3437 " expected a size of " + str( size ) +
3438 " for set " + onosSetName +
3439 " but got " + str( sizeResponses[ i ] ) )
3440 addResults = addResults and getResults and sizeResults
3441 utilities.assert_equals( expect=main.TRUE,
3442 actual=addResults,
3443 onpass="Set add correct",
3444 onfail="Set add was incorrect" )
3445
3446 main.step( "Distributed Set addAll()" )
3447 onosSet.update( addAllValue.split() )
3448 addResponses = []
3449 threads = []
3450 for i in main.activeNodes:
3451 t = main.Thread( target=main.CLIs[i].setTestAdd,
3452 name="setTestAddAll-" + str( i ),
3453 args=[ onosSetName, addAllValue ] )
3454 threads.append( t )
3455 t.start()
3456 for t in threads:
3457 t.join()
3458 addResponses.append( t.result )
3459
3460 # main.TRUE = successfully changed the set
3461 # main.FALSE = action resulted in no change in set
3462 # main.ERROR - Some error in executing the function
3463 addAllResults = main.TRUE
3464 for i in range( len( main.activeNodes ) ):
3465 if addResponses[ i ] == main.TRUE:
3466 # All is well
3467 pass
3468 elif addResponses[ i ] == main.FALSE:
3469 # Already in set, probably fine
3470 pass
3471 elif addResponses[ i ] == main.ERROR:
3472 # Error in execution
3473 addAllResults = main.FALSE
3474 else:
3475 # unexpected result
3476 addAllResults = main.FALSE
3477 if addAllResults != main.TRUE:
3478 main.log.error( "Error executing set addAll" )
3479
3480 # Check if set is still correct
3481 size = len( onosSet )
3482 getResponses = []
3483 threads = []
3484 for i in main.activeNodes:
3485 t = main.Thread( target=main.CLIs[i].setTestGet,
3486 name="setTestGet-" + str( i ),
3487 args=[ onosSetName ] )
3488 threads.append( t )
3489 t.start()
3490 for t in threads:
3491 t.join()
3492 getResponses.append( t.result )
3493 getResults = main.TRUE
3494 for i in range( len( main.activeNodes ) ):
3495 node = str( main.activeNodes[i] + 1 )
3496 if isinstance( getResponses[ i ], list):
3497 current = set( getResponses[ i ] )
3498 if len( current ) == len( getResponses[ i ] ):
3499 # no repeats
3500 if onosSet != current:
3501 main.log.error( "ONOS" + node +
3502 " has incorrect view" +
3503 " of set " + onosSetName + ":\n" +
3504 str( getResponses[ i ] ) )
3505 main.log.debug( "Expected: " + str( onosSet ) )
3506 main.log.debug( "Actual: " + str( current ) )
3507 getResults = main.FALSE
3508 else:
3509 # error, set is not a set
3510 main.log.error( "ONOS" + node +
3511 " has repeat elements in" +
3512 " set " + onosSetName + ":\n" +
3513 str( getResponses[ i ] ) )
3514 getResults = main.FALSE
3515 elif getResponses[ i ] == main.ERROR:
3516 getResults = main.FALSE
3517 sizeResponses = []
3518 threads = []
3519 for i in main.activeNodes:
3520 t = main.Thread( target=main.CLIs[i].setTestSize,
3521 name="setTestSize-" + str( i ),
3522 args=[ onosSetName ] )
3523 threads.append( t )
3524 t.start()
3525 for t in threads:
3526 t.join()
3527 sizeResponses.append( t.result )
3528 sizeResults = main.TRUE
3529 for i in range( len( main.activeNodes ) ):
3530 node = str( main.activeNodes[i] + 1 )
3531 if size != sizeResponses[ i ]:
3532 sizeResults = main.FALSE
3533 main.log.error( "ONOS" + node +
3534 " expected a size of " + str( size ) +
3535 " for set " + onosSetName +
3536 " but got " + str( sizeResponses[ i ] ) )
3537 addAllResults = addAllResults and getResults and sizeResults
3538 utilities.assert_equals( expect=main.TRUE,
3539 actual=addAllResults,
3540 onpass="Set addAll correct",
3541 onfail="Set addAll was incorrect" )
3542
        main.step( "Distributed Set contains()" )
        # Ask every active node whether addValue is in the distributed set.
        # With the "values" kwarg, setTestGet returns a tuple whose second
        # element is the contains() result (see index [ 1 ] below), or
        # main.ERROR if the cli command failed.
        containsResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setContains-" + str( i ),
                             args=[ onosSetName ],
                             kwargs={ "values": addValue } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            # NOTE: This is the tuple
            containsResponses.append( t.result )

        # Every node must report the value as present for the step to pass
        containsResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if containsResponses[ i ] == main.ERROR:
                containsResults = main.FALSE
            else:
                containsResults = containsResults and\
                                  containsResponses[ i ][ 1 ]
        utilities.assert_equals( expect=main.TRUE,
                                 actual=containsResults,
                                 onpass="Set contains is functional",
                                 onfail="Set contains failed" )
3569
3570 main.step( "Distributed Set containsAll()" )
3571 containsAllResponses = []
3572 threads = []
3573 for i in main.activeNodes:
3574 t = main.Thread( target=main.CLIs[i].setTestGet,
3575 name="setContainsAll-" + str( i ),
3576 args=[ onosSetName ],
3577 kwargs={ "values": addAllValue } )
3578 threads.append( t )
3579 t.start()
3580 for t in threads:
3581 t.join()
3582 # NOTE: This is the tuple
3583 containsAllResponses.append( t.result )
3584
3585 containsAllResults = main.TRUE
3586 for i in range( len( main.activeNodes ) ):
3587 if containsResponses[ i ] == main.ERROR:
3588 containsResults = main.FALSE
3589 else:
3590 containsResults = containsResults and\
3591 containsResponses[ i ][ 1 ]
3592 utilities.assert_equals( expect=main.TRUE,
3593 actual=containsAllResults,
3594 onpass="Set containsAll is functional",
3595 onfail="Set containsAll failed" )
3596
3597 main.step( "Distributed Set remove()" )
3598 onosSet.remove( addValue )
3599 removeResponses = []
3600 threads = []
3601 for i in main.activeNodes:
3602 t = main.Thread( target=main.CLIs[i].setTestRemove,
3603 name="setTestRemove-" + str( i ),
3604 args=[ onosSetName, addValue ] )
3605 threads.append( t )
3606 t.start()
3607 for t in threads:
3608 t.join()
3609 removeResponses.append( t.result )
3610
3611 # main.TRUE = successfully changed the set
3612 # main.FALSE = action resulted in no change in set
3613 # main.ERROR - Some error in executing the function
3614 removeResults = main.TRUE
3615 for i in range( len( main.activeNodes ) ):
3616 if removeResponses[ i ] == main.TRUE:
3617 # All is well
3618 pass
3619 elif removeResponses[ i ] == main.FALSE:
3620 # not in set, probably fine
3621 pass
3622 elif removeResponses[ i ] == main.ERROR:
3623 # Error in execution
3624 removeResults = main.FALSE
3625 else:
3626 # unexpected result
3627 removeResults = main.FALSE
3628 if removeResults != main.TRUE:
3629 main.log.error( "Error executing set remove" )
3630
3631 # Check if set is still correct
3632 size = len( onosSet )
3633 getResponses = []
3634 threads = []
3635 for i in main.activeNodes:
3636 t = main.Thread( target=main.CLIs[i].setTestGet,
3637 name="setTestGet-" + str( i ),
3638 args=[ onosSetName ] )
3639 threads.append( t )
3640 t.start()
3641 for t in threads:
3642 t.join()
3643 getResponses.append( t.result )
3644 getResults = main.TRUE
3645 for i in range( len( main.activeNodes ) ):
3646 node = str( main.activeNodes[i] + 1 )
3647 if isinstance( getResponses[ i ], list):
3648 current = set( getResponses[ i ] )
3649 if len( current ) == len( getResponses[ i ] ):
3650 # no repeats
3651 if onosSet != current:
3652 main.log.error( "ONOS" + node +
3653 " has incorrect view" +
3654 " of set " + onosSetName + ":\n" +
3655 str( getResponses[ i ] ) )
3656 main.log.debug( "Expected: " + str( onosSet ) )
3657 main.log.debug( "Actual: " + str( current ) )
3658 getResults = main.FALSE
3659 else:
3660 # error, set is not a set
3661 main.log.error( "ONOS" + node +
3662 " has repeat elements in" +
3663 " set " + onosSetName + ":\n" +
3664 str( getResponses[ i ] ) )
3665 getResults = main.FALSE
3666 elif getResponses[ i ] == main.ERROR:
3667 getResults = main.FALSE
3668 sizeResponses = []
3669 threads = []
3670 for i in main.activeNodes:
3671 t = main.Thread( target=main.CLIs[i].setTestSize,
3672 name="setTestSize-" + str( i ),
3673 args=[ onosSetName ] )
3674 threads.append( t )
3675 t.start()
3676 for t in threads:
3677 t.join()
3678 sizeResponses.append( t.result )
3679 sizeResults = main.TRUE
3680 for i in range( len( main.activeNodes ) ):
3681 node = str( main.activeNodes[i] + 1 )
3682 if size != sizeResponses[ i ]:
3683 sizeResults = main.FALSE
3684 main.log.error( "ONOS" + node +
3685 " expected a size of " + str( size ) +
3686 " for set " + onosSetName +
3687 " but got " + str( sizeResponses[ i ] ) )
3688 removeResults = removeResults and getResults and sizeResults
3689 utilities.assert_equals( expect=main.TRUE,
3690 actual=removeResults,
3691 onpass="Set remove correct",
3692 onfail="Set remove was incorrect" )
3693
3694 main.step( "Distributed Set removeAll()" )
3695 onosSet.difference_update( addAllValue.split() )
3696 removeAllResponses = []
3697 threads = []
3698 try:
3699 for i in main.activeNodes:
3700 t = main.Thread( target=main.CLIs[i].setTestRemove,
3701 name="setTestRemoveAll-" + str( i ),
3702 args=[ onosSetName, addAllValue ] )
3703 threads.append( t )
3704 t.start()
3705 for t in threads:
3706 t.join()
3707 removeAllResponses.append( t.result )
3708 except Exception, e:
3709 main.log.exception(e)
3710
3711 # main.TRUE = successfully changed the set
3712 # main.FALSE = action resulted in no change in set
3713 # main.ERROR - Some error in executing the function
3714 removeAllResults = main.TRUE
3715 for i in range( len( main.activeNodes ) ):
3716 if removeAllResponses[ i ] == main.TRUE:
3717 # All is well
3718 pass
3719 elif removeAllResponses[ i ] == main.FALSE:
3720 # not in set, probably fine
3721 pass
3722 elif removeAllResponses[ i ] == main.ERROR:
3723 # Error in execution
3724 removeAllResults = main.FALSE
3725 else:
3726 # unexpected result
3727 removeAllResults = main.FALSE
3728 if removeAllResults != main.TRUE:
3729 main.log.error( "Error executing set removeAll" )
3730
3731 # Check if set is still correct
3732 size = len( onosSet )
3733 getResponses = []
3734 threads = []
3735 for i in main.activeNodes:
3736 t = main.Thread( target=main.CLIs[i].setTestGet,
3737 name="setTestGet-" + str( i ),
3738 args=[ onosSetName ] )
3739 threads.append( t )
3740 t.start()
3741 for t in threads:
3742 t.join()
3743 getResponses.append( t.result )
3744 getResults = main.TRUE
3745 for i in range( len( main.activeNodes ) ):
3746 node = str( main.activeNodes[i] + 1 )
3747 if isinstance( getResponses[ i ], list):
3748 current = set( getResponses[ i ] )
3749 if len( current ) == len( getResponses[ i ] ):
3750 # no repeats
3751 if onosSet != current:
3752 main.log.error( "ONOS" + node +
3753 " has incorrect view" +
3754 " of set " + onosSetName + ":\n" +
3755 str( getResponses[ i ] ) )
3756 main.log.debug( "Expected: " + str( onosSet ) )
3757 main.log.debug( "Actual: " + str( current ) )
3758 getResults = main.FALSE
3759 else:
3760 # error, set is not a set
3761 main.log.error( "ONOS" + node +
3762 " has repeat elements in" +
3763 " set " + onosSetName + ":\n" +
3764 str( getResponses[ i ] ) )
3765 getResults = main.FALSE
3766 elif getResponses[ i ] == main.ERROR:
3767 getResults = main.FALSE
3768 sizeResponses = []
3769 threads = []
3770 for i in main.activeNodes:
3771 t = main.Thread( target=main.CLIs[i].setTestSize,
3772 name="setTestSize-" + str( i ),
3773 args=[ onosSetName ] )
3774 threads.append( t )
3775 t.start()
3776 for t in threads:
3777 t.join()
3778 sizeResponses.append( t.result )
3779 sizeResults = main.TRUE
3780 for i in range( len( main.activeNodes ) ):
3781 node = str( main.activeNodes[i] + 1 )
3782 if size != sizeResponses[ i ]:
3783 sizeResults = main.FALSE
3784 main.log.error( "ONOS" + node +
3785 " expected a size of " + str( size ) +
3786 " for set " + onosSetName +
3787 " but got " + str( sizeResponses[ i ] ) )
3788 removeAllResults = removeAllResults and getResults and sizeResults
3789 utilities.assert_equals( expect=main.TRUE,
3790 actual=removeAllResults,
3791 onpass="Set removeAll correct",
3792 onfail="Set removeAll was incorrect" )
3793
3794 main.step( "Distributed Set addAll()" )
3795 onosSet.update( addAllValue.split() )
3796 addResponses = []
3797 threads = []
3798 for i in main.activeNodes:
3799 t = main.Thread( target=main.CLIs[i].setTestAdd,
3800 name="setTestAddAll-" + str( i ),
3801 args=[ onosSetName, addAllValue ] )
3802 threads.append( t )
3803 t.start()
3804 for t in threads:
3805 t.join()
3806 addResponses.append( t.result )
3807
3808 # main.TRUE = successfully changed the set
3809 # main.FALSE = action resulted in no change in set
3810 # main.ERROR - Some error in executing the function
3811 addAllResults = main.TRUE
3812 for i in range( len( main.activeNodes ) ):
3813 if addResponses[ i ] == main.TRUE:
3814 # All is well
3815 pass
3816 elif addResponses[ i ] == main.FALSE:
3817 # Already in set, probably fine
3818 pass
3819 elif addResponses[ i ] == main.ERROR:
3820 # Error in execution
3821 addAllResults = main.FALSE
3822 else:
3823 # unexpected result
3824 addAllResults = main.FALSE
3825 if addAllResults != main.TRUE:
3826 main.log.error( "Error executing set addAll" )
3827
3828 # Check if set is still correct
3829 size = len( onosSet )
3830 getResponses = []
3831 threads = []
3832 for i in main.activeNodes:
3833 t = main.Thread( target=main.CLIs[i].setTestGet,
3834 name="setTestGet-" + str( i ),
3835 args=[ onosSetName ] )
3836 threads.append( t )
3837 t.start()
3838 for t in threads:
3839 t.join()
3840 getResponses.append( t.result )
3841 getResults = main.TRUE
3842 for i in range( len( main.activeNodes ) ):
3843 node = str( main.activeNodes[i] + 1 )
3844 if isinstance( getResponses[ i ], list):
3845 current = set( getResponses[ i ] )
3846 if len( current ) == len( getResponses[ i ] ):
3847 # no repeats
3848 if onosSet != current:
3849 main.log.error( "ONOS" + node +
3850 " has incorrect view" +
3851 " of set " + onosSetName + ":\n" +
3852 str( getResponses[ i ] ) )
3853 main.log.debug( "Expected: " + str( onosSet ) )
3854 main.log.debug( "Actual: " + str( current ) )
3855 getResults = main.FALSE
3856 else:
3857 # error, set is not a set
3858 main.log.error( "ONOS" + node +
3859 " has repeat elements in" +
3860 " set " + onosSetName + ":\n" +
3861 str( getResponses[ i ] ) )
3862 getResults = main.FALSE
3863 elif getResponses[ i ] == main.ERROR:
3864 getResults = main.FALSE
3865 sizeResponses = []
3866 threads = []
3867 for i in main.activeNodes:
3868 t = main.Thread( target=main.CLIs[i].setTestSize,
3869 name="setTestSize-" + str( i ),
3870 args=[ onosSetName ] )
3871 threads.append( t )
3872 t.start()
3873 for t in threads:
3874 t.join()
3875 sizeResponses.append( t.result )
3876 sizeResults = main.TRUE
3877 for i in range( len( main.activeNodes ) ):
3878 node = str( main.activeNodes[i] + 1 )
3879 if size != sizeResponses[ i ]:
3880 sizeResults = main.FALSE
3881 main.log.error( "ONOS" + node +
3882 " expected a size of " + str( size ) +
3883 " for set " + onosSetName +
3884 " but got " + str( sizeResponses[ i ] ) )
3885 addAllResults = addAllResults and getResults and sizeResults
3886 utilities.assert_equals( expect=main.TRUE,
3887 actual=addAllResults,
3888 onpass="Set addAll correct",
3889 onfail="Set addAll was incorrect" )
3890
3891 main.step( "Distributed Set clear()" )
3892 onosSet.clear()
3893 clearResponses = []
3894 threads = []
3895 for i in main.activeNodes:
3896 t = main.Thread( target=main.CLIs[i].setTestRemove,
3897 name="setTestClear-" + str( i ),
3898 args=[ onosSetName, " "], # Values doesn't matter
3899 kwargs={ "clear": True } )
3900 threads.append( t )
3901 t.start()
3902 for t in threads:
3903 t.join()
3904 clearResponses.append( t.result )
3905
3906 # main.TRUE = successfully changed the set
3907 # main.FALSE = action resulted in no change in set
3908 # main.ERROR - Some error in executing the function
3909 clearResults = main.TRUE
3910 for i in range( len( main.activeNodes ) ):
3911 if clearResponses[ i ] == main.TRUE:
3912 # All is well
3913 pass
3914 elif clearResponses[ i ] == main.FALSE:
3915 # Nothing set, probably fine
3916 pass
3917 elif clearResponses[ i ] == main.ERROR:
3918 # Error in execution
3919 clearResults = main.FALSE
3920 else:
3921 # unexpected result
3922 clearResults = main.FALSE
3923 if clearResults != main.TRUE:
3924 main.log.error( "Error executing set clear" )
3925
3926 # Check if set is still correct
3927 size = len( onosSet )
3928 getResponses = []
3929 threads = []
3930 for i in main.activeNodes:
3931 t = main.Thread( target=main.CLIs[i].setTestGet,
3932 name="setTestGet-" + str( i ),
3933 args=[ onosSetName ] )
3934 threads.append( t )
3935 t.start()
3936 for t in threads:
3937 t.join()
3938 getResponses.append( t.result )
3939 getResults = main.TRUE
3940 for i in range( len( main.activeNodes ) ):
3941 node = str( main.activeNodes[i] + 1 )
3942 if isinstance( getResponses[ i ], list):
3943 current = set( getResponses[ i ] )
3944 if len( current ) == len( getResponses[ i ] ):
3945 # no repeats
3946 if onosSet != current:
3947 main.log.error( "ONOS" + node +
3948 " has incorrect view" +
3949 " of set " + onosSetName + ":\n" +
3950 str( getResponses[ i ] ) )
3951 main.log.debug( "Expected: " + str( onosSet ) )
3952 main.log.debug( "Actual: " + str( current ) )
3953 getResults = main.FALSE
3954 else:
3955 # error, set is not a set
3956 main.log.error( "ONOS" + node +
3957 " has repeat elements in" +
3958 " set " + onosSetName + ":\n" +
3959 str( getResponses[ i ] ) )
3960 getResults = main.FALSE
3961 elif getResponses[ i ] == main.ERROR:
3962 getResults = main.FALSE
3963 sizeResponses = []
3964 threads = []
3965 for i in main.activeNodes:
3966 t = main.Thread( target=main.CLIs[i].setTestSize,
3967 name="setTestSize-" + str( i ),
3968 args=[ onosSetName ] )
3969 threads.append( t )
3970 t.start()
3971 for t in threads:
3972 t.join()
3973 sizeResponses.append( t.result )
3974 sizeResults = main.TRUE
3975 for i in range( len( main.activeNodes ) ):
3976 node = str( main.activeNodes[i] + 1 )
3977 if size != sizeResponses[ i ]:
3978 sizeResults = main.FALSE
3979 main.log.error( "ONOS" + node +
3980 " expected a size of " + str( size ) +
3981 " for set " + onosSetName +
3982 " but got " + str( sizeResponses[ i ] ) )
3983 clearResults = clearResults and getResults and sizeResults
3984 utilities.assert_equals( expect=main.TRUE,
3985 actual=clearResults,
3986 onpass="Set clear correct",
3987 onfail="Set clear was incorrect" )
3988
3989 main.step( "Distributed Set addAll()" )
3990 onosSet.update( addAllValue.split() )
3991 addResponses = []
3992 threads = []
3993 for i in main.activeNodes:
3994 t = main.Thread( target=main.CLIs[i].setTestAdd,
3995 name="setTestAddAll-" + str( i ),
3996 args=[ onosSetName, addAllValue ] )
3997 threads.append( t )
3998 t.start()
3999 for t in threads:
4000 t.join()
4001 addResponses.append( t.result )
4002
4003 # main.TRUE = successfully changed the set
4004 # main.FALSE = action resulted in no change in set
4005 # main.ERROR - Some error in executing the function
4006 addAllResults = main.TRUE
4007 for i in range( len( main.activeNodes ) ):
4008 if addResponses[ i ] == main.TRUE:
4009 # All is well
4010 pass
4011 elif addResponses[ i ] == main.FALSE:
4012 # Already in set, probably fine
4013 pass
4014 elif addResponses[ i ] == main.ERROR:
4015 # Error in execution
4016 addAllResults = main.FALSE
4017 else:
4018 # unexpected result
4019 addAllResults = main.FALSE
4020 if addAllResults != main.TRUE:
4021 main.log.error( "Error executing set addAll" )
4022
4023 # Check if set is still correct
4024 size = len( onosSet )
4025 getResponses = []
4026 threads = []
4027 for i in main.activeNodes:
4028 t = main.Thread( target=main.CLIs[i].setTestGet,
4029 name="setTestGet-" + str( i ),
4030 args=[ onosSetName ] )
4031 threads.append( t )
4032 t.start()
4033 for t in threads:
4034 t.join()
4035 getResponses.append( t.result )
4036 getResults = main.TRUE
4037 for i in range( len( main.activeNodes ) ):
4038 node = str( main.activeNodes[i] + 1 )
4039 if isinstance( getResponses[ i ], list):
4040 current = set( getResponses[ i ] )
4041 if len( current ) == len( getResponses[ i ] ):
4042 # no repeats
4043 if onosSet != current:
4044 main.log.error( "ONOS" + node +
4045 " has incorrect view" +
4046 " of set " + onosSetName + ":\n" +
4047 str( getResponses[ i ] ) )
4048 main.log.debug( "Expected: " + str( onosSet ) )
4049 main.log.debug( "Actual: " + str( current ) )
4050 getResults = main.FALSE
4051 else:
4052 # error, set is not a set
4053 main.log.error( "ONOS" + node +
4054 " has repeat elements in" +
4055 " set " + onosSetName + ":\n" +
4056 str( getResponses[ i ] ) )
4057 getResults = main.FALSE
4058 elif getResponses[ i ] == main.ERROR:
4059 getResults = main.FALSE
4060 sizeResponses = []
4061 threads = []
4062 for i in main.activeNodes:
4063 t = main.Thread( target=main.CLIs[i].setTestSize,
4064 name="setTestSize-" + str( i ),
4065 args=[ onosSetName ] )
4066 threads.append( t )
4067 t.start()
4068 for t in threads:
4069 t.join()
4070 sizeResponses.append( t.result )
4071 sizeResults = main.TRUE
4072 for i in range( len( main.activeNodes ) ):
4073 node = str( main.activeNodes[i] + 1 )
4074 if size != sizeResponses[ i ]:
4075 sizeResults = main.FALSE
4076 main.log.error( "ONOS" + node +
4077 " expected a size of " + str( size ) +
4078 " for set " + onosSetName +
4079 " but got " + str( sizeResponses[ i ] ) )
4080 addAllResults = addAllResults and getResults and sizeResults
4081 utilities.assert_equals( expect=main.TRUE,
4082 actual=addAllResults,
4083 onpass="Set addAll correct",
4084 onfail="Set addAll was incorrect" )
4085
4086 main.step( "Distributed Set retain()" )
4087 onosSet.intersection_update( retainValue.split() )
4088 retainResponses = []
4089 threads = []
4090 for i in main.activeNodes:
4091 t = main.Thread( target=main.CLIs[i].setTestRemove,
4092 name="setTestRetain-" + str( i ),
4093 args=[ onosSetName, retainValue ],
4094 kwargs={ "retain": True } )
4095 threads.append( t )
4096 t.start()
4097 for t in threads:
4098 t.join()
4099 retainResponses.append( t.result )
4100
4101 # main.TRUE = successfully changed the set
4102 # main.FALSE = action resulted in no change in set
4103 # main.ERROR - Some error in executing the function
4104 retainResults = main.TRUE
4105 for i in range( len( main.activeNodes ) ):
4106 if retainResponses[ i ] == main.TRUE:
4107 # All is well
4108 pass
4109 elif retainResponses[ i ] == main.FALSE:
4110 # Already in set, probably fine
4111 pass
4112 elif retainResponses[ i ] == main.ERROR:
4113 # Error in execution
4114 retainResults = main.FALSE
4115 else:
4116 # unexpected result
4117 retainResults = main.FALSE
4118 if retainResults != main.TRUE:
4119 main.log.error( "Error executing set retain" )
4120
4121 # Check if set is still correct
4122 size = len( onosSet )
4123 getResponses = []
4124 threads = []
4125 for i in main.activeNodes:
4126 t = main.Thread( target=main.CLIs[i].setTestGet,
4127 name="setTestGet-" + str( i ),
4128 args=[ onosSetName ] )
4129 threads.append( t )
4130 t.start()
4131 for t in threads:
4132 t.join()
4133 getResponses.append( t.result )
4134 getResults = main.TRUE
4135 for i in range( len( main.activeNodes ) ):
4136 node = str( main.activeNodes[i] + 1 )
4137 if isinstance( getResponses[ i ], list):
4138 current = set( getResponses[ i ] )
4139 if len( current ) == len( getResponses[ i ] ):
4140 # no repeats
4141 if onosSet != current:
4142 main.log.error( "ONOS" + node +
4143 " has incorrect view" +
4144 " of set " + onosSetName + ":\n" +
4145 str( getResponses[ i ] ) )
4146 main.log.debug( "Expected: " + str( onosSet ) )
4147 main.log.debug( "Actual: " + str( current ) )
4148 getResults = main.FALSE
4149 else:
4150 # error, set is not a set
4151 main.log.error( "ONOS" + node +
4152 " has repeat elements in" +
4153 " set " + onosSetName + ":\n" +
4154 str( getResponses[ i ] ) )
4155 getResults = main.FALSE
4156 elif getResponses[ i ] == main.ERROR:
4157 getResults = main.FALSE
4158 sizeResponses = []
4159 threads = []
4160 for i in main.activeNodes:
4161 t = main.Thread( target=main.CLIs[i].setTestSize,
4162 name="setTestSize-" + str( i ),
4163 args=[ onosSetName ] )
4164 threads.append( t )
4165 t.start()
4166 for t in threads:
4167 t.join()
4168 sizeResponses.append( t.result )
4169 sizeResults = main.TRUE
4170 for i in range( len( main.activeNodes ) ):
4171 node = str( main.activeNodes[i] + 1 )
4172 if size != sizeResponses[ i ]:
4173 sizeResults = main.FALSE
4174 main.log.error( "ONOS" + node + " expected a size of " +
4175 str( size ) + " for set " + onosSetName +
4176 " but got " + str( sizeResponses[ i ] ) )
4177 retainResults = retainResults and getResults and sizeResults
4178 utilities.assert_equals( expect=main.TRUE,
4179 actual=retainResults,
4180 onpass="Set retain correct",
4181 onfail="Set retain was incorrect" )
4182
4183 # Transactional maps
4184 main.step( "Partitioned Transactional maps put" )
4185 tMapValue = "Testing"
4186 numKeys = 100
4187 putResult = True
4188 node = main.activeNodes[0]
4189 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4190 if putResponses and len( putResponses ) == 100:
4191 for i in putResponses:
4192 if putResponses[ i ][ 'value' ] != tMapValue:
4193 putResult = False
4194 else:
4195 putResult = False
4196 if not putResult:
4197 main.log.debug( "Put response values: " + str( putResponses ) )
4198 utilities.assert_equals( expect=True,
4199 actual=putResult,
4200 onpass="Partitioned Transactional Map put successful",
4201 onfail="Partitioned Transactional Map put values are incorrect" )
4202
        main.step( "Partitioned Transactional maps get" )
        # FIXME: is this sleep needed?
        time.sleep( 5 )

        # For each key written in the put step, read it back from every
        # active node in parallel and check all nodes agree on the value.
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            # All responses for this key must equal the value that was put
            for node in getResponses:
                if node != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )