blob: 12d8bbb5760e068ec33158a55393c0a32734d660 [file] [log] [blame]
Jon Hall6e709752016-02-01 13:38:46 -08001"""
2Description: This test is to determine if ONOS can handle
a full network partition
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
14CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
28class HAfullNetPartition:
29
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
51 import imp
52 import pexpect
53 import time
Jon Halla440e872016-03-31 15:15:50 -070054 import json
Jon Hall6e709752016-02-01 13:38:46 -080055 main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
56 "initialization" )
57 main.case( "Setting up test environment" )
58 main.caseExplanation = "Setup the test environment including " +\
59 "installing ONOS, starting Mininet and ONOS" +\
60 "cli sessions."
61
62 # load some variables from the params file
63 PULLCODE = False
64 if main.params[ 'Git' ] == 'True':
65 PULLCODE = True
66 gitBranch = main.params[ 'branch' ]
67 cellName = main.params[ 'ENV' ][ 'cellName' ]
68
69 main.numCtrls = int( main.params[ 'num_controllers' ] )
70 if main.ONOSbench.maxNodes:
71 if main.ONOSbench.maxNodes < main.numCtrls:
72 main.numCtrls = int( main.ONOSbench.maxNodes )
73 # set global variables
74 global ONOS1Port
75 global ONOS2Port
76 global ONOS3Port
77 global ONOS4Port
78 global ONOS5Port
79 global ONOS6Port
80 global ONOS7Port
Jon Halla440e872016-03-31 15:15:50 -070081 # These are for csv plotting in jenkins
82 global labels
83 global data
84 labels = []
85 data = []
Jon Hall6e709752016-02-01 13:38:46 -080086
87 # FIXME: just get controller port from params?
88 # TODO: do we really need all these?
89 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
90 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
91 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
92 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
93 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
94 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
95 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
96
97 try:
Jon Halla440e872016-03-31 15:15:50 -070098 from tests.HAsanity.dependencies.Counters import Counters
99 main.Counters = Counters()
Jon Hall6e709752016-02-01 13:38:46 -0800100 except Exception as e:
101 main.log.exception( e )
102 main.cleanup()
103 main.exit()
104
105 main.CLIs = []
106 main.nodes = []
107 ipList = []
108 for i in range( 1, main.numCtrls + 1 ):
109 try:
110 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
111 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
112 ipList.append( main.nodes[ -1 ].ip_address )
113 except AttributeError:
114 break
115
116 main.step( "Create cell file" )
117 cellAppString = main.params[ 'ENV' ][ 'appString' ]
118 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
119 main.Mininet1.ip_address,
120 cellAppString, ipList )
121 main.step( "Applying cell variable to environment" )
122 cellResult = main.ONOSbench.setCell( cellName )
123 verifyResult = main.ONOSbench.verifyCell()
124
125 # FIXME:this is short term fix
126 main.log.info( "Removing raft logs" )
127 main.ONOSbench.onosRemoveRaftLogs()
128
129 main.log.info( "Uninstalling ONOS" )
130 for node in main.nodes:
131 main.ONOSbench.onosUninstall( node.ip_address )
132
133 # Make sure ONOS is DEAD
134 main.log.info( "Killing any ONOS processes" )
135 killResults = main.TRUE
136 for node in main.nodes:
137 killed = main.ONOSbench.onosKill( node.ip_address )
138 killResults = killResults and killed
139
140 cleanInstallResult = main.TRUE
141 gitPullResult = main.TRUE
142
143 main.step( "Starting Mininet" )
144 # scp topo file to mininet
145 # TODO: move to params?
146 topoName = "obelisk.py"
147 filePath = main.ONOSbench.home + "/tools/test/topos/"
148 main.ONOSbench.scp( main.Mininet1,
149 filePath + topoName,
150 main.Mininet1.home,
151 direction="to" )
152 mnResult = main.Mininet1.startNet( )
153 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
154 onpass="Mininet Started",
155 onfail="Error starting Mininet" )
156
157 main.step( "Git checkout and pull " + gitBranch )
158 if PULLCODE:
159 main.ONOSbench.gitCheckout( gitBranch )
160 gitPullResult = main.ONOSbench.gitPull()
161 # values of 1 or 3 are good
162 utilities.assert_lesser( expect=0, actual=gitPullResult,
163 onpass="Git pull successful",
164 onfail="Git pull failed" )
165 main.ONOSbench.getVersion( report=True )
166
167 main.step( "Using mvn clean install" )
168 cleanInstallResult = main.TRUE
169 if PULLCODE and gitPullResult == main.TRUE:
170 cleanInstallResult = main.ONOSbench.cleanInstall()
171 else:
172 main.log.warn( "Did not pull new code so skipping mvn " +
173 "clean install" )
174 utilities.assert_equals( expect=main.TRUE,
175 actual=cleanInstallResult,
176 onpass="MCI successful",
177 onfail="MCI failed" )
178 # GRAPHS
179 # NOTE: important params here:
180 # job = name of Jenkins job
181 # Plot Name = Plot-HA, only can be used if multiple plots
182 # index = The number of the graph under plot name
183 job = "HAfullNetPartition"
184 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700185 index = "1"
Jon Hall6e709752016-02-01 13:38:46 -0800186 graphs = '<ac:structured-macro ac:name="html">\n'
187 graphs += '<ac:plain-text-body><![CDATA[\n'
188 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
189 '/plot/' + plotName + '/getPlot?index=' + index +\
190 '&width=500&height=300"' +\
191 'noborder="0" width="500" height="300" scrolling="yes" ' +\
192 'seamless="seamless"></iframe>\n'
193 graphs += ']]></ac:plain-text-body>\n'
194 graphs += '</ac:structured-macro>\n'
195 main.log.wiki(graphs)
196
197 main.step( "Creating ONOS package" )
198 # copy gen-partions file to ONOS
199 # NOTE: this assumes TestON and ONOS are on the same machine
200 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
201 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
202 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
203 main.ONOSbench.ip_address,
204 srcFile,
205 dstDir,
206 pwd=main.ONOSbench.pwd,
207 direction="from" )
208 packageResult = main.ONOSbench.onosPackage()
209 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
210 onpass="ONOS package successful",
211 onfail="ONOS package failed" )
212
213 main.step( "Installing ONOS package" )
214 onosInstallResult = main.TRUE
215 for node in main.nodes:
216 tmpResult = main.ONOSbench.onosInstall( options="-f",
217 node=node.ip_address )
218 onosInstallResult = onosInstallResult and tmpResult
219 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
220 onpass="ONOS install successful",
221 onfail="ONOS install failed" )
222 # clean up gen-partitions file
223 try:
224 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
225 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
226 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
227 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
228 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
229 str( main.ONOSbench.handle.before ) )
230 except ( pexpect.TIMEOUT, pexpect.EOF ):
231 main.log.exception( "ONOSbench: pexpect exception found:" +
232 main.ONOSbench.handle.before )
233 main.cleanup()
234 main.exit()
235
236 main.step( "Checking if ONOS is up yet" )
237 for i in range( 2 ):
238 onosIsupResult = main.TRUE
239 for node in main.nodes:
240 started = main.ONOSbench.isup( node.ip_address )
241 if not started:
242 main.log.error( node.name + " hasn't started" )
243 onosIsupResult = onosIsupResult and started
244 if onosIsupResult == main.TRUE:
245 break
246 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
247 onpass="ONOS startup successful",
248 onfail="ONOS startup failed" )
249
250 main.log.step( "Starting ONOS CLI sessions" )
251 cliResults = main.TRUE
252 threads = []
253 for i in range( main.numCtrls ):
254 t = main.Thread( target=main.CLIs[i].startOnosCli,
255 name="startOnosCli-" + str( i ),
256 args=[main.nodes[i].ip_address] )
257 threads.append( t )
258 t.start()
259
260 for t in threads:
261 t.join()
262 cliResults = cliResults and t.result
263 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
264 onpass="ONOS cli startup successful",
265 onfail="ONOS cli startup failed" )
266
267 # Create a list of active nodes for use when some nodes are stopped
268 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
269
270 if main.params[ 'tcpdump' ].lower() == "true":
271 main.step( "Start Packet Capture MN" )
272 main.Mininet2.startTcpdump(
273 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
274 + "-MN.pcap",
275 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
276 port=main.params[ 'MNtcpdump' ][ 'port' ] )
277
Jon Halla440e872016-03-31 15:15:50 -0700278 main.step( "Checking ONOS nodes" )
279 nodesOutput = []
280 nodeResults = main.TRUE
281 threads = []
282 for i in main.activeNodes:
283 t = main.Thread( target=main.CLIs[i].nodes,
284 name="nodes-" + str( i ),
285 args=[ ] )
286 threads.append( t )
287 t.start()
288
289 for t in threads:
290 t.join()
291 nodesOutput.append( t.result )
292 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
293 ips.sort()
294 for i in nodesOutput:
295 try:
296 current = json.loads( i )
297 activeIps = []
298 currentResult = main.FALSE
299 for node in current:
300 if node['state'] == 'READY':
301 activeIps.append( node['ip'] )
302 activeIps.sort()
303 if ips == activeIps:
304 currentResult = main.TRUE
305 except ( ValueError, TypeError ):
306 main.log.error( "Error parsing nodes output" )
307 main.log.warn( repr( i ) )
308 currentResult = main.FALSE
309 nodeResults = nodeResults and currentResult
310 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
311 onpass="Nodes check successful",
312 onfail="Nodes check NOT successful" )
313
314 if not nodeResults:
315 for cli in main.CLIs:
316 main.log.debug( "{} components not ACTIVE: \n{}".format(
317 cli.name,
318 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
319
Jon Hall6e709752016-02-01 13:38:46 -0800320 if cliResults == main.FALSE:
321 main.log.error( "Failed to start ONOS, stopping test" )
322 main.cleanup()
323 main.exit()
324
Jon Hall172b7ba2016-04-07 18:12:20 -0700325 main.step( "Activate apps defined in the params file" )
326 # get data from the params
327 apps = main.params.get( 'apps' )
328 if apps:
329 apps = apps.split(',')
330 main.log.warn( apps )
331 activateResult = True
332 for app in apps:
333 main.CLIs[ 0 ].app( app, "Activate" )
334 # TODO: check this worked
335 time.sleep( 10 ) # wait for apps to activate
336 for app in apps:
337 state = main.CLIs[ 0 ].appStatus( app )
338 if state == "ACTIVE":
339 activateResult = activeResult and True
340 else:
341 main.log.error( "{} is in {} state".format( app, state ) )
342 activeResult = False
343 utilities.assert_equals( expect=True,
344 actual=activateResult,
345 onpass="Successfully activated apps",
346 onfail="Failed to activate apps" )
347 else:
348 main.log.warn( "No apps were specified to be loaded after startup" )
349
350 main.step( "Set ONOS configurations" )
351 config = main.params.get( 'ONOS_Configuration' )
352 if config:
353 main.log.debug( config )
354 checkResult = main.TRUE
355 for component in config:
356 for setting in config[component]:
357 value = config[component][setting]
358 check = main.CLIs[ 0 ].setCfg( component, setting, value )
359 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
360 checkResult = check and checkResult
361 utilities.assert_equals( expect=main.TRUE,
362 actual=checkResult,
363 onpass="Successfully set config",
364 onfail="Failed to set config" )
365 else:
366 main.log.warn( "No configurations were specified to be changed after startup" )
367
Jon Hall9d2dcad2016-04-08 10:15:20 -0700368 main.step( "App Ids check" )
369 appCheck = main.TRUE
370 threads = []
371 for i in main.activeNodes:
372 t = main.Thread( target=main.CLIs[i].appToIDCheck,
373 name="appToIDCheck-" + str( i ),
374 args=[] )
375 threads.append( t )
376 t.start()
377
378 for t in threads:
379 t.join()
380 appCheck = appCheck and t.result
381 if appCheck != main.TRUE:
382 node = main.activeNodes[0]
383 main.log.warn( main.CLIs[node].apps() )
384 main.log.warn( main.CLIs[node].appIDs() )
385 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
386 onpass="App Ids seem to be correct",
387 onfail="Something is wrong with app Ids" )
388
Jon Hall6e709752016-02-01 13:38:46 -0800389 def CASE2( self, main ):
390 """
391 Assign devices to controllers
392 """
393 import re
394 assert main.numCtrls, "main.numCtrls not defined"
395 assert main, "main not defined"
396 assert utilities.assert_equals, "utilities.assert_equals not defined"
397 assert main.CLIs, "main.CLIs not defined"
398 assert main.nodes, "main.nodes not defined"
399 assert ONOS1Port, "ONOS1Port not defined"
400 assert ONOS2Port, "ONOS2Port not defined"
401 assert ONOS3Port, "ONOS3Port not defined"
402 assert ONOS4Port, "ONOS4Port not defined"
403 assert ONOS5Port, "ONOS5Port not defined"
404 assert ONOS6Port, "ONOS6Port not defined"
405 assert ONOS7Port, "ONOS7Port not defined"
406
407 main.case( "Assigning devices to controllers" )
408 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
409 "and check that an ONOS node becomes the " +\
410 "master of the device."
411 main.step( "Assign switches to controllers" )
412
413 ipList = []
414 for i in range( main.numCtrls ):
415 ipList.append( main.nodes[ i ].ip_address )
416 swList = []
417 for i in range( 1, 29 ):
418 swList.append( "s" + str( i ) )
419 main.Mininet1.assignSwController( sw=swList, ip=ipList )
420
421 mastershipCheck = main.TRUE
422 for i in range( 1, 29 ):
423 response = main.Mininet1.getSwController( "s" + str( i ) )
424 try:
425 main.log.info( str( response ) )
426 except Exception:
427 main.log.info( repr( response ) )
428 for node in main.nodes:
429 if re.search( "tcp:" + node.ip_address, response ):
430 mastershipCheck = mastershipCheck and main.TRUE
431 else:
432 main.log.error( "Error, node " + node.ip_address + " is " +
433 "not in the list of controllers s" +
434 str( i ) + " is connecting to." )
435 mastershipCheck = main.FALSE
436 utilities.assert_equals(
437 expect=main.TRUE,
438 actual=mastershipCheck,
439 onpass="Switch mastership assigned correctly",
440 onfail="Switches not assigned correctly to controllers" )
441
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Uses the 'device-role' CLI command (via deviceRole()) on the first
        active ONOS node to pin each of the 28 switches to a predetermined
        controller.  Controller indices are taken modulo main.numCtrls so the
        mapping still works on clusters smaller than 7 nodes.  After a short
        settle period the roles are read back with getRole() to verify each
        assignment took effect.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                                "device. Then manually assign" +\
                                " mastership to specific ONOS nodes using" +\
                                " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        # All role calls go through the first active node's CLI
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = controller index, ip = that node's IP,
                # deviceId = ONOS id of the device matching the dpid suffix
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    # dpids for s8-s17 look like 3008..3017
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    # dpids for s18-s27 look like 6018..6027
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice() returned None / no id -- dump the device view
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment: the requested IP must be the current master
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
563
    def CASE3( self, main ):
        """
        Assign intents

        Temporarily activates reactive forwarding and runs pingall so ONOS
        discovers all hosts, then installs 10 predetermined host-to-host
        intents (h8<->h18 ... h17<->h27), spreading the add calls across the
        active nodes.  Verifies the intents reach every node in the INSTALLED
        state and measures how long the anti-entropy gossip takes to disperse
        them; on failure, dumps leaders, partitions, and the pending map for
        debugging and retries the check after a 60 second sleep.
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                                "assign predetermined host-to-host intents." +\
                                " After installation, check that the intent" +\
                                " is distributed to all nodes and the state" +\
                                " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        # Run appToIDCheck on every active node in parallel
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            # one retry before failing the step
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        # Re-verify app ids now that fwd has been deactivated
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            # Host MACs encode the host number in the last octet (hex)
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                # Round-robin the intent adds over the active nodes
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        # Record when submission finished so gossip time can be measured
        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        # Dump leadership info: every intent partition should have a leader
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False)
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        # Poll (up to 100 times, 1s apart) until every active node reports the
        # same intent IDs, all in the INSTALLED state
        for j in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        # gossipTime is global so later cases can plot/report it
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        # Last-chance retry: wait a minute and recheck states plus the same
        # leaders / partitions / pending-map debug dumps as above
        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
959
960 def CASE4( self, main ):
961 """
962 Ping across added host intents
963 """
964 import json
965 import time
966 assert main.numCtrls, "main.numCtrls not defined"
967 assert main, "main not defined"
968 assert utilities.assert_equals, "utilities.assert_equals not defined"
969 assert main.CLIs, "main.CLIs not defined"
970 assert main.nodes, "main.nodes not defined"
971 main.case( "Verify connectivity by sending traffic across Intents" )
972 main.caseExplanation = "Ping across added host intents to check " +\
973 "functionality and check the state of " +\
974 "the intent"
Jon Hall6e709752016-02-01 13:38:46 -0800975
976 main.step( "Check Intent state" )
977 installedCheck = False
978 loopCount = 0
979 while not installedCheck and loopCount < 40:
980 installedCheck = True
981 # Print the intent states
982 intents = onosCli.intents()
983 intentStates = []
984 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
985 count = 0
986 # Iter through intents of a node
987 try:
988 for intent in json.loads( intents ):
989 state = intent.get( 'state', None )
990 if "INSTALLED" not in state:
991 installedCheck = False
992 intentId = intent.get( 'id', None )
993 intentStates.append( ( intentId, state ) )
994 except ( ValueError, TypeError ):
995 main.log.exception( "Error parsing intents." )
996 # Print states
997 intentStates.sort()
998 for i, s in intentStates:
999 count += 1
1000 main.log.info( "%-6s%-15s%-15s" %
1001 ( str( count ), str( i ), str( s ) ) )
1002 if not installedCheck:
1003 time.sleep( 1 )
1004 loopCount += 1
1005 utilities.assert_equals( expect=True, actual=installedCheck,
1006 onpass="Intents are all INSTALLED",
1007 onfail="Intents are not all in " +
1008 "INSTALLED state" )
1009
Jon Hall9d2dcad2016-04-08 10:15:20 -07001010 main.step( "Ping across added host intents" )
1011 onosCli = main.CLIs[ main.activeNodes[0] ]
1012 PingResult = main.TRUE
1013 for i in range( 8, 18 ):
1014 ping = main.Mininet1.pingHost( src="h" + str( i ),
1015 target="h" + str( i + 10 ) )
1016 PingResult = PingResult and ping
1017 if ping == main.FALSE:
1018 main.log.warn( "Ping failed between h" + str( i ) +
1019 " and h" + str( i + 10 ) )
1020 elif ping == main.TRUE:
1021 main.log.info( "Ping test passed!" )
1022 # Don't set PingResult or you'd override failures
1023 if PingResult == main.FALSE:
1024 main.log.error(
1025 "Intents have not been installed correctly, pings failed." )
1026 # TODO: pretty print
1027 main.log.warn( "ONOS1 intents: " )
1028 try:
1029 tmpIntents = onosCli.intents()
1030 main.log.warn( json.dumps( json.loads( tmpIntents ),
1031 sort_keys=True,
1032 indent=4,
1033 separators=( ',', ': ' ) ) )
1034 except ( ValueError, TypeError ):
1035 main.log.warn( repr( tmpIntents ) )
1036 utilities.assert_equals(
1037 expect=main.TRUE,
1038 actual=PingResult,
1039 onpass="Intents have been installed correctly and pings work",
1040 onfail="Intents have not been installed correctly, pings failed." )
1041
Jon Hall6e709752016-02-01 13:38:46 -08001042 main.step( "Check leadership of topics" )
1043 leaders = onosCli.leaders()
1044 topicCheck = main.TRUE
1045 try:
1046 if leaders:
1047 parsedLeaders = json.loads( leaders )
1048 main.log.warn( json.dumps( parsedLeaders,
1049 sort_keys=True,
1050 indent=4,
1051 separators=( ',', ': ' ) ) )
1052 # check for all intent partitions
1053 # check for election
1054 # TODO: Look at Devices as topics now that it uses this system
1055 topics = []
1056 for i in range( 14 ):
1057 topics.append( "intent-partition-" + str( i ) )
1058 # FIXME: this should only be after we start the app
1059 # FIXME: topics.append( "org.onosproject.election" )
1060 # Print leaders output
1061 main.log.debug( topics )
1062 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1063 for topic in topics:
1064 if topic not in ONOStopics:
1065 main.log.error( "Error: " + topic +
1066 " not in leaders" )
1067 topicCheck = main.FALSE
1068 else:
1069 main.log.error( "leaders() returned None" )
1070 topicCheck = main.FALSE
1071 except ( ValueError, TypeError ):
1072 topicCheck = main.FALSE
1073 main.log.exception( "Error parsing leaders" )
1074 main.log.error( repr( leaders ) )
1075 # TODO: Check for a leader of these topics
1076 # Check all nodes
1077 if topicCheck:
1078 for i in main.activeNodes:
1079 node = main.CLIs[i]
1080 response = node.leaders( jsonFormat=False)
1081 main.log.warn( str( node.name ) + " leaders output: \n" +
1082 str( response ) )
1083
1084 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1085 onpass="intent Partitions is in leaders",
1086 onfail="Some topics were lost " )
1087 # Print partitions
1088 partitions = onosCli.partitions()
1089 try:
1090 if partitions :
1091 parsedPartitions = json.loads( partitions )
1092 main.log.warn( json.dumps( parsedPartitions,
1093 sort_keys=True,
1094 indent=4,
1095 separators=( ',', ': ' ) ) )
1096 # TODO check for a leader in all paritions
1097 # TODO check for consistency among nodes
1098 else:
1099 main.log.error( "partitions() returned None" )
1100 except ( ValueError, TypeError ):
1101 main.log.exception( "Error parsing partitions" )
1102 main.log.error( repr( partitions ) )
1103 # Print Pending Map
1104 pendingMap = onosCli.pendingMap()
1105 try:
1106 if pendingMap :
1107 parsedPending = json.loads( pendingMap )
1108 main.log.warn( json.dumps( parsedPending,
1109 sort_keys=True,
1110 indent=4,
1111 separators=( ',', ': ' ) ) )
1112 # TODO check something here?
1113 else:
1114 main.log.error( "pendingMap() returned None" )
1115 except ( ValueError, TypeError ):
1116 main.log.exception( "Error parsing pending map" )
1117 main.log.error( repr( pendingMap ) )
1118
1119 if not installedCheck:
1120 main.log.info( "Waiting 60 seconds to see if the state of " +
1121 "intents change" )
1122 time.sleep( 60 )
1123 # Print the intent states
1124 intents = onosCli.intents()
1125 intentStates = []
1126 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1127 count = 0
1128 # Iter through intents of a node
1129 try:
1130 for intent in json.loads( intents ):
1131 state = intent.get( 'state', None )
1132 if "INSTALLED" not in state:
1133 installedCheck = False
1134 intentId = intent.get( 'id', None )
1135 intentStates.append( ( intentId, state ) )
1136 except ( ValueError, TypeError ):
1137 main.log.exception( "Error parsing intents." )
1138 intentStates.sort()
1139 for i, s in intentStates:
1140 count += 1
1141 main.log.info( "%-6s%-15s%-15s" %
1142 ( str( count ), str( i ), str( s ) ) )
1143 leaders = onosCli.leaders()
1144 try:
1145 missing = False
1146 if leaders:
1147 parsedLeaders = json.loads( leaders )
1148 main.log.warn( json.dumps( parsedLeaders,
1149 sort_keys=True,
1150 indent=4,
1151 separators=( ',', ': ' ) ) )
1152 # check for all intent partitions
1153 # check for election
1154 topics = []
1155 for i in range( 14 ):
1156 topics.append( "intent-partition-" + str( i ) )
1157 # FIXME: this should only be after we start the app
1158 topics.append( "org.onosproject.election" )
1159 main.log.debug( topics )
1160 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1161 for topic in topics:
1162 if topic not in ONOStopics:
1163 main.log.error( "Error: " + topic +
1164 " not in leaders" )
1165 missing = True
1166 else:
1167 main.log.error( "leaders() returned None" )
1168 except ( ValueError, TypeError ):
1169 main.log.exception( "Error parsing leaders" )
1170 main.log.error( repr( leaders ) )
1171 if missing:
1172 for i in main.activeNodes:
1173 node = main.CLIs[i]
1174 response = node.leaders( jsonFormat=False)
1175 main.log.warn( str( node.name ) + " leaders output: \n" +
1176 str( response ) )
1177
1178 partitions = onosCli.partitions()
1179 try:
1180 if partitions :
1181 parsedPartitions = json.loads( partitions )
1182 main.log.warn( json.dumps( parsedPartitions,
1183 sort_keys=True,
1184 indent=4,
1185 separators=( ',', ': ' ) ) )
1186 # TODO check for a leader in all paritions
1187 # TODO check for consistency among nodes
1188 else:
1189 main.log.error( "partitions() returned None" )
1190 except ( ValueError, TypeError ):
1191 main.log.exception( "Error parsing partitions" )
1192 main.log.error( repr( partitions ) )
1193 pendingMap = onosCli.pendingMap()
1194 try:
1195 if pendingMap :
1196 parsedPending = json.loads( pendingMap )
1197 main.log.warn( json.dumps( parsedPending,
1198 sort_keys=True,
1199 indent=4,
1200 separators=( ',', ': ' ) ) )
1201 # TODO check something here?
1202 else:
1203 main.log.error( "pendingMap() returned None" )
1204 except ( ValueError, TypeError ):
1205 main.log.exception( "Error parsing pending map" )
1206 main.log.error( repr( pendingMap ) )
1207 # Print flowrules
1208 node = main.activeNodes[0]
1209 main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
1210 main.step( "Wait a minute then ping again" )
1211 # the wait is above
1212 PingResult = main.TRUE
1213 for i in range( 8, 18 ):
1214 ping = main.Mininet1.pingHost( src="h" + str( i ),
1215 target="h" + str( i + 10 ) )
1216 PingResult = PingResult and ping
1217 if ping == main.FALSE:
1218 main.log.warn( "Ping failed between h" + str( i ) +
1219 " and h" + str( i + 10 ) )
1220 elif ping == main.TRUE:
1221 main.log.info( "Ping test passed!" )
1222 # Don't set PingResult or you'd override failures
1223 if PingResult == main.FALSE:
1224 main.log.error(
1225 "Intents have not been installed correctly, pings failed." )
1226 # TODO: pretty print
1227 main.log.warn( "ONOS1 intents: " )
1228 try:
1229 tmpIntents = onosCli.intents()
1230 main.log.warn( json.dumps( json.loads( tmpIntents ),
1231 sort_keys=True,
1232 indent=4,
1233 separators=( ',', ': ' ) ) )
1234 except ( ValueError, TypeError ):
1235 main.log.warn( repr( tmpIntents ) )
1236 utilities.assert_equals(
1237 expect=main.TRUE,
1238 actual=PingResult,
1239 onpass="Intents have been installed correctly and pings work",
1240 onfail="Intents have not been installed correctly, pings failed." )
1241
1242 def CASE5( self, main ):
1243 """
1244 Reading state of ONOS
1245 """
1246 import json
1247 import time
1248 assert main.numCtrls, "main.numCtrls not defined"
1249 assert main, "main not defined"
1250 assert utilities.assert_equals, "utilities.assert_equals not defined"
1251 assert main.CLIs, "main.CLIs not defined"
1252 assert main.nodes, "main.nodes not defined"
1253
1254 main.case( "Setting up and gathering data for current state" )
1255 # The general idea for this test case is to pull the state of
1256 # ( intents,flows, topology,... ) from each ONOS node
1257 # We can then compare them with each other and also with past states
1258
1259 main.step( "Check that each switch has a master" )
1260 global mastershipState
1261 mastershipState = '[]'
1262
1263 # Assert that each device has a master
1264 rolesNotNull = main.TRUE
1265 threads = []
1266 for i in main.activeNodes:
1267 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1268 name="rolesNotNull-" + str( i ),
1269 args=[] )
1270 threads.append( t )
1271 t.start()
1272
1273 for t in threads:
1274 t.join()
1275 rolesNotNull = rolesNotNull and t.result
1276 utilities.assert_equals(
1277 expect=main.TRUE,
1278 actual=rolesNotNull,
1279 onpass="Each device has a master",
1280 onfail="Some devices don't have a master assigned" )
1281
1282 main.step( "Get the Mastership of each switch from each controller" )
1283 ONOSMastership = []
1284 mastershipCheck = main.FALSE
1285 consistentMastership = True
1286 rolesResults = True
1287 threads = []
1288 for i in main.activeNodes:
1289 t = main.Thread( target=main.CLIs[i].roles,
1290 name="roles-" + str( i ),
1291 args=[] )
1292 threads.append( t )
1293 t.start()
1294
1295 for t in threads:
1296 t.join()
1297 ONOSMastership.append( t.result )
1298
1299 for i in range( len( ONOSMastership ) ):
1300 node = str( main.activeNodes[i] + 1 )
1301 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1302 main.log.error( "Error in getting ONOS" + node + " roles" )
1303 main.log.warn( "ONOS" + node + " mastership response: " +
1304 repr( ONOSMastership[i] ) )
1305 rolesResults = False
1306 utilities.assert_equals(
1307 expect=True,
1308 actual=rolesResults,
1309 onpass="No error in reading roles output",
1310 onfail="Error in reading roles from ONOS" )
1311
1312 main.step( "Check for consistency in roles from each controller" )
1313 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1314 main.log.info(
1315 "Switch roles are consistent across all ONOS nodes" )
1316 else:
1317 consistentMastership = False
1318 utilities.assert_equals(
1319 expect=True,
1320 actual=consistentMastership,
1321 onpass="Switch roles are consistent across all ONOS nodes",
1322 onfail="ONOS nodes have different views of switch roles" )
1323
1324 if rolesResults and not consistentMastership:
1325 for i in range( len( main.activeNodes ) ):
1326 node = str( main.activeNodes[i] + 1 )
1327 try:
1328 main.log.warn(
1329 "ONOS" + node + " roles: ",
1330 json.dumps(
1331 json.loads( ONOSMastership[ i ] ),
1332 sort_keys=True,
1333 indent=4,
1334 separators=( ',', ': ' ) ) )
1335 except ( ValueError, TypeError ):
1336 main.log.warn( repr( ONOSMastership[ i ] ) )
1337 elif rolesResults and consistentMastership:
1338 mastershipCheck = main.TRUE
1339 mastershipState = ONOSMastership[ 0 ]
1340
1341 main.step( "Get the intents from each controller" )
1342 global intentState
1343 intentState = []
1344 ONOSIntents = []
1345 intentCheck = main.FALSE
1346 consistentIntents = True
1347 intentsResults = True
1348 threads = []
1349 for i in main.activeNodes:
1350 t = main.Thread( target=main.CLIs[i].intents,
1351 name="intents-" + str( i ),
1352 args=[],
1353 kwargs={ 'jsonFormat': True } )
1354 threads.append( t )
1355 t.start()
1356
1357 for t in threads:
1358 t.join()
1359 ONOSIntents.append( t.result )
1360
1361 for i in range( len( ONOSIntents ) ):
1362 node = str( main.activeNodes[i] + 1 )
1363 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1364 main.log.error( "Error in getting ONOS" + node + " intents" )
1365 main.log.warn( "ONOS" + node + " intents response: " +
1366 repr( ONOSIntents[ i ] ) )
1367 intentsResults = False
1368 utilities.assert_equals(
1369 expect=True,
1370 actual=intentsResults,
1371 onpass="No error in reading intents output",
1372 onfail="Error in reading intents from ONOS" )
1373
1374 main.step( "Check for consistency in Intents from each controller" )
1375 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1376 main.log.info( "Intents are consistent across all ONOS " +
1377 "nodes" )
1378 else:
1379 consistentIntents = False
1380 main.log.error( "Intents not consistent" )
1381 utilities.assert_equals(
1382 expect=True,
1383 actual=consistentIntents,
1384 onpass="Intents are consistent across all ONOS nodes",
1385 onfail="ONOS nodes have different views of intents" )
1386
1387 if intentsResults:
1388 # Try to make it easy to figure out what is happening
1389 #
1390 # Intent ONOS1 ONOS2 ...
1391 # 0x01 INSTALLED INSTALLING
1392 # ... ... ...
1393 # ... ... ...
1394 title = " Id"
1395 for n in main.activeNodes:
1396 title += " " * 10 + "ONOS" + str( n + 1 )
1397 main.log.warn( title )
1398 # get all intent keys in the cluster
1399 keys = []
Jon Halla440e872016-03-31 15:15:50 -07001400 try:
1401 # Get the set of all intent keys
Jon Hall6e709752016-02-01 13:38:46 -08001402 for nodeStr in ONOSIntents:
1403 node = json.loads( nodeStr )
1404 for intent in node:
Jon Halla440e872016-03-31 15:15:50 -07001405 keys.append( intent.get( 'id' ) )
1406 keys = set( keys )
1407 # For each intent key, print the state on each node
1408 for key in keys:
1409 row = "%-13s" % key
1410 for nodeStr in ONOSIntents:
1411 node = json.loads( nodeStr )
1412 for intent in node:
1413 if intent.get( 'id', "Error" ) == key:
1414 row += "%-15s" % intent.get( 'state' )
1415 main.log.warn( row )
1416 # End of intent state table
1417 except ValueError as e:
1418 main.log.exception( e )
1419 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
Jon Hall6e709752016-02-01 13:38:46 -08001420
1421 if intentsResults and not consistentIntents:
1422 # print the json objects
1423 n = str( main.activeNodes[-1] + 1 )
1424 main.log.debug( "ONOS" + n + " intents: " )
1425 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1426 sort_keys=True,
1427 indent=4,
1428 separators=( ',', ': ' ) ) )
1429 for i in range( len( ONOSIntents ) ):
1430 node = str( main.activeNodes[i] + 1 )
1431 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1432 main.log.debug( "ONOS" + node + " intents: " )
1433 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1434 sort_keys=True,
1435 indent=4,
1436 separators=( ',', ': ' ) ) )
1437 else:
1438 main.log.debug( "ONOS" + node + " intents match ONOS" +
1439 n + " intents" )
1440 elif intentsResults and consistentIntents:
1441 intentCheck = main.TRUE
1442 intentState = ONOSIntents[ 0 ]
1443
1444 main.step( "Get the flows from each controller" )
1445 global flowState
1446 flowState = []
1447 ONOSFlows = []
1448 ONOSFlowsJson = []
1449 flowCheck = main.FALSE
1450 consistentFlows = True
1451 flowsResults = True
1452 threads = []
1453 for i in main.activeNodes:
1454 t = main.Thread( target=main.CLIs[i].flows,
1455 name="flows-" + str( i ),
1456 args=[],
1457 kwargs={ 'jsonFormat': True } )
1458 threads.append( t )
1459 t.start()
1460
1461 # NOTE: Flows command can take some time to run
1462 time.sleep(30)
1463 for t in threads:
1464 t.join()
1465 result = t.result
1466 ONOSFlows.append( result )
1467
1468 for i in range( len( ONOSFlows ) ):
1469 num = str( main.activeNodes[i] + 1 )
1470 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1471 main.log.error( "Error in getting ONOS" + num + " flows" )
1472 main.log.warn( "ONOS" + num + " flows response: " +
1473 repr( ONOSFlows[ i ] ) )
1474 flowsResults = False
1475 ONOSFlowsJson.append( None )
1476 else:
1477 try:
1478 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1479 except ( ValueError, TypeError ):
1480 # FIXME: change this to log.error?
1481 main.log.exception( "Error in parsing ONOS" + num +
1482 " response as json." )
1483 main.log.error( repr( ONOSFlows[ i ] ) )
1484 ONOSFlowsJson.append( None )
1485 flowsResults = False
1486 utilities.assert_equals(
1487 expect=True,
1488 actual=flowsResults,
1489 onpass="No error in reading flows output",
1490 onfail="Error in reading flows from ONOS" )
1491
1492 main.step( "Check for consistency in Flows from each controller" )
1493 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1494 if all( tmp ):
1495 main.log.info( "Flow count is consistent across all ONOS nodes" )
1496 else:
1497 consistentFlows = False
1498 utilities.assert_equals(
1499 expect=True,
1500 actual=consistentFlows,
1501 onpass="The flow count is consistent across all ONOS nodes",
1502 onfail="ONOS nodes have different flow counts" )
1503
1504 if flowsResults and not consistentFlows:
1505 for i in range( len( ONOSFlows ) ):
1506 node = str( main.activeNodes[i] + 1 )
1507 try:
1508 main.log.warn(
1509 "ONOS" + node + " flows: " +
1510 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1511 indent=4, separators=( ',', ': ' ) ) )
1512 except ( ValueError, TypeError ):
1513 main.log.warn( "ONOS" + node + " flows: " +
1514 repr( ONOSFlows[ i ] ) )
1515 elif flowsResults and consistentFlows:
1516 flowCheck = main.TRUE
1517 flowState = ONOSFlows[ 0 ]
1518
1519 main.step( "Get the OF Table entries" )
1520 global flows
1521 flows = []
1522 for i in range( 1, 29 ):
1523 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1524 if flowCheck == main.FALSE:
1525 for table in flows:
1526 main.log.warn( table )
1527 # TODO: Compare switch flow tables with ONOS flow tables
1528
1529 main.step( "Start continuous pings" )
1530 main.Mininet2.pingLong(
1531 src=main.params[ 'PING' ][ 'source1' ],
1532 target=main.params[ 'PING' ][ 'target1' ],
1533 pingTime=500 )
1534 main.Mininet2.pingLong(
1535 src=main.params[ 'PING' ][ 'source2' ],
1536 target=main.params[ 'PING' ][ 'target2' ],
1537 pingTime=500 )
1538 main.Mininet2.pingLong(
1539 src=main.params[ 'PING' ][ 'source3' ],
1540 target=main.params[ 'PING' ][ 'target3' ],
1541 pingTime=500 )
1542 main.Mininet2.pingLong(
1543 src=main.params[ 'PING' ][ 'source4' ],
1544 target=main.params[ 'PING' ][ 'target4' ],
1545 pingTime=500 )
1546 main.Mininet2.pingLong(
1547 src=main.params[ 'PING' ][ 'source5' ],
1548 target=main.params[ 'PING' ][ 'target5' ],
1549 pingTime=500 )
1550 main.Mininet2.pingLong(
1551 src=main.params[ 'PING' ][ 'source6' ],
1552 target=main.params[ 'PING' ][ 'target6' ],
1553 pingTime=500 )
1554 main.Mininet2.pingLong(
1555 src=main.params[ 'PING' ][ 'source7' ],
1556 target=main.params[ 'PING' ][ 'target7' ],
1557 pingTime=500 )
1558 main.Mininet2.pingLong(
1559 src=main.params[ 'PING' ][ 'source8' ],
1560 target=main.params[ 'PING' ][ 'target8' ],
1561 pingTime=500 )
1562 main.Mininet2.pingLong(
1563 src=main.params[ 'PING' ][ 'source9' ],
1564 target=main.params[ 'PING' ][ 'target9' ],
1565 pingTime=500 )
1566 main.Mininet2.pingLong(
1567 src=main.params[ 'PING' ][ 'source10' ],
1568 target=main.params[ 'PING' ][ 'target10' ],
1569 pingTime=500 )
1570
1571 main.step( "Collecting topology information from ONOS" )
1572 devices = []
1573 threads = []
1574 for i in main.activeNodes:
1575 t = main.Thread( target=main.CLIs[i].devices,
1576 name="devices-" + str( i ),
1577 args=[ ] )
1578 threads.append( t )
1579 t.start()
1580
1581 for t in threads:
1582 t.join()
1583 devices.append( t.result )
1584 hosts = []
1585 threads = []
1586 for i in main.activeNodes:
1587 t = main.Thread( target=main.CLIs[i].hosts,
1588 name="hosts-" + str( i ),
1589 args=[ ] )
1590 threads.append( t )
1591 t.start()
1592
1593 for t in threads:
1594 t.join()
1595 try:
1596 hosts.append( json.loads( t.result ) )
1597 except ( ValueError, TypeError ):
1598 # FIXME: better handling of this, print which node
1599 # Maybe use thread name?
1600 main.log.exception( "Error parsing json output of hosts" )
1601 main.log.warn( repr( t.result ) )
1602 hosts.append( None )
1603
1604 ports = []
1605 threads = []
1606 for i in main.activeNodes:
1607 t = main.Thread( target=main.CLIs[i].ports,
1608 name="ports-" + str( i ),
1609 args=[ ] )
1610 threads.append( t )
1611 t.start()
1612
1613 for t in threads:
1614 t.join()
1615 ports.append( t.result )
1616 links = []
1617 threads = []
1618 for i in main.activeNodes:
1619 t = main.Thread( target=main.CLIs[i].links,
1620 name="links-" + str( i ),
1621 args=[ ] )
1622 threads.append( t )
1623 t.start()
1624
1625 for t in threads:
1626 t.join()
1627 links.append( t.result )
1628 clusters = []
1629 threads = []
1630 for i in main.activeNodes:
1631 t = main.Thread( target=main.CLIs[i].clusters,
1632 name="clusters-" + str( i ),
1633 args=[ ] )
1634 threads.append( t )
1635 t.start()
1636
1637 for t in threads:
1638 t.join()
1639 clusters.append( t.result )
1640 # Compare json objects for hosts and dataplane clusters
1641
1642 # hosts
1643 main.step( "Host view is consistent across ONOS nodes" )
1644 consistentHostsResult = main.TRUE
1645 for controller in range( len( hosts ) ):
1646 controllerStr = str( main.activeNodes[controller] + 1 )
1647 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1648 if hosts[ controller ] == hosts[ 0 ]:
1649 continue
1650 else: # hosts not consistent
1651 main.log.error( "hosts from ONOS" +
1652 controllerStr +
1653 " is inconsistent with ONOS1" )
1654 main.log.warn( repr( hosts[ controller ] ) )
1655 consistentHostsResult = main.FALSE
1656
1657 else:
1658 main.log.error( "Error in getting ONOS hosts from ONOS" +
1659 controllerStr )
1660 consistentHostsResult = main.FALSE
1661 main.log.warn( "ONOS" + controllerStr +
1662 " hosts response: " +
1663 repr( hosts[ controller ] ) )
1664 utilities.assert_equals(
1665 expect=main.TRUE,
1666 actual=consistentHostsResult,
1667 onpass="Hosts view is consistent across all ONOS nodes",
1668 onfail="ONOS nodes have different views of hosts" )
1669
1670 main.step( "Each host has an IP address" )
1671 ipResult = main.TRUE
1672 for controller in range( 0, len( hosts ) ):
1673 controllerStr = str( main.activeNodes[controller] + 1 )
1674 if hosts[ controller ]:
1675 for host in hosts[ controller ]:
1676 if not host.get( 'ipAddresses', [ ] ):
1677 main.log.error( "Error with host ips on controller" +
1678 controllerStr + ": " + str( host ) )
1679 ipResult = main.FALSE
1680 utilities.assert_equals(
1681 expect=main.TRUE,
1682 actual=ipResult,
1683 onpass="The ips of the hosts aren't empty",
1684 onfail="The ip of at least one host is missing" )
1685
1686 # Strongly connected clusters of devices
1687 main.step( "Cluster view is consistent across ONOS nodes" )
1688 consistentClustersResult = main.TRUE
1689 for controller in range( len( clusters ) ):
1690 controllerStr = str( main.activeNodes[controller] + 1 )
1691 if "Error" not in clusters[ controller ]:
1692 if clusters[ controller ] == clusters[ 0 ]:
1693 continue
1694 else: # clusters not consistent
1695 main.log.error( "clusters from ONOS" + controllerStr +
1696 " is inconsistent with ONOS1" )
1697 consistentClustersResult = main.FALSE
1698
1699 else:
1700 main.log.error( "Error in getting dataplane clusters " +
1701 "from ONOS" + controllerStr )
1702 consistentClustersResult = main.FALSE
1703 main.log.warn( "ONOS" + controllerStr +
1704 " clusters response: " +
1705 repr( clusters[ controller ] ) )
1706 utilities.assert_equals(
1707 expect=main.TRUE,
1708 actual=consistentClustersResult,
1709 onpass="Clusters view is consistent across all ONOS nodes",
1710 onfail="ONOS nodes have different views of clusters" )
Jon Hall172b7ba2016-04-07 18:12:20 -07001711 if consistentClustersResult != main.TRUE:
1712 main.log.debug( clusters )
Jon Hall6e709752016-02-01 13:38:46 -08001713 # there should always only be one cluster
1714 main.step( "Cluster view correct across ONOS nodes" )
1715 try:
1716 numClusters = len( json.loads( clusters[ 0 ] ) )
1717 except ( ValueError, TypeError ):
1718 main.log.exception( "Error parsing clusters[0]: " +
1719 repr( clusters[ 0 ] ) )
1720 numClusters = "ERROR"
1721 clusterResults = main.FALSE
1722 if numClusters == 1:
1723 clusterResults = main.TRUE
1724 utilities.assert_equals(
1725 expect=1,
1726 actual=numClusters,
1727 onpass="ONOS shows 1 SCC",
1728 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1729
1730 main.step( "Comparing ONOS topology to MN" )
1731 devicesResults = main.TRUE
1732 linksResults = main.TRUE
1733 hostsResults = main.TRUE
1734 mnSwitches = main.Mininet1.getSwitches()
1735 mnLinks = main.Mininet1.getLinks()
1736 mnHosts = main.Mininet1.getHosts()
1737 for controller in main.activeNodes:
1738 controllerStr = str( main.activeNodes[controller] + 1 )
1739 if devices[ controller ] and ports[ controller ] and\
1740 "Error" not in devices[ controller ] and\
1741 "Error" not in ports[ controller ]:
1742 currentDevicesResult = main.Mininet1.compareSwitches(
1743 mnSwitches,
1744 json.loads( devices[ controller ] ),
1745 json.loads( ports[ controller ] ) )
1746 else:
1747 currentDevicesResult = main.FALSE
1748 utilities.assert_equals( expect=main.TRUE,
1749 actual=currentDevicesResult,
1750 onpass="ONOS" + controllerStr +
1751 " Switches view is correct",
1752 onfail="ONOS" + controllerStr +
1753 " Switches view is incorrect" )
1754 if links[ controller ] and "Error" not in links[ controller ]:
1755 currentLinksResult = main.Mininet1.compareLinks(
1756 mnSwitches, mnLinks,
1757 json.loads( links[ controller ] ) )
1758 else:
1759 currentLinksResult = main.FALSE
1760 utilities.assert_equals( expect=main.TRUE,
1761 actual=currentLinksResult,
1762 onpass="ONOS" + controllerStr +
1763 " links view is correct",
1764 onfail="ONOS" + controllerStr +
1765 " links view is incorrect" )
1766
1767 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1768 currentHostsResult = main.Mininet1.compareHosts(
1769 mnHosts,
1770 hosts[ controller ] )
1771 else:
1772 currentHostsResult = main.FALSE
1773 utilities.assert_equals( expect=main.TRUE,
1774 actual=currentHostsResult,
1775 onpass="ONOS" + controllerStr +
1776 " hosts exist in Mininet",
1777 onfail="ONOS" + controllerStr +
1778 " hosts don't match Mininet" )
1779
1780 devicesResults = devicesResults and currentDevicesResult
1781 linksResults = linksResults and currentLinksResult
1782 hostsResults = hostsResults and currentHostsResult
1783
1784 main.step( "Device information is correct" )
1785 utilities.assert_equals(
1786 expect=main.TRUE,
1787 actual=devicesResults,
1788 onpass="Device information is correct",
1789 onfail="Device information is incorrect" )
1790
1791 main.step( "Links are correct" )
1792 utilities.assert_equals(
1793 expect=main.TRUE,
1794 actual=linksResults,
1795 onpass="Link are correct",
1796 onfail="Links are incorrect" )
1797
1798 main.step( "Hosts are correct" )
1799 utilities.assert_equals(
1800 expect=main.TRUE,
1801 actual=hostsResults,
1802 onpass="Hosts are correct",
1803 onfail="Hosts are incorrect" )
1804
1805 def CASE61( self, main ):
1806 """
1807 The Failure case.
1808 """
1809 import math
1810 assert main.numCtrls, "main.numCtrls not defined"
1811 assert main, "main not defined"
1812 assert utilities.assert_equals, "utilities.assert_equals not defined"
1813 assert main.CLIs, "main.CLIs not defined"
1814 assert main.nodes, "main.nodes not defined"
1815 main.case( "Partition ONOS nodes into two distinct partitions" )
1816
1817 main.step( "Checking ONOS Logs for errors" )
1818 for node in main.nodes:
1819 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1820 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1821
1822 n = len( main.nodes ) # Number of nodes
1823 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1824 main.partition = [ 0 ] # ONOS node to partition, listed by index in main.nodes
1825 if n > 3:
1826 main.partition.append( p - 1 )
1827 # NOTE: This only works for cluster sizes of 3,5, or 7.
1828
1829 main.step( "Partitioning ONOS nodes" )
1830 nodeList = [ str( i + 1 ) for i in main.partition ]
1831 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1832 partitionResults = main.TRUE
1833 for i in range( 0, n ):
1834 this = main.nodes[i]
1835 if i not in main.partition:
1836 for j in main.partition:
1837 foe = main.nodes[j]
1838 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1839 #CMD HERE
1840 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1841 this.handle.sendline( cmdStr )
1842 this.handle.expect( "\$" )
1843 main.log.debug( this.handle.before )
1844 else:
1845 for j in range( 0, n ):
1846 if j not in main.partition:
1847 foe = main.nodes[j]
1848 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1849 #CMD HERE
1850 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1851 this.handle.sendline( cmdStr )
1852 this.handle.expect( "\$" )
1853 main.log.debug( this.handle.before )
1854 main.activeNodes.remove( i )
1855 # NOTE: When dynamic clustering is finished, we need to start checking
1856 # main.partion nodes still work when partitioned
1857 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1858 onpass="Firewall rules set successfully",
1859 onfail="Error setting firewall rules" )
1860
1861 main.log.step( "Sleeping 60 seconds" )
1862 time.sleep( 60 )
1863
1864 def CASE62( self, main ):
1865 """
1866 Healing Partition
1867 """
1868 import time
1869 assert main.numCtrls, "main.numCtrls not defined"
1870 assert main, "main not defined"
1871 assert utilities.assert_equals, "utilities.assert_equals not defined"
1872 assert main.CLIs, "main.CLIs not defined"
1873 assert main.nodes, "main.nodes not defined"
1874 assert main.partition, "main.partition not defined"
1875 main.case( "Healing Partition" )
1876
1877 main.step( "Deleteing firewall rules" )
1878 healResults = main.TRUE
1879 for node in main.nodes:
1880 cmdStr = "sudo iptables -F"
1881 node.handle.sendline( cmdStr )
1882 node.handle.expect( "\$" )
1883 main.log.debug( node.handle.before )
1884 utilities.assert_equals( expect=main.TRUE, actual=healResults,
1885 onpass="Firewall rules removed",
1886 onfail="Error removing firewall rules" )
1887
1888 for node in main.partition:
1889 main.activeNodes.append( node )
1890 main.activeNodes.sort()
1891 try:
1892 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1893 "List of active nodes has duplicates, this likely indicates something was run out of order"
1894 except AssertionError:
1895 main.log.exception( "" )
1896 main.cleanup()
1897 main.exit()
1898
    def CASE7( self, main ):
        """
        Check state after ONOS failure

        After the partition from CASE61, verify that the remaining active
        nodes agree on device mastership and intents, that flow tables and
        intents are unchanged from before the failure, and that leadership
        election still works (and the leader is not a partitioned node).
        Relies on state saved by earlier cases: 'flows' and 'intentState'.
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # Default to an empty partition list if CASE61 was not run first
        try:
            main.partition
        except AttributeError:
            main.partition = []

        main.case( "Running ONOS Constant State Tests" )

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master; query all active nodes in
        # parallel threads and AND the results together
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        # Flag any node whose roles output is missing or contains "Error"
        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[i] + 1 )
                # NOTE(review): log.warn is given two arguments here; if the
                # logger only formats its first argument, the JSON dump may
                # be dropped from the log — confirm logger semantics
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        # NOTE: we expect mastership to change on controller failure

        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # On mismatch, log a table of intent id vs. state per node to
            # make it easy to figure out what is happening:
            #
            # Intent    ONOS1       ONOS2      ...
            # 0x01      INSTALLED   INSTALLING
            # ...       ...         ...
            title = " ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            # Log a state histogram, e.g. {'INSTALLED': 25, 'FAILED': 1}
            out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        main.step( "Compare current intents with intents before the failure" )
        # NOTE: this requires case 5 to pass for intentState to be set.
        #      maybe we should stop the test if that fails?
        sameIntents = main.FALSE
        try:
            intentState
        except NameError:
            main.log.warn( "No previous intent state was saved" )
        else:
            if intentState and intentState == ONOSIntents[ 0 ]:
                sameIntents = main.TRUE
                main.log.info( "Intents are consistent with before failure" )
            # TODO: possibly the states have changed? we may need to figure out
            #       what the acceptable states are
            elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                # Same count but not byte-identical: compare parsed objects
                sameIntents = main.TRUE
                try:
                    before = json.loads( intentState )
                    after = json.loads( ONOSIntents[ 0 ] )
                    for intent in before:
                        if intent not in after:
                            sameIntents = main.FALSE
                            main.log.debug( "Intent is not currently in ONOS " +
                                            "(at least in the same form):" )
                            main.log.debug( json.dumps( intent ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            if sameIntents == main.FALSE:
                try:
                    main.log.debug( "ONOS intents before: " )
                    main.log.debug( json.dumps( json.loads( intentState ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                    main.log.debug( "Current ONOS intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                sort_keys=True, indent=4,
                                                separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Exception printing intents" )
                    main.log.debug( repr( ONOSIntents[0] ) )
                    main.log.debug( repr( intentState ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=sameIntents,
                onpass="Intents are consistent with before failure",
                onfail="The Intents changed during failure" )
            intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component failure" )
        FlowTables = main.TRUE
        for i in range( 28 ):
            main.log.info( "Checking flow table on s" + str( i + 1 ) )
            tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
            # NOTE: 'flows' is expected to have been saved by an earlier case;
            # it is not defined in this method
            FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
            if FlowTables == main.FALSE:
                main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        main.Mininet2.pingLongKill()
        '''
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        '''

        main.step( "Leadership Election is still functional" )
        # Test of LeadershipElection
        leaderList = []

        # IPs of the partitioned nodes; the elected leader must not be one
        partitioned = []
        for i in main.partition:
            partitioned.append( main.nodes[i].ip_address )
        leaderResult = main.TRUE

        for i in main.activeNodes:
            cli = main.CLIs[i]
            leaderN = cli.electionTestLeader()
            leaderList.append( leaderN )
            if leaderN == main.FALSE:
                # error in response
                main.log.error( "Something is wrong with " +
                                "electionTestLeader function, check the" +
                                " error logs" )
                leaderResult = main.FALSE
            elif leaderN is None:
                main.log.error( cli.name +
                                " shows no leader for the election-app was" +
                                " elected after the old one died" )
                leaderResult = main.FALSE
            elif leaderN in partitioned:
                main.log.error( cli.name + " shows " + str( leaderN ) +
                                " as leader for the election-app, but it " +
                                "was partitioned" )
                leaderResult = main.FALSE
        # All active nodes must agree on a single leader
        if len( set( leaderList ) ) != 1:
            leaderResult = main.FALSE
            main.log.error(
                "Inconsistent view of leader for the election test app" )
            # TODO: print the list
        utilities.assert_equals(
            expect=main.TRUE,
            actual=leaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )
    def CASE8( self, main ):
        """
        Compare topo

        Repeatedly pulls the topology (devices, hosts, ports, links,
        clusters) from every active ONOS node and compares it to the
        Mininet topology until they match or the retry budget is spent,
        then checks cross-node consistency of hosts/clusters and the ONOS
        nodes' own view of cluster membership.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                                " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Fetch devices from every active node in parallel, with retries
            devices = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="devices-" + str( i ),
                                 args=[ main.CLIs[i].devices, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                devices.append( t.result )
            hosts = []
            ipResult = main.TRUE
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="hosts-" + str( i ),
                                 args=[ main.CLIs[i].hosts, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                try:
                    hosts.append( json.loads( t.result ) )
                except ( ValueError, TypeError ):
                    main.log.exception( "Error parsing hosts results" )
                    main.log.error( repr( t.result ) )
                    hosts.append( None )
            # Every discovered host must have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            ports = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="ports-" + str( i ),
                                 args=[ main.CLIs[i].ports, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                ports.append( t.result )
            links = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="links-" + str( i ),
                                 args=[ main.CLIs[i].links, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                links.append( t.result )
            clusters = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="clusters-" + str( i ),
                                 args=[ main.CLIs[i].clusters, [ None ] ],
                                 kwargs= { 'sleep': 5, 'attempts': 5,
                                           'randomTime': True } )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                clusters.append( t.result )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every node returned nothing there is no point comparing
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[controller] + 1 )
                if devices[ controller ] and ports[ controller ] and\
                   "Error" not in devices[ controller ] and\
                   "Error" not in ports[ controller ]:

                    try:
                        currentDevicesResult = main.Mininet1.compareSwitches(
                            mnSwitches,
                            json.loads( devices[ controller ] ),
                            json.loads( ports[ controller ] ) )
                    except ( TypeError, ValueError ) as e:
                        main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
                            devices[ controller ], ports[ controller ] ) )
                else:
                    currentDevicesResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                         " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " Switches view is incorrect" )

                if links[ controller ] and "Error" not in links[ controller ]:
                    currentLinksResult = main.Mininet1.compareLinks(
                        mnSwitches, mnLinks,
                        json.loads( links[ controller ] ) )
                else:
                    currentLinksResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                         " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                         " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
                    if i == 1:
                        deviceId = "1000".zfill(16)
                    elif i == 2:
                        deviceId = "2000".zfill(16)
                    elif i == 3:
                        deviceId = "3000".zfill(16)
                    elif i == 4:
                        deviceId = "3004".zfill(16)
                    elif i == 5:
                        deviceId = "5000".zfill(16)
                    elif i == 6:
                        deviceId = "6000".zfill(16)
                    elif i == 7:
                        deviceId = "6007".zfill(16)
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill(16)
                    elif i == 28:
                        deviceId = "2800".zfill(16)
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'location' )
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split(':')[1]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                # NOTE(review): this unconditionally overrides hostAttachment
                # whenever any hosts were discovered, masking per-host
                # attachment failures detected above — confirm intent
                if zeroHosts is False:
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
            utilities.assert_equals( expect=True,
                                     actual=topoResult,
                                     onpass="ONOS topology matches Mininet",
                                     onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[0] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # Also require convergence within two polling rounds
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
               " seconds for the test to make all the cli calls to fetch " +\
               "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodesOutput = []
        nodeResults = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].nodes,
                             name="nodes-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            nodesOutput.append( t.result )
        ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
        ips.sort()
        for i in nodesOutput:
            try:
                current = json.loads( i )
                activeIps = []
                currentResult = main.FALSE
                # Each node's view of READY members must equal the active set
                for node in current:
                    if node['state'] == 'READY':
                        activeIps.append( node['ip'] )
                activeIps.sort()
                if ips == activeIps:
                    currentResult = main.TRUE
            except ( ValueError, TypeError ):
                main.log.error( "Error parsing nodes output" )
                main.log.warn( repr( i ) )
                currentResult = main.FALSE
            nodeResults = nodeResults and currentResult
        utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            for cli in main.CLIs:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
Jon Hall6e709752016-02-01 13:38:46 -08002662
2663 def CASE9( self, main ):
2664 """
2665 Link s3-s28 down
2666 """
2667 import time
2668 assert main.numCtrls, "main.numCtrls not defined"
2669 assert main, "main not defined"
2670 assert utilities.assert_equals, "utilities.assert_equals not defined"
2671 assert main.CLIs, "main.CLIs not defined"
2672 assert main.nodes, "main.nodes not defined"
2673 # NOTE: You should probably run a topology check after this
2674
2675 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2676
2677 description = "Turn off a link to ensure that Link Discovery " +\
2678 "is working properly"
2679 main.case( description )
2680
2681 main.step( "Kill Link between s3 and s28" )
2682 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2683 main.log.info( "Waiting " + str( linkSleep ) +
2684 " seconds for link down to be discovered" )
2685 time.sleep( linkSleep )
2686 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2687 onpass="Link down successful",
2688 onfail="Failed to bring link down" )
2689 # TODO do some sort of check here
2690
2691 def CASE10( self, main ):
2692 """
2693 Link s3-s28 up
2694 """
2695 import time
2696 assert main.numCtrls, "main.numCtrls not defined"
2697 assert main, "main not defined"
2698 assert utilities.assert_equals, "utilities.assert_equals not defined"
2699 assert main.CLIs, "main.CLIs not defined"
2700 assert main.nodes, "main.nodes not defined"
2701 # NOTE: You should probably run a topology check after this
2702
2703 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2704
2705 description = "Restore a link to ensure that Link Discovery is " + \
2706 "working properly"
2707 main.case( description )
2708
2709 main.step( "Bring link between s3 and s28 back up" )
2710 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2711 main.log.info( "Waiting " + str( linkSleep ) +
2712 " seconds for link up to be discovered" )
2713 time.sleep( linkSleep )
2714 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2715 onpass="Link up successful",
2716 onfail="Failed to bring link up" )
2717 # TODO do some sort of check here
2718
2719 def CASE11( self, main ):
2720 """
2721 Switch Down
2722 """
2723 # NOTE: You should probably run a topology check after this
2724 import time
2725 assert main.numCtrls, "main.numCtrls not defined"
2726 assert main, "main not defined"
2727 assert utilities.assert_equals, "utilities.assert_equals not defined"
2728 assert main.CLIs, "main.CLIs not defined"
2729 assert main.nodes, "main.nodes not defined"
2730
2731 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2732
2733 description = "Killing a switch to ensure it is discovered correctly"
2734 onosCli = main.CLIs[ main.activeNodes[0] ]
2735 main.case( description )
2736 switch = main.params[ 'kill' ][ 'switch' ]
2737 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2738
2739 # TODO: Make this switch parameterizable
2740 main.step( "Kill " + switch )
2741 main.log.info( "Deleting " + switch )
2742 main.Mininet1.delSwitch( switch )
2743 main.log.info( "Waiting " + str( switchSleep ) +
2744 " seconds for switch down to be discovered" )
2745 time.sleep( switchSleep )
2746 device = onosCli.getDevice( dpid=switchDPID )
2747 # Peek at the deleted switch
2748 main.log.warn( str( device ) )
2749 result = main.FALSE
2750 if device and device[ 'available' ] is False:
2751 result = main.TRUE
2752 utilities.assert_equals( expect=main.TRUE, actual=result,
2753 onpass="Kill switch successful",
2754 onfail="Failed to kill switch?" )
2755
2756 def CASE12( self, main ):
2757 """
2758 Switch Up
2759 """
2760 # NOTE: You should probably run a topology check after this
2761 import time
2762 assert main.numCtrls, "main.numCtrls not defined"
2763 assert main, "main not defined"
2764 assert utilities.assert_equals, "utilities.assert_equals not defined"
2765 assert main.CLIs, "main.CLIs not defined"
2766 assert main.nodes, "main.nodes not defined"
2767 assert ONOS1Port, "ONOS1Port not defined"
2768 assert ONOS2Port, "ONOS2Port not defined"
2769 assert ONOS3Port, "ONOS3Port not defined"
2770 assert ONOS4Port, "ONOS4Port not defined"
2771 assert ONOS5Port, "ONOS5Port not defined"
2772 assert ONOS6Port, "ONOS6Port not defined"
2773 assert ONOS7Port, "ONOS7Port not defined"
2774
2775 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2776 switch = main.params[ 'kill' ][ 'switch' ]
2777 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2778 links = main.params[ 'kill' ][ 'links' ].split()
2779 onosCli = main.CLIs[ main.activeNodes[0] ]
2780 description = "Adding a switch to ensure it is discovered correctly"
2781 main.case( description )
2782
2783 main.step( "Add back " + switch )
2784 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2785 for peer in links:
2786 main.Mininet1.addLink( switch, peer )
2787 ipList = [ node.ip_address for node in main.nodes ]
2788 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2789 main.log.info( "Waiting " + str( switchSleep ) +
2790 " seconds for switch up to be discovered" )
2791 time.sleep( switchSleep )
2792 device = onosCli.getDevice( dpid=switchDPID )
2793 # Peek at the deleted switch
2794 main.log.warn( str( device ) )
2795 result = main.FALSE
2796 if device and device[ 'available' ]:
2797 result = main.TRUE
2798 utilities.assert_equals( expect=main.TRUE, actual=result,
2799 onpass="add switch successful",
2800 onfail="Failed to add switch?" )
2801
2802 def CASE13( self, main ):
2803 """
2804 Clean up
2805 """
2806 import os
2807 import time
2808 assert main.numCtrls, "main.numCtrls not defined"
2809 assert main, "main not defined"
2810 assert utilities.assert_equals, "utilities.assert_equals not defined"
2811 assert main.CLIs, "main.CLIs not defined"
2812 assert main.nodes, "main.nodes not defined"
2813
2814 # printing colors to terminal
2815 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2816 'blue': '\033[94m', 'green': '\033[92m',
2817 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2818 main.case( "Test Cleanup" )
2819 main.step( "Killing tcpdumps" )
2820 main.Mininet2.stopTcpdump()
2821
2822 testname = main.TEST
2823 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2824 main.step( "Copying MN pcap and ONOS log files to test station" )
2825 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2826 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2827 # NOTE: MN Pcap file is being saved to logdir.
2828 # We scp this file as MN and TestON aren't necessarily the same vm
2829
2830 # FIXME: To be replaced with a Jenkin's post script
2831 # TODO: Load these from params
2832 # NOTE: must end in /
2833 logFolder = "/opt/onos/log/"
2834 logFiles = [ "karaf.log", "karaf.log.1" ]
2835 # NOTE: must end in /
2836 for f in logFiles:
2837 for node in main.nodes:
2838 dstName = main.logdir + "/" + node.name + "-" + f
2839 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2840 logFolder + f, dstName )
2841 # std*.log's
2842 # NOTE: must end in /
2843 logFolder = "/opt/onos/var/"
2844 logFiles = [ "stderr.log", "stdout.log" ]
2845 # NOTE: must end in /
2846 for f in logFiles:
2847 for node in main.nodes:
2848 dstName = main.logdir + "/" + node.name + "-" + f
2849 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2850 logFolder + f, dstName )
2851 else:
2852 main.log.debug( "skipping saving log files" )
2853
2854 main.step( "Stopping Mininet" )
2855 mnResult = main.Mininet1.stopNet()
2856 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2857 onpass="Mininet stopped",
2858 onfail="MN cleanup NOT successful" )
2859
2860 main.step( "Checking ONOS Logs for errors" )
2861 for node in main.nodes:
2862 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2863 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2864
2865 try:
2866 timerLog = open( main.logdir + "/Timers.csv", 'w')
2867 # Overwrite with empty line and close
2868 labels = "Gossip Intents"
2869 data = str( gossipTime )
2870 timerLog.write( labels + "\n" + data )
2871 timerLog.close()
2872 except NameError, e:
2873 main.log.exception(e)
2874
2875 def CASE14( self, main ):
2876 """
2877 start election app on all onos nodes
2878 """
2879 assert main.numCtrls, "main.numCtrls not defined"
2880 assert main, "main not defined"
2881 assert utilities.assert_equals, "utilities.assert_equals not defined"
2882 assert main.CLIs, "main.CLIs not defined"
2883 assert main.nodes, "main.nodes not defined"
2884
2885 main.case("Start Leadership Election app")
2886 main.step( "Install leadership election app" )
2887 onosCli = main.CLIs[ main.activeNodes[0] ]
2888 appResult = onosCli.activateApp( "org.onosproject.election" )
2889 utilities.assert_equals(
2890 expect=main.TRUE,
2891 actual=appResult,
2892 onpass="Election app installed",
2893 onfail="Something went wrong with installing Leadership election" )
2894
2895 main.step( "Run for election on each node" )
2896 leaderResult = main.TRUE
2897 leaders = []
2898 for i in main.activeNodes:
2899 main.CLIs[i].electionTestRun()
2900 for i in main.activeNodes:
2901 cli = main.CLIs[i]
2902 leader = cli.electionTestLeader()
2903 if leader is None or leader == main.FALSE:
2904 main.log.error( cli.name + ": Leader for the election app " +
2905 "should be an ONOS node, instead got '" +
2906 str( leader ) + "'" )
2907 leaderResult = main.FALSE
2908 leaders.append( leader )
2909 utilities.assert_equals(
2910 expect=main.TRUE,
2911 actual=leaderResult,
2912 onpass="Successfully ran for leadership",
2913 onfail="Failed to run for leadership" )
2914
2915 main.step( "Check that each node shows the same leader" )
2916 sameLeader = main.TRUE
2917 if len( set( leaders ) ) != 1:
2918 sameLeader = main.FALSE
2919 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
2920 str( leaders ) )
2921 utilities.assert_equals(
2922 expect=main.TRUE,
2923 actual=sameLeader,
2924 onpass="Leadership is consistent for the election topic",
2925 onfail="Nodes have different leaders" )
2926
    def CASE15( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
            withdrawl and later before withdrawl vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a canidate is not persistant

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leaders fron newLoeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[i].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app loaded the rest of the case is moot
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        def consistentLeaderboards( nodes ):
            # Poll every CLI for its election leaderboard, retrying up to 5
            # times until all nodes agree. Returns ( True, leaderboards ) on
            # agreement. NOTE(review): on 5 failures it falls through and
            # returns None, which callers unpack into two names — confirm
            # whether that TypeError is the intended failure mode.
            TOPIC = 'org.onosproject.election'
            # FIXME: use threads
            #FIXME: should we retry outside the function?
            for n in range( 5 ):  # Retry in case election is still happening
                leaderList = []
                # Get all leaderboards
                for cli in nodes:
                    leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
                # Compare leaderboards
                # NOTE(review): leaderList is always a list here, so the
                # "is not None" clause can never be False
                result = all( i == leaderList[0] for i in leaderList ) and\
                         leaderList is not None
                main.log.debug( leaderList )
                main.log.warn( result )
                if result:
                    return ( result, leaderList )
                time.sleep(5) #TODO: paramerterize
            main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
        activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
        sameResult, oldLeaders = consistentLeaderboards( activeCLIs )
        if sameResult:
            # assumes each leaderboard lists the current leader first —
            # TODO confirm against specificLeaderCandidate's output format
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = consistentLeaderboards( activeCLIs )
        if newLeaders[ 0 ][ 0 ] == 'none':
            main.log.error( "No leader was elected on at least 1 node" )
            if not expectNoLeader:
                newLeaderResult = False
        if newLeaderResult:
            newLeader = newLeaders[ 0 ][ 0 ]
        else:
            newLeader = None

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawl
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[0] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 entries on the old leaderboard: no second
            # candidate to promote, so we cannot judge correctness
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Paremterize
        positionResult, reRunLeaders = consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE

        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
3118
3119 def CASE16( self, main ):
3120 """
3121 Install Distributed Primitives app
3122 """
3123 import time
3124 assert main.numCtrls, "main.numCtrls not defined"
3125 assert main, "main not defined"
3126 assert utilities.assert_equals, "utilities.assert_equals not defined"
3127 assert main.CLIs, "main.CLIs not defined"
3128 assert main.nodes, "main.nodes not defined"
3129
3130 # Variables for the distributed primitives tests
3131 global pCounterName
Jon Hall6e709752016-02-01 13:38:46 -08003132 global pCounterValue
Jon Hall6e709752016-02-01 13:38:46 -08003133 global onosSet
3134 global onosSetName
3135 pCounterName = "TestON-Partitions"
Jon Hall6e709752016-02-01 13:38:46 -08003136 pCounterValue = 0
Jon Hall6e709752016-02-01 13:38:46 -08003137 onosSet = set([])
3138 onosSetName = "TestON-set"
3139
3140 description = "Install Primitives app"
3141 main.case( description )
3142 main.step( "Install Primitives app" )
3143 appName = "org.onosproject.distributedprimitives"
3144 node = main.activeNodes[0]
3145 appResults = main.CLIs[node].activateApp( appName )
3146 utilities.assert_equals( expect=main.TRUE,
3147 actual=appResults,
3148 onpass="Primitives app activated",
3149 onfail="Primitives app not activated" )
3150 time.sleep( 5 ) # To allow all nodes to activate
3151
3152 def CASE17( self, main ):
3153 """
3154 Check for basic functionality with distributed primitives
3155 """
3156 # Make sure variables are defined/set
3157 assert main.numCtrls, "main.numCtrls not defined"
3158 assert main, "main not defined"
3159 assert utilities.assert_equals, "utilities.assert_equals not defined"
3160 assert main.CLIs, "main.CLIs not defined"
3161 assert main.nodes, "main.nodes not defined"
3162 assert pCounterName, "pCounterName not defined"
Jon Hall6e709752016-02-01 13:38:46 -08003163 assert onosSetName, "onosSetName not defined"
3164 # NOTE: assert fails if value is 0/None/Empty/False
3165 try:
3166 pCounterValue
3167 except NameError:
3168 main.log.error( "pCounterValue not defined, setting to 0" )
3169 pCounterValue = 0
3170 try:
Jon Hall6e709752016-02-01 13:38:46 -08003171 onosSet
3172 except NameError:
3173 main.log.error( "onosSet not defined, setting to empty Set" )
3174 onosSet = set([])
3175 # Variables for the distributed primitives tests. These are local only
3176 addValue = "a"
3177 addAllValue = "a b c d e f"
3178 retainValue = "c d e f"
3179
3180 description = "Check for basic functionality with distributed " +\
3181 "primitives"
3182 main.case( description )
3183 main.caseExplanation = "Test the methods of the distributed " +\
3184 "primitives (counters and sets) throught the cli"
3185 # DISTRIBUTED ATOMIC COUNTERS
3186 # Partitioned counters
3187 main.step( "Increment then get a default counter on each node" )
3188 pCounters = []
3189 threads = []
3190 addedPValues = []
3191 for i in main.activeNodes:
3192 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3193 name="counterAddAndGet-" + str( i ),
3194 args=[ pCounterName ] )
3195 pCounterValue += 1
3196 addedPValues.append( pCounterValue )
3197 threads.append( t )
3198 t.start()
3199
3200 for t in threads:
3201 t.join()
3202 pCounters.append( t.result )
3203 # Check that counter incremented numController times
3204 pCounterResults = True
3205 for i in addedPValues:
3206 tmpResult = i in pCounters
3207 pCounterResults = pCounterResults and tmpResult
3208 if not tmpResult:
3209 main.log.error( str( i ) + " is not in partitioned "
3210 "counter incremented results" )
3211 utilities.assert_equals( expect=True,
3212 actual=pCounterResults,
3213 onpass="Default counter incremented",
3214 onfail="Error incrementing default" +
3215 " counter" )
3216
3217 main.step( "Get then Increment a default counter on each node" )
3218 pCounters = []
3219 threads = []
3220 addedPValues = []
3221 for i in main.activeNodes:
3222 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3223 name="counterGetAndAdd-" + str( i ),
3224 args=[ pCounterName ] )
3225 addedPValues.append( pCounterValue )
3226 pCounterValue += 1
3227 threads.append( t )
3228 t.start()
3229
3230 for t in threads:
3231 t.join()
3232 pCounters.append( t.result )
3233 # Check that counter incremented numController times
3234 pCounterResults = True
3235 for i in addedPValues:
3236 tmpResult = i in pCounters
3237 pCounterResults = pCounterResults and tmpResult
3238 if not tmpResult:
3239 main.log.error( str( i ) + " is not in partitioned "
3240 "counter incremented results" )
3241 utilities.assert_equals( expect=True,
3242 actual=pCounterResults,
3243 onpass="Default counter incremented",
3244 onfail="Error incrementing default" +
3245 " counter" )
3246
3247 main.step( "Counters we added have the correct values" )
3248 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3249 utilities.assert_equals( expect=main.TRUE,
3250 actual=incrementCheck,
3251 onpass="Added counters are correct",
3252 onfail="Added counters are incorrect" )
3253
3254 main.step( "Add -8 to then get a default counter on each node" )
3255 pCounters = []
3256 threads = []
3257 addedPValues = []
3258 for i in main.activeNodes:
3259 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3260 name="counterIncrement-" + str( i ),
3261 args=[ pCounterName ],
3262 kwargs={ "delta": -8 } )
3263 pCounterValue += -8
3264 addedPValues.append( pCounterValue )
3265 threads.append( t )
3266 t.start()
3267
3268 for t in threads:
3269 t.join()
3270 pCounters.append( t.result )
3271 # Check that counter incremented numController times
3272 pCounterResults = True
3273 for i in addedPValues:
3274 tmpResult = i in pCounters
3275 pCounterResults = pCounterResults and tmpResult
3276 if not tmpResult:
3277 main.log.error( str( i ) + " is not in partitioned "
3278 "counter incremented results" )
3279 utilities.assert_equals( expect=True,
3280 actual=pCounterResults,
3281 onpass="Default counter incremented",
3282 onfail="Error incrementing default" +
3283 " counter" )
3284
3285 main.step( "Add 5 to then get a default counter on each node" )
3286 pCounters = []
3287 threads = []
3288 addedPValues = []
3289 for i in main.activeNodes:
3290 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3291 name="counterIncrement-" + str( i ),
3292 args=[ pCounterName ],
3293 kwargs={ "delta": 5 } )
3294 pCounterValue += 5
3295 addedPValues.append( pCounterValue )
3296 threads.append( t )
3297 t.start()
3298
3299 for t in threads:
3300 t.join()
3301 pCounters.append( t.result )
3302 # Check that counter incremented numController times
3303 pCounterResults = True
3304 for i in addedPValues:
3305 tmpResult = i in pCounters
3306 pCounterResults = pCounterResults and tmpResult
3307 if not tmpResult:
3308 main.log.error( str( i ) + " is not in partitioned "
3309 "counter incremented results" )
3310 utilities.assert_equals( expect=True,
3311 actual=pCounterResults,
3312 onpass="Default counter incremented",
3313 onfail="Error incrementing default" +
3314 " counter" )
3315
3316 main.step( "Get then add 5 to a default counter on each node" )
3317 pCounters = []
3318 threads = []
3319 addedPValues = []
3320 for i in main.activeNodes:
3321 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3322 name="counterIncrement-" + str( i ),
3323 args=[ pCounterName ],
3324 kwargs={ "delta": 5 } )
3325 addedPValues.append( pCounterValue )
3326 pCounterValue += 5
3327 threads.append( t )
3328 t.start()
3329
3330 for t in threads:
3331 t.join()
3332 pCounters.append( t.result )
3333 # Check that counter incremented numController times
3334 pCounterResults = True
3335 for i in addedPValues:
3336 tmpResult = i in pCounters
3337 pCounterResults = pCounterResults and tmpResult
3338 if not tmpResult:
3339 main.log.error( str( i ) + " is not in partitioned "
3340 "counter incremented results" )
3341 utilities.assert_equals( expect=True,
3342 actual=pCounterResults,
3343 onpass="Default counter incremented",
3344 onfail="Error incrementing default" +
3345 " counter" )
3346
3347 main.step( "Counters we added have the correct values" )
3348 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3349 utilities.assert_equals( expect=main.TRUE,
3350 actual=incrementCheck,
3351 onpass="Added counters are correct",
3352 onfail="Added counters are incorrect" )
3353
Jon Hall6e709752016-02-01 13:38:46 -08003354 # DISTRIBUTED SETS
3355 main.step( "Distributed Set get" )
3356 size = len( onosSet )
3357 getResponses = []
3358 threads = []
3359 for i in main.activeNodes:
3360 t = main.Thread( target=main.CLIs[i].setTestGet,
3361 name="setTestGet-" + str( i ),
3362 args=[ onosSetName ] )
3363 threads.append( t )
3364 t.start()
3365 for t in threads:
3366 t.join()
3367 getResponses.append( t.result )
3368
3369 getResults = main.TRUE
3370 for i in range( len( main.activeNodes ) ):
3371 node = str( main.activeNodes[i] + 1 )
3372 if isinstance( getResponses[ i ], list):
3373 current = set( getResponses[ i ] )
3374 if len( current ) == len( getResponses[ i ] ):
3375 # no repeats
3376 if onosSet != current:
3377 main.log.error( "ONOS" + node +
3378 " has incorrect view" +
3379 " of set " + onosSetName + ":\n" +
3380 str( getResponses[ i ] ) )
3381 main.log.debug( "Expected: " + str( onosSet ) )
3382 main.log.debug( "Actual: " + str( current ) )
3383 getResults = main.FALSE
3384 else:
3385 # error, set is not a set
3386 main.log.error( "ONOS" + node +
3387 " has repeat elements in" +
3388 " set " + onosSetName + ":\n" +
3389 str( getResponses[ i ] ) )
3390 getResults = main.FALSE
3391 elif getResponses[ i ] == main.ERROR:
3392 getResults = main.FALSE
3393 utilities.assert_equals( expect=main.TRUE,
3394 actual=getResults,
3395 onpass="Set elements are correct",
3396 onfail="Set elements are incorrect" )
3397
3398 main.step( "Distributed Set size" )
3399 sizeResponses = []
3400 threads = []
3401 for i in main.activeNodes:
3402 t = main.Thread( target=main.CLIs[i].setTestSize,
3403 name="setTestSize-" + str( i ),
3404 args=[ onosSetName ] )
3405 threads.append( t )
3406 t.start()
3407 for t in threads:
3408 t.join()
3409 sizeResponses.append( t.result )
3410
3411 sizeResults = main.TRUE
3412 for i in range( len( main.activeNodes ) ):
3413 node = str( main.activeNodes[i] + 1 )
3414 if size != sizeResponses[ i ]:
3415 sizeResults = main.FALSE
3416 main.log.error( "ONOS" + node +
3417 " expected a size of " + str( size ) +
3418 " for set " + onosSetName +
3419 " but got " + str( sizeResponses[ i ] ) )
3420 utilities.assert_equals( expect=main.TRUE,
3421 actual=sizeResults,
3422 onpass="Set sizes are correct",
3423 onfail="Set sizes are incorrect" )
3424
3425 main.step( "Distributed Set add()" )
3426 onosSet.add( addValue )
3427 addResponses = []
3428 threads = []
3429 for i in main.activeNodes:
3430 t = main.Thread( target=main.CLIs[i].setTestAdd,
3431 name="setTestAdd-" + str( i ),
3432 args=[ onosSetName, addValue ] )
3433 threads.append( t )
3434 t.start()
3435 for t in threads:
3436 t.join()
3437 addResponses.append( t.result )
3438
3439 # main.TRUE = successfully changed the set
3440 # main.FALSE = action resulted in no change in set
3441 # main.ERROR - Some error in executing the function
3442 addResults = main.TRUE
3443 for i in range( len( main.activeNodes ) ):
3444 if addResponses[ i ] == main.TRUE:
3445 # All is well
3446 pass
3447 elif addResponses[ i ] == main.FALSE:
3448 # Already in set, probably fine
3449 pass
3450 elif addResponses[ i ] == main.ERROR:
3451 # Error in execution
3452 addResults = main.FALSE
3453 else:
3454 # unexpected result
3455 addResults = main.FALSE
3456 if addResults != main.TRUE:
3457 main.log.error( "Error executing set add" )
3458
3459 # Check if set is still correct
3460 size = len( onosSet )
3461 getResponses = []
3462 threads = []
3463 for i in main.activeNodes:
3464 t = main.Thread( target=main.CLIs[i].setTestGet,
3465 name="setTestGet-" + str( i ),
3466 args=[ onosSetName ] )
3467 threads.append( t )
3468 t.start()
3469 for t in threads:
3470 t.join()
3471 getResponses.append( t.result )
3472 getResults = main.TRUE
3473 for i in range( len( main.activeNodes ) ):
3474 node = str( main.activeNodes[i] + 1 )
3475 if isinstance( getResponses[ i ], list):
3476 current = set( getResponses[ i ] )
3477 if len( current ) == len( getResponses[ i ] ):
3478 # no repeats
3479 if onosSet != current:
3480 main.log.error( "ONOS" + node + " has incorrect view" +
3481 " of set " + onosSetName + ":\n" +
3482 str( getResponses[ i ] ) )
3483 main.log.debug( "Expected: " + str( onosSet ) )
3484 main.log.debug( "Actual: " + str( current ) )
3485 getResults = main.FALSE
3486 else:
3487 # error, set is not a set
3488 main.log.error( "ONOS" + node + " has repeat elements in" +
3489 " set " + onosSetName + ":\n" +
3490 str( getResponses[ i ] ) )
3491 getResults = main.FALSE
3492 elif getResponses[ i ] == main.ERROR:
3493 getResults = main.FALSE
3494 sizeResponses = []
3495 threads = []
3496 for i in main.activeNodes:
3497 t = main.Thread( target=main.CLIs[i].setTestSize,
3498 name="setTestSize-" + str( i ),
3499 args=[ onosSetName ] )
3500 threads.append( t )
3501 t.start()
3502 for t in threads:
3503 t.join()
3504 sizeResponses.append( t.result )
3505 sizeResults = main.TRUE
3506 for i in range( len( main.activeNodes ) ):
3507 node = str( main.activeNodes[i] + 1 )
3508 if size != sizeResponses[ i ]:
3509 sizeResults = main.FALSE
3510 main.log.error( "ONOS" + node +
3511 " expected a size of " + str( size ) +
3512 " for set " + onosSetName +
3513 " but got " + str( sizeResponses[ i ] ) )
3514 addResults = addResults and getResults and sizeResults
3515 utilities.assert_equals( expect=main.TRUE,
3516 actual=addResults,
3517 onpass="Set add correct",
3518 onfail="Set add was incorrect" )
3519
3520 main.step( "Distributed Set addAll()" )
3521 onosSet.update( addAllValue.split() )
3522 addResponses = []
3523 threads = []
3524 for i in main.activeNodes:
3525 t = main.Thread( target=main.CLIs[i].setTestAdd,
3526 name="setTestAddAll-" + str( i ),
3527 args=[ onosSetName, addAllValue ] )
3528 threads.append( t )
3529 t.start()
3530 for t in threads:
3531 t.join()
3532 addResponses.append( t.result )
3533
3534 # main.TRUE = successfully changed the set
3535 # main.FALSE = action resulted in no change in set
3536 # main.ERROR - Some error in executing the function
3537 addAllResults = main.TRUE
3538 for i in range( len( main.activeNodes ) ):
3539 if addResponses[ i ] == main.TRUE:
3540 # All is well
3541 pass
3542 elif addResponses[ i ] == main.FALSE:
3543 # Already in set, probably fine
3544 pass
3545 elif addResponses[ i ] == main.ERROR:
3546 # Error in execution
3547 addAllResults = main.FALSE
3548 else:
3549 # unexpected result
3550 addAllResults = main.FALSE
3551 if addAllResults != main.TRUE:
3552 main.log.error( "Error executing set addAll" )
3553
3554 # Check if set is still correct
3555 size = len( onosSet )
3556 getResponses = []
3557 threads = []
3558 for i in main.activeNodes:
3559 t = main.Thread( target=main.CLIs[i].setTestGet,
3560 name="setTestGet-" + str( i ),
3561 args=[ onosSetName ] )
3562 threads.append( t )
3563 t.start()
3564 for t in threads:
3565 t.join()
3566 getResponses.append( t.result )
3567 getResults = main.TRUE
3568 for i in range( len( main.activeNodes ) ):
3569 node = str( main.activeNodes[i] + 1 )
3570 if isinstance( getResponses[ i ], list):
3571 current = set( getResponses[ i ] )
3572 if len( current ) == len( getResponses[ i ] ):
3573 # no repeats
3574 if onosSet != current:
3575 main.log.error( "ONOS" + node +
3576 " has incorrect view" +
3577 " of set " + onosSetName + ":\n" +
3578 str( getResponses[ i ] ) )
3579 main.log.debug( "Expected: " + str( onosSet ) )
3580 main.log.debug( "Actual: " + str( current ) )
3581 getResults = main.FALSE
3582 else:
3583 # error, set is not a set
3584 main.log.error( "ONOS" + node +
3585 " has repeat elements in" +
3586 " set " + onosSetName + ":\n" +
3587 str( getResponses[ i ] ) )
3588 getResults = main.FALSE
3589 elif getResponses[ i ] == main.ERROR:
3590 getResults = main.FALSE
3591 sizeResponses = []
3592 threads = []
3593 for i in main.activeNodes:
3594 t = main.Thread( target=main.CLIs[i].setTestSize,
3595 name="setTestSize-" + str( i ),
3596 args=[ onosSetName ] )
3597 threads.append( t )
3598 t.start()
3599 for t in threads:
3600 t.join()
3601 sizeResponses.append( t.result )
3602 sizeResults = main.TRUE
3603 for i in range( len( main.activeNodes ) ):
3604 node = str( main.activeNodes[i] + 1 )
3605 if size != sizeResponses[ i ]:
3606 sizeResults = main.FALSE
3607 main.log.error( "ONOS" + node +
3608 " expected a size of " + str( size ) +
3609 " for set " + onosSetName +
3610 " but got " + str( sizeResponses[ i ] ) )
3611 addAllResults = addAllResults and getResults and sizeResults
3612 utilities.assert_equals( expect=main.TRUE,
3613 actual=addAllResults,
3614 onpass="Set addAll correct",
3615 onfail="Set addAll was incorrect" )
3616
3617 main.step( "Distributed Set contains()" )
3618 containsResponses = []
3619 threads = []
3620 for i in main.activeNodes:
3621 t = main.Thread( target=main.CLIs[i].setTestGet,
3622 name="setContains-" + str( i ),
3623 args=[ onosSetName ],
3624 kwargs={ "values": addValue } )
3625 threads.append( t )
3626 t.start()
3627 for t in threads:
3628 t.join()
3629 # NOTE: This is the tuple
3630 containsResponses.append( t.result )
3631
3632 containsResults = main.TRUE
3633 for i in range( len( main.activeNodes ) ):
3634 if containsResponses[ i ] == main.ERROR:
3635 containsResults = main.FALSE
3636 else:
3637 containsResults = containsResults and\
3638 containsResponses[ i ][ 1 ]
3639 utilities.assert_equals( expect=main.TRUE,
3640 actual=containsResults,
3641 onpass="Set contains is functional",
3642 onfail="Set contains failed" )
3643
3644 main.step( "Distributed Set containsAll()" )
3645 containsAllResponses = []
3646 threads = []
3647 for i in main.activeNodes:
3648 t = main.Thread( target=main.CLIs[i].setTestGet,
3649 name="setContainsAll-" + str( i ),
3650 args=[ onosSetName ],
3651 kwargs={ "values": addAllValue } )
3652 threads.append( t )
3653 t.start()
3654 for t in threads:
3655 t.join()
3656 # NOTE: This is the tuple
3657 containsAllResponses.append( t.result )
3658
3659 containsAllResults = main.TRUE
3660 for i in range( len( main.activeNodes ) ):
3661 if containsResponses[ i ] == main.ERROR:
3662 containsResults = main.FALSE
3663 else:
3664 containsResults = containsResults and\
3665 containsResponses[ i ][ 1 ]
3666 utilities.assert_equals( expect=main.TRUE,
3667 actual=containsAllResults,
3668 onpass="Set containsAll is functional",
3669 onfail="Set containsAll failed" )
3670
3671 main.step( "Distributed Set remove()" )
3672 onosSet.remove( addValue )
3673 removeResponses = []
3674 threads = []
3675 for i in main.activeNodes:
3676 t = main.Thread( target=main.CLIs[i].setTestRemove,
3677 name="setTestRemove-" + str( i ),
3678 args=[ onosSetName, addValue ] )
3679 threads.append( t )
3680 t.start()
3681 for t in threads:
3682 t.join()
3683 removeResponses.append( t.result )
3684
3685 # main.TRUE = successfully changed the set
3686 # main.FALSE = action resulted in no change in set
3687 # main.ERROR - Some error in executing the function
3688 removeResults = main.TRUE
3689 for i in range( len( main.activeNodes ) ):
3690 if removeResponses[ i ] == main.TRUE:
3691 # All is well
3692 pass
3693 elif removeResponses[ i ] == main.FALSE:
3694 # not in set, probably fine
3695 pass
3696 elif removeResponses[ i ] == main.ERROR:
3697 # Error in execution
3698 removeResults = main.FALSE
3699 else:
3700 # unexpected result
3701 removeResults = main.FALSE
3702 if removeResults != main.TRUE:
3703 main.log.error( "Error executing set remove" )
3704
3705 # Check if set is still correct
3706 size = len( onosSet )
3707 getResponses = []
3708 threads = []
3709 for i in main.activeNodes:
3710 t = main.Thread( target=main.CLIs[i].setTestGet,
3711 name="setTestGet-" + str( i ),
3712 args=[ onosSetName ] )
3713 threads.append( t )
3714 t.start()
3715 for t in threads:
3716 t.join()
3717 getResponses.append( t.result )
3718 getResults = main.TRUE
3719 for i in range( len( main.activeNodes ) ):
3720 node = str( main.activeNodes[i] + 1 )
3721 if isinstance( getResponses[ i ], list):
3722 current = set( getResponses[ i ] )
3723 if len( current ) == len( getResponses[ i ] ):
3724 # no repeats
3725 if onosSet != current:
3726 main.log.error( "ONOS" + node +
3727 " has incorrect view" +
3728 " of set " + onosSetName + ":\n" +
3729 str( getResponses[ i ] ) )
3730 main.log.debug( "Expected: " + str( onosSet ) )
3731 main.log.debug( "Actual: " + str( current ) )
3732 getResults = main.FALSE
3733 else:
3734 # error, set is not a set
3735 main.log.error( "ONOS" + node +
3736 " has repeat elements in" +
3737 " set " + onosSetName + ":\n" +
3738 str( getResponses[ i ] ) )
3739 getResults = main.FALSE
3740 elif getResponses[ i ] == main.ERROR:
3741 getResults = main.FALSE
3742 sizeResponses = []
3743 threads = []
3744 for i in main.activeNodes:
3745 t = main.Thread( target=main.CLIs[i].setTestSize,
3746 name="setTestSize-" + str( i ),
3747 args=[ onosSetName ] )
3748 threads.append( t )
3749 t.start()
3750 for t in threads:
3751 t.join()
3752 sizeResponses.append( t.result )
3753 sizeResults = main.TRUE
3754 for i in range( len( main.activeNodes ) ):
3755 node = str( main.activeNodes[i] + 1 )
3756 if size != sizeResponses[ i ]:
3757 sizeResults = main.FALSE
3758 main.log.error( "ONOS" + node +
3759 " expected a size of " + str( size ) +
3760 " for set " + onosSetName +
3761 " but got " + str( sizeResponses[ i ] ) )
3762 removeResults = removeResults and getResults and sizeResults
3763 utilities.assert_equals( expect=main.TRUE,
3764 actual=removeResults,
3765 onpass="Set remove correct",
3766 onfail="Set remove was incorrect" )
3767
3768 main.step( "Distributed Set removeAll()" )
3769 onosSet.difference_update( addAllValue.split() )
3770 removeAllResponses = []
3771 threads = []
3772 try:
3773 for i in main.activeNodes:
3774 t = main.Thread( target=main.CLIs[i].setTestRemove,
3775 name="setTestRemoveAll-" + str( i ),
3776 args=[ onosSetName, addAllValue ] )
3777 threads.append( t )
3778 t.start()
3779 for t in threads:
3780 t.join()
3781 removeAllResponses.append( t.result )
3782 except Exception, e:
3783 main.log.exception(e)
3784
3785 # main.TRUE = successfully changed the set
3786 # main.FALSE = action resulted in no change in set
3787 # main.ERROR - Some error in executing the function
3788 removeAllResults = main.TRUE
3789 for i in range( len( main.activeNodes ) ):
3790 if removeAllResponses[ i ] == main.TRUE:
3791 # All is well
3792 pass
3793 elif removeAllResponses[ i ] == main.FALSE:
3794 # not in set, probably fine
3795 pass
3796 elif removeAllResponses[ i ] == main.ERROR:
3797 # Error in execution
3798 removeAllResults = main.FALSE
3799 else:
3800 # unexpected result
3801 removeAllResults = main.FALSE
3802 if removeAllResults != main.TRUE:
3803 main.log.error( "Error executing set removeAll" )
3804
3805 # Check if set is still correct
3806 size = len( onosSet )
3807 getResponses = []
3808 threads = []
3809 for i in main.activeNodes:
3810 t = main.Thread( target=main.CLIs[i].setTestGet,
3811 name="setTestGet-" + str( i ),
3812 args=[ onosSetName ] )
3813 threads.append( t )
3814 t.start()
3815 for t in threads:
3816 t.join()
3817 getResponses.append( t.result )
3818 getResults = main.TRUE
3819 for i in range( len( main.activeNodes ) ):
3820 node = str( main.activeNodes[i] + 1 )
3821 if isinstance( getResponses[ i ], list):
3822 current = set( getResponses[ i ] )
3823 if len( current ) == len( getResponses[ i ] ):
3824 # no repeats
3825 if onosSet != current:
3826 main.log.error( "ONOS" + node +
3827 " has incorrect view" +
3828 " of set " + onosSetName + ":\n" +
3829 str( getResponses[ i ] ) )
3830 main.log.debug( "Expected: " + str( onosSet ) )
3831 main.log.debug( "Actual: " + str( current ) )
3832 getResults = main.FALSE
3833 else:
3834 # error, set is not a set
3835 main.log.error( "ONOS" + node +
3836 " has repeat elements in" +
3837 " set " + onosSetName + ":\n" +
3838 str( getResponses[ i ] ) )
3839 getResults = main.FALSE
3840 elif getResponses[ i ] == main.ERROR:
3841 getResults = main.FALSE
3842 sizeResponses = []
3843 threads = []
3844 for i in main.activeNodes:
3845 t = main.Thread( target=main.CLIs[i].setTestSize,
3846 name="setTestSize-" + str( i ),
3847 args=[ onosSetName ] )
3848 threads.append( t )
3849 t.start()
3850 for t in threads:
3851 t.join()
3852 sizeResponses.append( t.result )
3853 sizeResults = main.TRUE
3854 for i in range( len( main.activeNodes ) ):
3855 node = str( main.activeNodes[i] + 1 )
3856 if size != sizeResponses[ i ]:
3857 sizeResults = main.FALSE
3858 main.log.error( "ONOS" + node +
3859 " expected a size of " + str( size ) +
3860 " for set " + onosSetName +
3861 " but got " + str( sizeResponses[ i ] ) )
3862 removeAllResults = removeAllResults and getResults and sizeResults
3863 utilities.assert_equals( expect=main.TRUE,
3864 actual=removeAllResults,
3865 onpass="Set removeAll correct",
3866 onfail="Set removeAll was incorrect" )
3867
3868 main.step( "Distributed Set addAll()" )
3869 onosSet.update( addAllValue.split() )
3870 addResponses = []
3871 threads = []
3872 for i in main.activeNodes:
3873 t = main.Thread( target=main.CLIs[i].setTestAdd,
3874 name="setTestAddAll-" + str( i ),
3875 args=[ onosSetName, addAllValue ] )
3876 threads.append( t )
3877 t.start()
3878 for t in threads:
3879 t.join()
3880 addResponses.append( t.result )
3881
3882 # main.TRUE = successfully changed the set
3883 # main.FALSE = action resulted in no change in set
3884 # main.ERROR - Some error in executing the function
3885 addAllResults = main.TRUE
3886 for i in range( len( main.activeNodes ) ):
3887 if addResponses[ i ] == main.TRUE:
3888 # All is well
3889 pass
3890 elif addResponses[ i ] == main.FALSE:
3891 # Already in set, probably fine
3892 pass
3893 elif addResponses[ i ] == main.ERROR:
3894 # Error in execution
3895 addAllResults = main.FALSE
3896 else:
3897 # unexpected result
3898 addAllResults = main.FALSE
3899 if addAllResults != main.TRUE:
3900 main.log.error( "Error executing set addAll" )
3901
3902 # Check if set is still correct
3903 size = len( onosSet )
3904 getResponses = []
3905 threads = []
3906 for i in main.activeNodes:
3907 t = main.Thread( target=main.CLIs[i].setTestGet,
3908 name="setTestGet-" + str( i ),
3909 args=[ onosSetName ] )
3910 threads.append( t )
3911 t.start()
3912 for t in threads:
3913 t.join()
3914 getResponses.append( t.result )
3915 getResults = main.TRUE
3916 for i in range( len( main.activeNodes ) ):
3917 node = str( main.activeNodes[i] + 1 )
3918 if isinstance( getResponses[ i ], list):
3919 current = set( getResponses[ i ] )
3920 if len( current ) == len( getResponses[ i ] ):
3921 # no repeats
3922 if onosSet != current:
3923 main.log.error( "ONOS" + node +
3924 " has incorrect view" +
3925 " of set " + onosSetName + ":\n" +
3926 str( getResponses[ i ] ) )
3927 main.log.debug( "Expected: " + str( onosSet ) )
3928 main.log.debug( "Actual: " + str( current ) )
3929 getResults = main.FALSE
3930 else:
3931 # error, set is not a set
3932 main.log.error( "ONOS" + node +
3933 " has repeat elements in" +
3934 " set " + onosSetName + ":\n" +
3935 str( getResponses[ i ] ) )
3936 getResults = main.FALSE
3937 elif getResponses[ i ] == main.ERROR:
3938 getResults = main.FALSE
3939 sizeResponses = []
3940 threads = []
3941 for i in main.activeNodes:
3942 t = main.Thread( target=main.CLIs[i].setTestSize,
3943 name="setTestSize-" + str( i ),
3944 args=[ onosSetName ] )
3945 threads.append( t )
3946 t.start()
3947 for t in threads:
3948 t.join()
3949 sizeResponses.append( t.result )
3950 sizeResults = main.TRUE
3951 for i in range( len( main.activeNodes ) ):
3952 node = str( main.activeNodes[i] + 1 )
3953 if size != sizeResponses[ i ]:
3954 sizeResults = main.FALSE
3955 main.log.error( "ONOS" + node +
3956 " expected a size of " + str( size ) +
3957 " for set " + onosSetName +
3958 " but got " + str( sizeResponses[ i ] ) )
3959 addAllResults = addAllResults and getResults and sizeResults
3960 utilities.assert_equals( expect=main.TRUE,
3961 actual=addAllResults,
3962 onpass="Set addAll correct",
3963 onfail="Set addAll was incorrect" )
3964
3965 main.step( "Distributed Set clear()" )
3966 onosSet.clear()
3967 clearResponses = []
3968 threads = []
3969 for i in main.activeNodes:
3970 t = main.Thread( target=main.CLIs[i].setTestRemove,
3971 name="setTestClear-" + str( i ),
3972 args=[ onosSetName, " "], # Values doesn't matter
3973 kwargs={ "clear": True } )
3974 threads.append( t )
3975 t.start()
3976 for t in threads:
3977 t.join()
3978 clearResponses.append( t.result )
3979
3980 # main.TRUE = successfully changed the set
3981 # main.FALSE = action resulted in no change in set
3982 # main.ERROR - Some error in executing the function
3983 clearResults = main.TRUE
3984 for i in range( len( main.activeNodes ) ):
3985 if clearResponses[ i ] == main.TRUE:
3986 # All is well
3987 pass
3988 elif clearResponses[ i ] == main.FALSE:
3989 # Nothing set, probably fine
3990 pass
3991 elif clearResponses[ i ] == main.ERROR:
3992 # Error in execution
3993 clearResults = main.FALSE
3994 else:
3995 # unexpected result
3996 clearResults = main.FALSE
3997 if clearResults != main.TRUE:
3998 main.log.error( "Error executing set clear" )
3999
4000 # Check if set is still correct
4001 size = len( onosSet )
4002 getResponses = []
4003 threads = []
4004 for i in main.activeNodes:
4005 t = main.Thread( target=main.CLIs[i].setTestGet,
4006 name="setTestGet-" + str( i ),
4007 args=[ onosSetName ] )
4008 threads.append( t )
4009 t.start()
4010 for t in threads:
4011 t.join()
4012 getResponses.append( t.result )
4013 getResults = main.TRUE
4014 for i in range( len( main.activeNodes ) ):
4015 node = str( main.activeNodes[i] + 1 )
4016 if isinstance( getResponses[ i ], list):
4017 current = set( getResponses[ i ] )
4018 if len( current ) == len( getResponses[ i ] ):
4019 # no repeats
4020 if onosSet != current:
4021 main.log.error( "ONOS" + node +
4022 " has incorrect view" +
4023 " of set " + onosSetName + ":\n" +
4024 str( getResponses[ i ] ) )
4025 main.log.debug( "Expected: " + str( onosSet ) )
4026 main.log.debug( "Actual: " + str( current ) )
4027 getResults = main.FALSE
4028 else:
4029 # error, set is not a set
4030 main.log.error( "ONOS" + node +
4031 " has repeat elements in" +
4032 " set " + onosSetName + ":\n" +
4033 str( getResponses[ i ] ) )
4034 getResults = main.FALSE
4035 elif getResponses[ i ] == main.ERROR:
4036 getResults = main.FALSE
4037 sizeResponses = []
4038 threads = []
4039 for i in main.activeNodes:
4040 t = main.Thread( target=main.CLIs[i].setTestSize,
4041 name="setTestSize-" + str( i ),
4042 args=[ onosSetName ] )
4043 threads.append( t )
4044 t.start()
4045 for t in threads:
4046 t.join()
4047 sizeResponses.append( t.result )
4048 sizeResults = main.TRUE
4049 for i in range( len( main.activeNodes ) ):
4050 node = str( main.activeNodes[i] + 1 )
4051 if size != sizeResponses[ i ]:
4052 sizeResults = main.FALSE
4053 main.log.error( "ONOS" + node +
4054 " expected a size of " + str( size ) +
4055 " for set " + onosSetName +
4056 " but got " + str( sizeResponses[ i ] ) )
4057 clearResults = clearResults and getResults and sizeResults
4058 utilities.assert_equals( expect=main.TRUE,
4059 actual=clearResults,
4060 onpass="Set clear correct",
4061 onfail="Set clear was incorrect" )
4062
4063 main.step( "Distributed Set addAll()" )
4064 onosSet.update( addAllValue.split() )
4065 addResponses = []
4066 threads = []
4067 for i in main.activeNodes:
4068 t = main.Thread( target=main.CLIs[i].setTestAdd,
4069 name="setTestAddAll-" + str( i ),
4070 args=[ onosSetName, addAllValue ] )
4071 threads.append( t )
4072 t.start()
4073 for t in threads:
4074 t.join()
4075 addResponses.append( t.result )
4076
4077 # main.TRUE = successfully changed the set
4078 # main.FALSE = action resulted in no change in set
4079 # main.ERROR - Some error in executing the function
4080 addAllResults = main.TRUE
4081 for i in range( len( main.activeNodes ) ):
4082 if addResponses[ i ] == main.TRUE:
4083 # All is well
4084 pass
4085 elif addResponses[ i ] == main.FALSE:
4086 # Already in set, probably fine
4087 pass
4088 elif addResponses[ i ] == main.ERROR:
4089 # Error in execution
4090 addAllResults = main.FALSE
4091 else:
4092 # unexpected result
4093 addAllResults = main.FALSE
4094 if addAllResults != main.TRUE:
4095 main.log.error( "Error executing set addAll" )
4096
4097 # Check if set is still correct
4098 size = len( onosSet )
4099 getResponses = []
4100 threads = []
4101 for i in main.activeNodes:
4102 t = main.Thread( target=main.CLIs[i].setTestGet,
4103 name="setTestGet-" + str( i ),
4104 args=[ onosSetName ] )
4105 threads.append( t )
4106 t.start()
4107 for t in threads:
4108 t.join()
4109 getResponses.append( t.result )
4110 getResults = main.TRUE
4111 for i in range( len( main.activeNodes ) ):
4112 node = str( main.activeNodes[i] + 1 )
4113 if isinstance( getResponses[ i ], list):
4114 current = set( getResponses[ i ] )
4115 if len( current ) == len( getResponses[ i ] ):
4116 # no repeats
4117 if onosSet != current:
4118 main.log.error( "ONOS" + node +
4119 " has incorrect view" +
4120 " of set " + onosSetName + ":\n" +
4121 str( getResponses[ i ] ) )
4122 main.log.debug( "Expected: " + str( onosSet ) )
4123 main.log.debug( "Actual: " + str( current ) )
4124 getResults = main.FALSE
4125 else:
4126 # error, set is not a set
4127 main.log.error( "ONOS" + node +
4128 " has repeat elements in" +
4129 " set " + onosSetName + ":\n" +
4130 str( getResponses[ i ] ) )
4131 getResults = main.FALSE
4132 elif getResponses[ i ] == main.ERROR:
4133 getResults = main.FALSE
4134 sizeResponses = []
4135 threads = []
4136 for i in main.activeNodes:
4137 t = main.Thread( target=main.CLIs[i].setTestSize,
4138 name="setTestSize-" + str( i ),
4139 args=[ onosSetName ] )
4140 threads.append( t )
4141 t.start()
4142 for t in threads:
4143 t.join()
4144 sizeResponses.append( t.result )
4145 sizeResults = main.TRUE
4146 for i in range( len( main.activeNodes ) ):
4147 node = str( main.activeNodes[i] + 1 )
4148 if size != sizeResponses[ i ]:
4149 sizeResults = main.FALSE
4150 main.log.error( "ONOS" + node +
4151 " expected a size of " + str( size ) +
4152 " for set " + onosSetName +
4153 " but got " + str( sizeResponses[ i ] ) )
4154 addAllResults = addAllResults and getResults and sizeResults
4155 utilities.assert_equals( expect=main.TRUE,
4156 actual=addAllResults,
4157 onpass="Set addAll correct",
4158 onfail="Set addAll was incorrect" )
4159
4160 main.step( "Distributed Set retain()" )
4161 onosSet.intersection_update( retainValue.split() )
4162 retainResponses = []
4163 threads = []
4164 for i in main.activeNodes:
4165 t = main.Thread( target=main.CLIs[i].setTestRemove,
4166 name="setTestRetain-" + str( i ),
4167 args=[ onosSetName, retainValue ],
4168 kwargs={ "retain": True } )
4169 threads.append( t )
4170 t.start()
4171 for t in threads:
4172 t.join()
4173 retainResponses.append( t.result )
4174
4175 # main.TRUE = successfully changed the set
4176 # main.FALSE = action resulted in no change in set
4177 # main.ERROR - Some error in executing the function
4178 retainResults = main.TRUE
4179 for i in range( len( main.activeNodes ) ):
4180 if retainResponses[ i ] == main.TRUE:
4181 # All is well
4182 pass
4183 elif retainResponses[ i ] == main.FALSE:
4184 # Already in set, probably fine
4185 pass
4186 elif retainResponses[ i ] == main.ERROR:
4187 # Error in execution
4188 retainResults = main.FALSE
4189 else:
4190 # unexpected result
4191 retainResults = main.FALSE
4192 if retainResults != main.TRUE:
4193 main.log.error( "Error executing set retain" )
4194
4195 # Check if set is still correct
4196 size = len( onosSet )
4197 getResponses = []
4198 threads = []
4199 for i in main.activeNodes:
4200 t = main.Thread( target=main.CLIs[i].setTestGet,
4201 name="setTestGet-" + str( i ),
4202 args=[ onosSetName ] )
4203 threads.append( t )
4204 t.start()
4205 for t in threads:
4206 t.join()
4207 getResponses.append( t.result )
4208 getResults = main.TRUE
4209 for i in range( len( main.activeNodes ) ):
4210 node = str( main.activeNodes[i] + 1 )
4211 if isinstance( getResponses[ i ], list):
4212 current = set( getResponses[ i ] )
4213 if len( current ) == len( getResponses[ i ] ):
4214 # no repeats
4215 if onosSet != current:
4216 main.log.error( "ONOS" + node +
4217 " has incorrect view" +
4218 " of set " + onosSetName + ":\n" +
4219 str( getResponses[ i ] ) )
4220 main.log.debug( "Expected: " + str( onosSet ) )
4221 main.log.debug( "Actual: " + str( current ) )
4222 getResults = main.FALSE
4223 else:
4224 # error, set is not a set
4225 main.log.error( "ONOS" + node +
4226 " has repeat elements in" +
4227 " set " + onosSetName + ":\n" +
4228 str( getResponses[ i ] ) )
4229 getResults = main.FALSE
4230 elif getResponses[ i ] == main.ERROR:
4231 getResults = main.FALSE
4232 sizeResponses = []
4233 threads = []
4234 for i in main.activeNodes:
4235 t = main.Thread( target=main.CLIs[i].setTestSize,
4236 name="setTestSize-" + str( i ),
4237 args=[ onosSetName ] )
4238 threads.append( t )
4239 t.start()
4240 for t in threads:
4241 t.join()
4242 sizeResponses.append( t.result )
4243 sizeResults = main.TRUE
4244 for i in range( len( main.activeNodes ) ):
4245 node = str( main.activeNodes[i] + 1 )
4246 if size != sizeResponses[ i ]:
4247 sizeResults = main.FALSE
4248 main.log.error( "ONOS" + node + " expected a size of " +
4249 str( size ) + " for set " + onosSetName +
4250 " but got " + str( sizeResponses[ i ] ) )
4251 retainResults = retainResults and getResults and sizeResults
4252 utilities.assert_equals( expect=main.TRUE,
4253 actual=retainResults,
4254 onpass="Set retain correct",
4255 onfail="Set retain was incorrect" )
4256
4257 # Transactional maps
4258 main.step( "Partitioned Transactional maps put" )
4259 tMapValue = "Testing"
4260 numKeys = 100
4261 putResult = True
4262 node = main.activeNodes[0]
4263 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4264 if putResponses and len( putResponses ) == 100:
4265 for i in putResponses:
4266 if putResponses[ i ][ 'value' ] != tMapValue:
4267 putResult = False
4268 else:
4269 putResult = False
4270 if not putResult:
4271 main.log.debug( "Put response values: " + str( putResponses ) )
4272 utilities.assert_equals( expect=True,
4273 actual=putResult,
4274 onpass="Partitioned Transactional Map put successful",
4275 onfail="Partitioned Transactional Map put values are incorrect" )
4276
4277 main.step( "Partitioned Transactional maps get" )
4278 getCheck = True
4279 for n in range( 1, numKeys + 1 ):
4280 getResponses = []
4281 threads = []
4282 valueCheck = True
4283 for i in main.activeNodes:
4284 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4285 name="TMap-get-" + str( i ),
4286 args=[ "Key" + str( n ) ] )
4287 threads.append( t )
4288 t.start()
4289 for t in threads:
4290 t.join()
4291 getResponses.append( t.result )
4292 for node in getResponses:
4293 if node != tMapValue:
4294 valueCheck = False
4295 if not valueCheck:
4296 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4297 main.log.warn( getResponses )
4298 getCheck = getCheck and valueCheck
4299 utilities.assert_equals( expect=True,
4300 actual=getCheck,
4301 onpass="Partitioned Transactional Map get values were correct",
4302 onfail="Partitioned Transactional Map values incorrect" )