"""
Description: This test is to determine if ONOS can handle
    a full network partition

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE61: The Failure inducing case.
CASE62: The Failure recovery case.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: Start election app on all onos nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""


class HAfullNetPartition:

    def __init__( self ):
        self.default = ''

    def CASE1( self, main ):
        """
        CASE1 is to compile ONOS and push it to the test machines

        Startup sequence:
        cell <name>
        onos-verify-cell
        NOTE: temporary - onos-remove-raft-logs
        onos-uninstall
        start mininet
        git pull
        mvn clean install
        onos-package
        onos-install -f
        onos-wait-for-start
        start cli sessions
        start tcpdump
        """
        import imp
        import pexpect
        import time
        import json
        main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
                       "initialization" )
        main.case( "Setting up test environment" )
        main.caseExplanation = "Setup the test environment including " +\
                               "installing ONOS, starting Mininet and ONOS " +\
                               "cli sessions."

        # load some variables from the params file
        PULLCODE = False
        if main.params[ 'Git' ] == 'True':
            PULLCODE = True
        gitBranch = main.params[ 'branch' ]
        cellName = main.params[ 'ENV' ][ 'cellName' ]

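        # numCtrls is the requested cluster size; it is capped below at the
        # number of ONOS nodes actually available on the test bench.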
        main.numCtrls = int( main.params[ 'num_controllers' ] )
        if main.ONOSbench.maxNodes:
            if main.ONOSbench.maxNodes < main.numCtrls:
                main.numCtrls = int( main.ONOSbench.maxNodes )
        # set global variables
        global ONOS1Port
        global ONOS2Port
        global ONOS3Port
        global ONOS4Port
        global ONOS5Port
        global ONOS6Port
        global ONOS7Port
        # These are for csv plotting in jenkins
        global labels
        global data
        labels = []
        data = []

        # FIXME: just get controller port from params?
        # TODO: do we really need all these?
        ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
        ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
        ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
        ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
        ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
        ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
        ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]

        try:
            from tests.HAsanity.dependencies.Counters import Counters
            main.Counters = Counters()
        except Exception as e:
            main.log.exception( e )
            main.cleanup()
            main.exit()

        main.CLIs = []
        main.nodes = []
        ipList = []
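        # Grab the driver handles that TestON created for each ONOS node
        # ( ONOScli1..N and ONOS1..N ); stop at the first missing handle.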
        for i in range( 1, main.numCtrls + 1 ):
            try:
                main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
                main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
                ipList.append( main.nodes[ -1 ].ip_address )
            except AttributeError:
                break

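        # The cell file tells the ONOS tools which nodes form the cluster and
        # which apps to preload on them.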
        main.step( "Create cell file" )
        cellAppString = main.params[ 'ENV' ][ 'appString' ]
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
                                       main.Mininet1.ip_address,
                                       cellAppString, ipList )
        main.step( "Applying cell variable to environment" )
        cellResult = main.ONOSbench.setCell( cellName )
        verifyResult = main.ONOSbench.verifyCell()

        # FIXME: this is a short term fix
        main.log.info( "Removing raft logs" )
        main.ONOSbench.onosRemoveRaftLogs()

        main.log.info( "Uninstalling ONOS" )
        for node in main.nodes:
            main.ONOSbench.onosUninstall( node.ip_address )

        # Make sure ONOS is DEAD
        main.log.info( "Killing any ONOS processes" )
        killResults = main.TRUE
        for node in main.nodes:
            killed = main.ONOSbench.onosKill( node.ip_address )
            killResults = killResults and killed

        cleanInstallResult = main.TRUE
        gitPullResult = main.TRUE

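        # The obelisk test topology ( 28 switches, shipped with ONOS under
        # tools/test/topos ) is copied to the Mininet VM and started there.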
        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet( )
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

        main.step( "Git checkout and pull " + gitBranch )
        if PULLCODE:
            main.ONOSbench.gitCheckout( gitBranch )
            gitPullResult = main.ONOSbench.gitPull()
            # values of 1 or 3 are good
            utilities.assert_lesser( expect=0, actual=gitPullResult,
                                     onpass="Git pull successful",
                                     onfail="Git pull failed" )
        main.ONOSbench.getVersion( report=True )

        main.step( "Using mvn clean install" )
        cleanInstallResult = main.TRUE
        if PULLCODE and gitPullResult == main.TRUE:
            cleanInstallResult = main.ONOSbench.cleanInstall()
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=cleanInstallResult,
                                 onpass="MCI successful",
                                 onfail="MCI failed" )
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = "HAfullNetPartition"
        plotName = "Plot-HA"
        index = "1"
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + index +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

        main.step( "Creating ONOS package" )
        # copy gen-partitions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
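        # The test's own onos-gen-partitions script overrides the default
        # cluster partition layout before packaging; it is reverted with
        # 'git checkout' once the package has been installed.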
        srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
        packageResult = main.ONOSbench.onosPackage()
        utilities.assert_equals( expect=main.TRUE, actual=packageResult,
                                 onpass="ONOS package successful",
                                 onfail="ONOS package failed" )

        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for node in main.nodes:
            tmpResult = main.ONOSbench.onosInstall( options="-f",
                                                    node=node.ip_address )
            onosInstallResult = onosInstallResult and tmpResult
        utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
                                 onpass="ONOS install successful",
                                 onfail="ONOS install failed" )
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

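        # Poll each node's startup status, retrying the whole check once if
        # any node has not come up yet.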
        main.step( "Checking if ONOS is up yet" )
        for i in range( 2 ):
            onosIsupResult = main.TRUE
            for node in main.nodes:
                started = main.ONOSbench.isup( node.ip_address )
                if not started:
                    main.log.error( node.name + " hasn't started" )
                onosIsupResult = onosIsupResult and started
            if onosIsupResult == main.TRUE:
                break
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS startup successful",
                                 onfail="ONOS startup failed" )

        main.step( "Starting ONOS CLI sessions" )
        cliResults = main.TRUE
        threads = []
        for i in range( main.numCtrls ):
            t = main.Thread( target=main.CLIs[i].startOnosCli,
                             name="startOnosCli-" + str( i ),
                             args=[main.nodes[i].ip_address] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            cliResults = cliResults and t.result
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli startup successful",
                                 onfail="ONOS cli startup failed" )

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

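        # Verify cluster membership: every active node should report READY in
        # the 'nodes' output, and the set of READY IPs should match the nodes
        # this test installed.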
        main.step( "Checking ONOS nodes" )
        nodesOutput = []
        nodeResults = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].nodes,
                             name="nodes-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            nodesOutput.append( t.result )
        ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
        ips.sort()
        for i in nodesOutput:
            try:
                current = json.loads( i )
                activeIps = []
                currentResult = main.FALSE
                for node in current:
                    if node['state'] == 'READY':
                        activeIps.append( node['ip'] )
                activeIps.sort()
                if ips == activeIps:
                    currentResult = main.TRUE
            except ( ValueError, TypeError ):
                main.log.error( "Error parsing nodes output" )
                main.log.warn( repr( i ) )
                currentResult = main.FALSE
            nodeResults = nodeResults and currentResult
        utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for cli in main.CLIs:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )

        if cliResults == main.FALSE:
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

    def CASE2( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
                               "and check that an ONOS node becomes the " +\
                               "master of the device."
        main.step( "Assign switches to controllers" )

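        # Point every switch at the full list of controllers; the mastership
        # that ONOS chooses is checked below and reassigned explicitly in
        # CASE21.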
        ipList = []
        for i in range( main.numCtrls ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def CASE21( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
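            # Mastership map used below ( modulo the cluster size ):
            #   s1, s28  -> ONOS1      s2, s3  -> ONOS2
            #   s5, s6   -> ONOS3      s4      -> ONOS4
            #   s8-s17   -> ONOS5      s7      -> ONOS6
            #   s18-s27  -> ONOS7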
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )

    def CASE3( self, main ):
        """
        Assign intents
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %.2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
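        # Poll up to 100 times, one second apart, until every active node
        # reports the same set of intent IDs and every intent is INSTALLED;
        # the elapsed time approximates how long anti-entropy dispersion took.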
        for j in range( 100 ):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep( 1 )
        if not intentStop:
            intentStop = time.time()
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )

    def CASE4( self, main ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"
        main.step( "Ping across added host intents" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        PingResult = main.TRUE
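        # CASE3 paired h8-h17 with h18-h27; ping each pair across its
        # host-to-host intent.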
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

997
998 main.step( "Check leadership of topics" )
999 leaders = onosCli.leaders()
1000 topicCheck = main.TRUE
1001 try:
1002 if leaders:
1003 parsedLeaders = json.loads( leaders )
1004 main.log.warn( json.dumps( parsedLeaders,
1005 sort_keys=True,
1006 indent=4,
1007 separators=( ',', ': ' ) ) )
1008 # check for all intent partitions
1009 # check for election
1010 # TODO: Look at Devices as topics now that it uses this system
1011 topics = []
1012 for i in range( 14 ):
1013 topics.append( "intent-partition-" + str( i ) )
1014 # FIXME: this should only be after we start the app
1015 # FIXME: topics.append( "org.onosproject.election" )
1016 # Print leaders output
1017 main.log.debug( topics )
1018 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1019 for topic in topics:
1020 if topic not in ONOStopics:
1021 main.log.error( "Error: " + topic +
1022 " not in leaders" )
1023 topicCheck = main.FALSE
1024 else:
1025 main.log.error( "leaders() returned None" )
1026 topicCheck = main.FALSE
1027 except ( ValueError, TypeError ):
1028 topicCheck = main.FALSE
1029 main.log.exception( "Error parsing leaders" )
1030 main.log.error( repr( leaders ) )
1031 # TODO: Check for a leader of these topics
1032 # Check all nodes
1033 if topicCheck:
1034 for i in main.activeNodes:
1035 node = main.CLIs[i]
1036 response = node.leaders( jsonFormat=False)
1037 main.log.warn( str( node.name ) + " leaders output: \n" +
1038 str( response ) )
1039
1040 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
1041 onpass="intent Partitions is in leaders",
1042 onfail="Some topics were lost " )
1043 # Print partitions
1044 partitions = onosCli.partitions()
1045 try:
1046 if partitions :
1047 parsedPartitions = json.loads( partitions )
1048 main.log.warn( json.dumps( parsedPartitions,
1049 sort_keys=True,
1050 indent=4,
1051 separators=( ',', ': ' ) ) )
1052 # TODO check for a leader in all paritions
1053 # TODO check for consistency among nodes
1054 else:
1055 main.log.error( "partitions() returned None" )
1056 except ( ValueError, TypeError ):
1057 main.log.exception( "Error parsing partitions" )
1058 main.log.error( repr( partitions ) )
1059 # Print Pending Map
1060 pendingMap = onosCli.pendingMap()
1061 try:
1062 if pendingMap :
1063 parsedPending = json.loads( pendingMap )
1064 main.log.warn( json.dumps( parsedPending,
1065 sort_keys=True,
1066 indent=4,
1067 separators=( ',', ': ' ) ) )
1068 # TODO check something here?
1069 else:
1070 main.log.error( "pendingMap() returned None" )
1071 except ( ValueError, TypeError ):
1072 main.log.exception( "Error parsing pending map" )
1073 main.log.error( repr( pendingMap ) )
1074
1075 if not installedCheck:
1076 main.log.info( "Waiting 60 seconds to see if the state of " +
1077 "intents change" )
1078 time.sleep( 60 )
1079 # Print the intent states
1080 intents = onosCli.intents()
1081 intentStates = []
1082 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1083 count = 0
1084 # Iter through intents of a node
1085 try:
1086 for intent in json.loads( intents ):
1087 state = intent.get( 'state', None )
1088 if "INSTALLED" not in state:
1089 installedCheck = False
1090 intentId = intent.get( 'id', None )
1091 intentStates.append( ( intentId, state ) )
1092 except ( ValueError, TypeError ):
1093 main.log.exception( "Error parsing intents." )
1094 intentStates.sort()
1095 for i, s in intentStates:
1096 count += 1
1097 main.log.info( "%-6s%-15s%-15s" %
1098 ( str( count ), str( i ), str( s ) ) )
1099 leaders = onosCli.leaders()
1100 try:
1101 missing = False
1102 if leaders:
1103 parsedLeaders = json.loads( leaders )
1104 main.log.warn( json.dumps( parsedLeaders,
1105 sort_keys=True,
1106 indent=4,
1107 separators=( ',', ': ' ) ) )
1108 # check for all intent partitions
1109 # check for election
1110 topics = []
1111 for i in range( 14 ):
1112 topics.append( "intent-partition-" + str( i ) )
1113 # FIXME: this should only be after we start the app
1114 topics.append( "org.onosproject.election" )
1115 main.log.debug( topics )
1116 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1117 for topic in topics:
1118 if topic not in ONOStopics:
1119 main.log.error( "Error: " + topic +
1120 " not in leaders" )
1121 missing = True
1122 else:
1123 main.log.error( "leaders() returned None" )
1124 except ( ValueError, TypeError ):
1125 main.log.exception( "Error parsing leaders" )
1126 main.log.error( repr( leaders ) )
1127 if missing:
1128 for i in main.activeNodes:
1129 node = main.CLIs[i]
1130 response = node.leaders( jsonFormat=False)
1131 main.log.warn( str( node.name ) + " leaders output: \n" +
1132 str( response ) )
1133
1134 partitions = onosCli.partitions()
1135 try:
1136 if partitions :
1137 parsedPartitions = json.loads( partitions )
1138 main.log.warn( json.dumps( parsedPartitions,
1139 sort_keys=True,
1140 indent=4,
1141 separators=( ',', ': ' ) ) )
1142 # TODO check for a leader in all paritions
1143 # TODO check for consistency among nodes
1144 else:
1145 main.log.error( "partitions() returned None" )
1146 except ( ValueError, TypeError ):
1147 main.log.exception( "Error parsing partitions" )
1148 main.log.error( repr( partitions ) )
1149 pendingMap = onosCli.pendingMap()
1150 try:
1151 if pendingMap :
1152 parsedPending = json.loads( pendingMap )
1153 main.log.warn( json.dumps( parsedPending,
1154 sort_keys=True,
1155 indent=4,
1156 separators=( ',', ': ' ) ) )
1157 # TODO check something here?
1158 else:
1159 main.log.error( "pendingMap() returned None" )
1160 except ( ValueError, TypeError ):
1161 main.log.exception( "Error parsing pending map" )
1162 main.log.error( repr( pendingMap ) )
        # Print flowrules
        node = main.activeNodes[0]
        main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

    def CASE5( self, main ):
        """
        Reading state of ONOS
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents, flows, topology, ... ) from each ONOS node.
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'
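        # mastershipState, intentState, and flowState are stored globally so
        # that later cases can compare post-failure state against this
        # baseline.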

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...         ...         ...
            # ...         ...         ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep( 30 )
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[i] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        global flows
        flows = []
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
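        # Start long-running background pings between the host pairs named in
        # the params file; these keep running through the upcoming failure so
        # dataplane disruptions can be spotted afterwards.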
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source1' ],
            target=main.params[ 'PING' ][ 'target1' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source2' ],
            target=main.params[ 'PING' ][ 'target2' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source3' ],
            target=main.params[ 'PING' ][ 'target3' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source4' ],
            target=main.params[ 'PING' ][ 'target4' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source5' ],
            target=main.params[ 'PING' ][ 'target5' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source6' ],
            target=main.params[ 'PING' ][ 'target6' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source7' ],
            target=main.params[ 'PING' ][ 'target7' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source8' ],
            target=main.params[ 'PING' ][ 'target8' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source9' ],
            target=main.params[ 'PING' ][ 'target9' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source10' ],
            target=main.params[ 'PING' ][ 'target10' ],
            pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
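        # Query devices, hosts, ports, links, and clusters from every active
        # node in parallel; the per-node results are compared with each other
        # and with Mininet below.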
        devices = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].devices,
                             name="devices-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            devices.append( t.result )
        hosts = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].hosts,
                             name="hosts-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            try:
                hosts.append( json.loads( t.result ) )
            except ( ValueError, TypeError ):
                # FIXME: better handling of this, print which node
                #        Maybe use thread name?
                main.log.exception( "Error parsing json output of hosts" )
                main.log.warn( repr( t.result ) )
                hosts.append( None )

        ports = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].ports,
                             name="ports-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ports.append( t.result )
        links = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].links,
                             name="links-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            links.append( t.result )
        clusters = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].clusters,
                             name="clusters-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            clusters.append( t.result )
        # Compare json objects for hosts and dataplane clusters

1598 # hosts
1599 main.step( "Host view is consistent across ONOS nodes" )
1600 consistentHostsResult = main.TRUE
1601 for controller in range( len( hosts ) ):
1602 controllerStr = str( main.activeNodes[controller] + 1 )
1603 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1604 if hosts[ controller ] == hosts[ 0 ]:
1605 continue
1606 else: # hosts not consistent
1607 main.log.error( "hosts from ONOS" +
1608 controllerStr +
1609 " is inconsistent with ONOS1" )
1610 main.log.warn( repr( hosts[ controller ] ) )
1611 consistentHostsResult = main.FALSE
1612
1613 else:
1614 main.log.error( "Error in getting ONOS hosts from ONOS" +
1615 controllerStr )
1616 consistentHostsResult = main.FALSE
1617 main.log.warn( "ONOS" + controllerStr +
1618 " hosts response: " +
1619 repr( hosts[ controller ] ) )
1620 utilities.assert_equals(
1621 expect=main.TRUE,
1622 actual=consistentHostsResult,
1623 onpass="Hosts view is consistent across all ONOS nodes",
1624 onfail="ONOS nodes have different views of hosts" )
1625
1626 main.step( "Each host has an IP address" )
1627 ipResult = main.TRUE
1628 for controller in range( 0, len( hosts ) ):
1629 controllerStr = str( main.activeNodes[controller] + 1 )
1630 if hosts[ controller ]:
1631 for host in hosts[ controller ]:
1632 if not host.get( 'ipAddresses', [ ] ):
1633 main.log.error( "Error with host ips on controller" +
1634 controllerStr + ": " + str( host ) )
1635 ipResult = main.FALSE
1636 utilities.assert_equals(
1637 expect=main.TRUE,
1638 actual=ipResult,
1639 onpass="The ips of the hosts aren't empty",
1640 onfail="The ip of at least one host is missing" )
1641
1642 # Strongly connected clusters of devices
1643 main.step( "Cluster view is consistent across ONOS nodes" )
1644 consistentClustersResult = main.TRUE
1645 for controller in range( len( clusters ) ):
1646 controllerStr = str( main.activeNodes[controller] + 1 )
1647 if "Error" not in clusters[ controller ]:
1648 if clusters[ controller ] == clusters[ 0 ]:
1649 continue
1650 else: # clusters not consistent
1651 main.log.error( "clusters from ONOS" + controllerStr +
1652 " is inconsistent with ONOS1" )
1653 consistentClustersResult = main.FALSE
1654
1655 else:
1656 main.log.error( "Error in getting dataplane clusters " +
1657 "from ONOS" + controllerStr )
1658 consistentClustersResult = main.FALSE
1659 main.log.warn( "ONOS" + controllerStr +
1660 " clusters response: " +
1661 repr( clusters[ controller ] ) )
1662 utilities.assert_equals(
1663 expect=main.TRUE,
1664 actual=consistentClustersResult,
1665 onpass="Clusters view is consistent across all ONOS nodes",
1666 onfail="ONOS nodes have different views of clusters" )
1667 # there should always only be one cluster
1668 main.step( "Cluster view is correct across ONOS nodes" )
1669 try:
1670 numClusters = len( json.loads( clusters[ 0 ] ) )
1671 except ( ValueError, TypeError ):
1672 main.log.exception( "Error parsing clusters[0]: " +
1673 repr( clusters[ 0 ] ) )
1674 numClusters = "ERROR"
1675 clusterResults = main.FALSE
1676 if numClusters == 1:
1677 clusterResults = main.TRUE
1678 utilities.assert_equals(
1679 expect=1,
1680 actual=numClusters,
1681 onpass="ONOS shows 1 SCC",
1682 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1683
1684 main.step( "Comparing ONOS topology to MN" )
1685 devicesResults = main.TRUE
1686 linksResults = main.TRUE
1687 hostsResults = main.TRUE
1688 mnSwitches = main.Mininet1.getSwitches()
1689 mnLinks = main.Mininet1.getLinks()
1690 mnHosts = main.Mininet1.getHosts()
1691 for controller in range( len( main.activeNodes ) ):
1692 controllerStr = str( main.activeNodes[ controller ] + 1 )
1693 if devices[ controller ] and ports[ controller ] and\
1694 "Error" not in devices[ controller ] and\
1695 "Error" not in ports[ controller ]:
1696 currentDevicesResult = main.Mininet1.compareSwitches(
1697 mnSwitches,
1698 json.loads( devices[ controller ] ),
1699 json.loads( ports[ controller ] ) )
1700 else:
1701 currentDevicesResult = main.FALSE
1702 utilities.assert_equals( expect=main.TRUE,
1703 actual=currentDevicesResult,
1704 onpass="ONOS" + controllerStr +
1705 " Switches view is correct",
1706 onfail="ONOS" + controllerStr +
1707 " Switches view is incorrect" )
1708 if links[ controller ] and "Error" not in links[ controller ]:
1709 currentLinksResult = main.Mininet1.compareLinks(
1710 mnSwitches, mnLinks,
1711 json.loads( links[ controller ] ) )
1712 else:
1713 currentLinksResult = main.FALSE
1714 utilities.assert_equals( expect=main.TRUE,
1715 actual=currentLinksResult,
1716 onpass="ONOS" + controllerStr +
1717 " links view is correct",
1718 onfail="ONOS" + controllerStr +
1719 " links view is incorrect" )
1720
1721 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1722 currentHostsResult = main.Mininet1.compareHosts(
1723 mnHosts,
1724 hosts[ controller ] )
1725 else:
1726 currentHostsResult = main.FALSE
1727 utilities.assert_equals( expect=main.TRUE,
1728 actual=currentHostsResult,
1729 onpass="ONOS" + controllerStr +
1730 " hosts exist in Mininet",
1731 onfail="ONOS" + controllerStr +
1732 " hosts don't match Mininet" )
1733
1734 devicesResults = devicesResults and currentDevicesResult
1735 linksResults = linksResults and currentLinksResult
1736 hostsResults = hostsResults and currentHostsResult
1737
1738 main.step( "Device information is correct" )
1739 utilities.assert_equals(
1740 expect=main.TRUE,
1741 actual=devicesResults,
1742 onpass="Device information is correct",
1743 onfail="Device information is incorrect" )
1744
1745 main.step( "Links are correct" )
1746 utilities.assert_equals(
1747 expect=main.TRUE,
1748 actual=linksResults,
1749 onpass="Link are correct",
1750 onfail="Links are incorrect" )
1751
1752 main.step( "Hosts are correct" )
1753 utilities.assert_equals(
1754 expect=main.TRUE,
1755 actual=hostsResults,
1756 onpass="Hosts are correct",
1757 onfail="Hosts are incorrect" )
1758
1759 def CASE61( self, main ):
1760 """
1761 The Failure case.
1762 """
1763 import time
1764 assert main.numCtrls, "main.numCtrls not defined"
1765 assert main, "main not defined"
1766 assert utilities.assert_equals, "utilities.assert_equals not defined"
1767 assert main.CLIs, "main.CLIs not defined"
1768 assert main.nodes, "main.nodes not defined"
1769 main.case( "Partition ONOS nodes into two distinct partitions" )
1770
1771 main.step( "Checking ONOS Logs for errors" )
1772 for node in main.nodes:
1773 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1774 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1775
1776 n = len( main.nodes ) # Number of nodes
1777 p = ( ( n + 1 ) / 2 ) + 1 # Index just past the cluster midpoint; used to pick the second node to isolate
1778 main.partition = [ 0 ] # ONOS nodes to partition, listed by index in main.nodes
1779 if n > 3:
1780 main.partition.append( p - 1 )
1781 # NOTE: This only works for cluster sizes of 3, 5, or 7.
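 # Worked example of the arithmetic above (Python 2 integer division):
 # n = 5 -> p = ( 6 / 2 ) + 1 = 4, so main.partition = [ 0, 3 ], i.e.
 # ONOS1 and ONOS4 are isolated while the remaining three nodes still
 # hold a majority ( quorum ) of the 5-node cluster.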
1782
1783 main.step( "Partitioning ONOS nodes" )
1784 nodeList = [ str( i + 1 ) for i in main.partition ]
1785 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1786 partitionResults = main.TRUE
1787 for i in range( 0, n ):
1788 this = main.nodes[i]
1789 if i not in main.partition:
1790 for j in main.partition:
1791 foe = main.nodes[j]
1792 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1793 # Drop everything this (majority-side) node receives from the partitioned node
1794 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1795 this.handle.sendline( cmdStr )
1796 this.handle.expect( "\$" )
1797 main.log.debug( this.handle.before )
1798 else:
1799 for j in range( 0, n ):
1800 if j not in main.partition:
1801 foe = main.nodes[j]
1802 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1803 # Drop everything this (partitioned) node receives from the majority-side nodes
1804 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1805 this.handle.sendline( cmdStr )
1806 this.handle.expect( "\$" )
1807 main.log.debug( this.handle.before )
1808 main.activeNodes.remove( i )
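 # For illustration only ( hypothetical addresses ): if this.ip_address
 # were 10.0.0.2 and foe.ip_address were 10.0.0.1, the command sent is:
 #   sudo iptables -A INPUT -d 10.0.0.2 -s 10.0.0.1 -j DROP
 # i.e. 10.0.0.2 silently drops everything it receives from 10.0.0.1.
 # Both loops above add a rule on each side, so the partition cuts
 # traffic in both directions.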
1809 # NOTE: When dynamic clustering is finished, we need to start checking
1810 # that main.partition nodes still work when partitioned
1811 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1812 onpass="Firewall rules set successfully",
1813 onfail="Error setting firewall rules" )
1814
1815 main.step( "Sleeping 60 seconds" )
1816 time.sleep( 60 )
1817
1818 def CASE62( self, main ):
1819 """
1820 Healing Partition
1821 """
1822 import time
1823 assert main.numCtrls, "main.numCtrls not defined"
1824 assert main, "main not defined"
1825 assert utilities.assert_equals, "utilities.assert_equals not defined"
1826 assert main.CLIs, "main.CLIs not defined"
1827 assert main.nodes, "main.nodes not defined"
1828 assert main.partition, "main.partition not defined"
1829 main.case( "Healing Partition" )
1830
1831 main.step( "Deleteing firewall rules" )
1832 healResults = main.TRUE
1833 for node in main.nodes:
1834 cmdStr = "sudo iptables -F"
1835 node.handle.sendline( cmdStr )
1836 node.handle.expect( "\$" )
1837 main.log.debug( node.handle.before )
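 # NOTE: "sudo iptables -F" flushes *all* rules in the filter table,
 # not just the DROP rules added in CASE61; this assumes the test
 # machines carry no other iptables rules worth preserving.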
1838 utilities.assert_equals( expect=main.TRUE, actual=healResults,
1839 onpass="Firewall rules removed",
1840 onfail="Error removing firewall rules" )
1841
1842 for node in main.partition:
1843 main.activeNodes.append( node )
1844 main.activeNodes.sort()
1845 try:
1846 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1847 "List of active nodes has duplicates, this likely indicates something was run out of order"
1848 except AssertionError:
1849 main.log.exception( "" )
1850 main.cleanup()
1851 main.exit()
1852
1853 def CASE7( self, main ):
1854 """
1855 Check state after ONOS failure
1856 """
1857 import json
1858 assert main.numCtrls, "main.numCtrls not defined"
1859 assert main, "main not defined"
1860 assert utilities.assert_equals, "utilities.assert_equals not defined"
1861 assert main.CLIs, "main.CLIs not defined"
1862 assert main.nodes, "main.nodes not defined"
1863 try:
1864 main.partition
1865 except AttributeError:
1866 main.partition = []
1867
1868 main.case( "Running ONOS Constant State Tests" )
1869
1870 main.step( "Check that each switch has a master" )
1871 # Assert that each device has a master
1872 rolesNotNull = main.TRUE
1873 threads = []
1874 for i in main.activeNodes:
1875 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1876 name="rolesNotNull-" + str( i ),
1877 args=[ ] )
1878 threads.append( t )
1879 t.start()
1880
1881 for t in threads:
1882 t.join()
1883 rolesNotNull = rolesNotNull and t.result
1884 utilities.assert_equals(
1885 expect=main.TRUE,
1886 actual=rolesNotNull,
1887 onpass="Each device has a master",
1888 onfail="Some devices don't have a master assigned" )
1889
1890 main.step( "Read device roles from ONOS" )
1891 ONOSMastership = []
1892 mastershipCheck = main.FALSE
1893 consistentMastership = True
1894 rolesResults = True
1895 threads = []
1896 for i in main.activeNodes:
1897 t = main.Thread( target=main.CLIs[i].roles,
1898 name="roles-" + str( i ),
1899 args=[] )
1900 threads.append( t )
1901 t.start()
1902
1903 for t in threads:
1904 t.join()
1905 ONOSMastership.append( t.result )
1906
1907 for i in range( len( ONOSMastership ) ):
1908 node = str( main.activeNodes[i] + 1 )
1909 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1910 main.log.error( "Error in getting ONOS" + node + " roles" )
1911 main.log.warn( "ONOS" + node + " mastership response: " +
1912 repr( ONOSMastership[i] ) )
1913 rolesResults = False
1914 utilities.assert_equals(
1915 expect=True,
1916 actual=rolesResults,
1917 onpass="No error in reading roles output",
1918 onfail="Error in reading roles from ONOS" )
1919
1920 main.step( "Check for consistency in roles from each controller" )
1921 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1922 main.log.info(
1923 "Switch roles are consistent across all ONOS nodes" )
1924 else:
1925 consistentMastership = False
1926 utilities.assert_equals(
1927 expect=True,
1928 actual=consistentMastership,
1929 onpass="Switch roles are consistent across all ONOS nodes",
1930 onfail="ONOS nodes have different views of switch roles" )
1931
1932 if rolesResults and not consistentMastership:
1933 for i in range( len( ONOSMastership ) ):
1934 node = str( main.activeNodes[i] + 1 )
1935 main.log.warn( "ONOS" + node + " roles: ",
1936 json.dumps( json.loads( ONOSMastership[ i ] ),
1937 sort_keys=True,
1938 indent=4,
1939 separators=( ',', ': ' ) ) )
1940
1941 # NOTE: we expect mastership to change on controller failure
1942
1943 main.step( "Get the intents and compare across all nodes" )
1944 ONOSIntents = []
1945 intentCheck = main.FALSE
1946 consistentIntents = True
1947 intentsResults = True
1948 threads = []
1949 for i in main.activeNodes:
1950 t = main.Thread( target=main.CLIs[i].intents,
1951 name="intents-" + str( i ),
1952 args=[],
1953 kwargs={ 'jsonFormat': True } )
1954 threads.append( t )
1955 t.start()
1956
1957 for t in threads:
1958 t.join()
1959 ONOSIntents.append( t.result )
1960
1961 for i in range( len( ONOSIntents) ):
1962 node = str( main.activeNodes[i] + 1 )
1963 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1964 main.log.error( "Error in getting ONOS" + node + " intents" )
1965 main.log.warn( "ONOS" + node + " intents response: " +
1966 repr( ONOSIntents[ i ] ) )
1967 intentsResults = False
1968 utilities.assert_equals(
1969 expect=True,
1970 actual=intentsResults,
1971 onpass="No error in reading intents output",
1972 onfail="Error in reading intents from ONOS" )
1973
1974 main.step( "Check for consistency in Intents from each controller" )
1975 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1976 main.log.info( "Intents are consistent across all ONOS " +
1977 "nodes" )
1978 else:
1979 consistentIntents = False
1980
1981 # Try to make it easy to figure out what is happening
1982 #
1983 # Intent ONOS1 ONOS2 ...
1984 # 0x01 INSTALLED INSTALLING
1985 # ... ... ...
1986 # ... ... ...
1987 title = " ID"
1988 for n in main.activeNodes:
1989 title += " " * 10 + "ONOS" + str( n + 1 )
1990 main.log.warn( title )
1991 # get all intent keys in the cluster
1992 keys = []
1993 for nodeStr in ONOSIntents:
1994 node = json.loads( nodeStr )
1995 for intent in node:
1996 keys.append( intent.get( 'id' ) )
1997 keys = set( keys )
1998 for key in keys:
1999 row = "%-13s" % key
2000 for nodeStr in ONOSIntents:
2001 node = json.loads( nodeStr )
2002 for intent in node:
2003 if intent.get( 'id' ) == key:
2004 row += "%-15s" % intent.get( 'state' )
2005 main.log.warn( row )
2006 # End table view
2007
2008 utilities.assert_equals(
2009 expect=True,
2010 actual=consistentIntents,
2011 onpass="Intents are consistent across all ONOS nodes",
2012 onfail="ONOS nodes have different views of intents" )
2013 intentStates = []
2014 for node in ONOSIntents: # Iter through ONOS nodes
2015 nodeStates = []
2016 # Iter through intents of a node
2017 try:
2018 for intent in json.loads( node ):
2019 nodeStates.append( intent[ 'state' ] )
2020 except ( ValueError, TypeError ):
2021 main.log.exception( "Error in parsing intents" )
2022 main.log.error( repr( node ) )
2023 intentStates.append( nodeStates )
2024 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2025 main.log.info( dict( out ) )
2026
2027 if intentsResults and not consistentIntents:
2028 for i in range( len( main.activeNodes ) ):
2029 node = str( main.activeNodes[i] + 1 )
2030 main.log.warn( "ONOS" + node + " intents: " )
2031 main.log.warn( json.dumps(
2032 json.loads( ONOSIntents[ i ] ),
2033 sort_keys=True,
2034 indent=4,
2035 separators=( ',', ': ' ) ) )
2036 elif intentsResults and consistentIntents:
2037 intentCheck = main.TRUE
2038
2039 # NOTE: Store has no durability, so intents are lost across system
2040 # restarts
2041 main.step( "Compare current intents with intents before the failure" )
2042 # NOTE: this requires case 5 to pass for intentState to be set.
2043 # maybe we should stop the test if that fails?
2044 sameIntents = main.FALSE
2045 try:
2046 intentState
2047 except NameError:
2048 main.log.warn( "No previous intent state was saved" )
2049 else:
2050 if intentState and intentState == ONOSIntents[ 0 ]:
2051 sameIntents = main.TRUE
2052 main.log.info( "Intents are consistent with before failure" )
2053 # TODO: possibly the states have changed? we may need to figure out
2054 # what the acceptable states are
2055 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2056 sameIntents = main.TRUE
2057 try:
2058 before = json.loads( intentState )
2059 after = json.loads( ONOSIntents[ 0 ] )
2060 for intent in before:
2061 if intent not in after:
2062 sameIntents = main.FALSE
2063 main.log.debug( "Intent is not currently in ONOS " +
2064 "(at least in the same form):" )
2065 main.log.debug( json.dumps( intent ) )
2066 except ( ValueError, TypeError ):
2067 main.log.exception( "Exception printing intents" )
2068 main.log.debug( repr( ONOSIntents[0] ) )
2069 main.log.debug( repr( intentState ) )
2070 if sameIntents == main.FALSE:
2071 try:
2072 main.log.debug( "ONOS intents before: " )
2073 main.log.debug( json.dumps( json.loads( intentState ),
2074 sort_keys=True, indent=4,
2075 separators=( ',', ': ' ) ) )
2076 main.log.debug( "Current ONOS intents: " )
2077 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2078 sort_keys=True, indent=4,
2079 separators=( ',', ': ' ) ) )
2080 except ( ValueError, TypeError ):
2081 main.log.exception( "Exception printing intents" )
2082 main.log.debug( repr( ONOSIntents[0] ) )
2083 main.log.debug( repr( intentState ) )
2084 utilities.assert_equals(
2085 expect=main.TRUE,
2086 actual=sameIntents,
2087 onpass="Intents are consistent with before failure",
2088 onfail="The Intents changed during failure" )
2089 intentCheck = intentCheck and sameIntents
2090
2091 main.step( "Get the OF Table entries and compare to before " +
2092 "component failure" )
2093 FlowTables = main.TRUE
2094 for i in range( 28 ):
2095 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2096 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2097 FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
2098 if FlowTables == main.FALSE:
2099 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2100 utilities.assert_equals(
2101 expect=main.TRUE,
2102 actual=FlowTables,
2103 onpass="No changes were found in the flow tables",
2104 onfail="Changes were found in the flow tables" )
2105
2106 main.Mininet2.pingLongKill()
2107 '''
2108 main.step( "Check the continuous pings to ensure that no packets " +
2109 "were dropped during component failure" )
2110 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2111 main.params[ 'TESTONIP' ] )
2112 LossInPings = main.FALSE
2113 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2114 for i in range( 8, 18 ):
2115 main.log.info(
2116 "Checking for a loss in pings along flow from s" +
2117 str( i ) )
2118 LossInPings = main.Mininet2.checkForLoss(
2119 "/tmp/ping.h" +
2120 str( i ) ) or LossInPings
2121 if LossInPings == main.TRUE:
2122 main.log.info( "Loss in ping detected" )
2123 elif LossInPings == main.ERROR:
2124 main.log.info( "There are multiple mininet process running" )
2125 elif LossInPings == main.FALSE:
2126 main.log.info( "No Loss in the pings" )
2127 main.log.info( "No loss of dataplane connectivity" )
2128 utilities.assert_equals(
2129 expect=main.FALSE,
2130 actual=LossInPings,
2131 onpass="No Loss of connectivity",
2132 onfail="Loss of dataplane connectivity detected" )
2133 '''
2134
2135 main.step( "Leadership Election is still functional" )
2136 # Test of LeadershipElection
2137 leaderList = []
2138
2139 partitioned = []
2140 for i in main.partition:
2141 partitioned.append( main.nodes[i].ip_address )
2142 leaderResult = main.TRUE
2143
2144 for i in main.activeNodes:
2145 cli = main.CLIs[i]
2146 leaderN = cli.electionTestLeader()
2147 leaderList.append( leaderN )
2148 if leaderN == main.FALSE:
2149 # error in response
2150 main.log.error( "Something is wrong with " +
2151 "electionTestLeader function, check the" +
2152 " error logs" )
2153 leaderResult = main.FALSE
2154 elif leaderN is None:
2155 main.log.error( cli.name +
2156 " shows no leader for the election-app was" +
2157 " elected after the old one died" )
2158 leaderResult = main.FALSE
2159 elif leaderN in partitioned:
2160 main.log.error( cli.name + " shows " + str( leaderN ) +
2161 " as leader for the election-app, but it " +
2162 "was partitioned" )
2163 leaderResult = main.FALSE
2164 if len( set( leaderList ) ) != 1:
2165 leaderResult = main.FALSE
2166 main.log.error(
2167 "Inconsistent view of leader for the election test app" )
2168 # TODO: print the list
2169 utilities.assert_equals(
2170 expect=main.TRUE,
2171 actual=leaderResult,
2172 onpass="Leadership election passed",
2173 onfail="Something went wrong with Leadership election" )
2174
2175 def CASE8( self, main ):
2176 """
2177 Compare topo
2178 """
2179 import json
2180 import time
2181 assert main.numCtrls, "main.numCtrls not defined"
2182 assert main, "main not defined"
2183 assert utilities.assert_equals, "utilities.assert_equals not defined"
2184 assert main.CLIs, "main.CLIs not defined"
2185 assert main.nodes, "main.nodes not defined"
2186
2187 main.case( "Compare ONOS Topology view to Mininet topology" )
2188 main.caseExplanation = "Compare topology objects between Mininet" +\
2189 " and ONOS"
2190 topoResult = main.FALSE
2191 topoFailMsg = "ONOS topology doesn't match Mininet"
2192 elapsed = 0
2193 count = 0
2194 main.step( "Comparing ONOS topology to MN topology" )
2195 startTime = time.time()
2196 # Give time for Gossip to work
2197 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2198 devicesResults = main.TRUE
2199 linksResults = main.TRUE
2200 hostsResults = main.TRUE
2201 hostAttachmentResults = True
2202 count += 1
2203 cliStart = time.time()
2204 devices = []
2205 threads = []
2206 for i in main.activeNodes:
2207 t = main.Thread( target=utilities.retry,
2208 name="devices-" + str( i ),
2209 args=[ main.CLIs[i].devices, [ None ] ],
2210 kwargs= { 'sleep': 5, 'attempts': 5,
2211 'randomTime': True } )
2212 threads.append( t )
2213 t.start()
2214
2215 for t in threads:
2216 t.join()
2217 devices.append( t.result )
2218 hosts = []
2219 ipResult = main.TRUE
2220 threads = []
2221 for i in main.activeNodes:
2222 t = main.Thread( target=utilities.retry,
2223 name="hosts-" + str( i ),
2224 args=[ main.CLIs[i].hosts, [ None ] ],
2225 kwargs= { 'sleep': 5, 'attempts': 5,
2226 'randomTime': True } )
2227 threads.append( t )
2228 t.start()
2229
2230 for t in threads:
2231 t.join()
2232 try:
2233 hosts.append( json.loads( t.result ) )
2234 except ( ValueError, TypeError ):
2235 main.log.exception( "Error parsing hosts results" )
2236 main.log.error( repr( t.result ) )
2237 hosts.append( None )
2238 for controller in range( 0, len( hosts ) ):
2239 controllerStr = str( main.activeNodes[controller] + 1 )
2240 if hosts[ controller ]:
2241 for host in hosts[ controller ]:
2242 if host is None or host.get( 'ipAddresses', [] ) == []:
2243 main.log.error(
2244 "Error with host ipAddresses on controller" +
2245 controllerStr + ": " + str( host ) )
2246 ipResult = main.FALSE
2247 ports = []
2248 threads = []
2249 for i in main.activeNodes:
2250 t = main.Thread( target=utilities.retry,
2251 name="ports-" + str( i ),
2252 args=[ main.CLIs[i].ports, [ None ] ],
2253 kwargs= { 'sleep': 5, 'attempts': 5,
2254 'randomTime': True } )
2255 threads.append( t )
2256 t.start()
2257
2258 for t in threads:
2259 t.join()
2260 ports.append( t.result )
2261 links = []
2262 threads = []
2263 for i in main.activeNodes:
2264 t = main.Thread( target=utilities.retry,
2265 name="links-" + str( i ),
2266 args=[ main.CLIs[i].links, [ None ] ],
2267 kwargs= { 'sleep': 5, 'attempts': 5,
2268 'randomTime': True } )
2269 threads.append( t )
2270 t.start()
2271
2272 for t in threads:
2273 t.join()
2274 links.append( t.result )
2275 clusters = []
2276 threads = []
2277 for i in main.activeNodes:
2278 t = main.Thread( target=utilities.retry,
2279 name="clusters-" + str( i ),
2280 args=[ main.CLIs[i].clusters, [ None ] ],
2281 kwargs= { 'sleep': 5, 'attempts': 5,
2282 'randomTime': True } )
2283 threads.append( t )
2284 t.start()
2285
2286 for t in threads:
2287 t.join()
2288 clusters.append( t.result )
2289
2290 elapsed = time.time() - startTime
2291 cliTime = time.time() - cliStart
2292 print "Elapsed time: " + str( elapsed )
2293 print "CLI time: " + str( cliTime )
2294
2295 if all( e is None for e in devices ) and\
2296 all( e is None for e in hosts ) and\
2297 all( e is None for e in ports ) and\
2298 all( e is None for e in links ) and\
2299 all( e is None for e in clusters ):
2300 topoFailMsg = "Could not get topology from ONOS"
2301 main.log.error( topoFailMsg )
2302 continue # Try again, No use trying to compare
2303
2304 mnSwitches = main.Mininet1.getSwitches()
2305 mnLinks = main.Mininet1.getLinks()
2306 mnHosts = main.Mininet1.getHosts()
2307 for controller in range( len( main.activeNodes ) ):
2308 controllerStr = str( main.activeNodes[controller] + 1 )
2309 if devices[ controller ] and ports[ controller ] and\
2310 "Error" not in devices[ controller ] and\
2311 "Error" not in ports[ controller ]:
2312
2313 try:
2314 currentDevicesResult = main.Mininet1.compareSwitches(
2315 mnSwitches,
2316 json.loads( devices[ controller ] ),
2317 json.loads( ports[ controller ] ) )
2318 except ( TypeError, ValueError ) as e:
2319 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2320 devices[ controller ], ports[ controller ] ) )
 currentDevicesResult = main.FALSE
2321 else:
2322 currentDevicesResult = main.FALSE
2323 utilities.assert_equals( expect=main.TRUE,
2324 actual=currentDevicesResult,
2325 onpass="ONOS" + controllerStr +
2326 " Switches view is correct",
2327 onfail="ONOS" + controllerStr +
2328 " Switches view is incorrect" )
2329
2330 if links[ controller ] and "Error" not in links[ controller ]:
2331 currentLinksResult = main.Mininet1.compareLinks(
2332 mnSwitches, mnLinks,
2333 json.loads( links[ controller ] ) )
2334 else:
2335 currentLinksResult = main.FALSE
2336 utilities.assert_equals( expect=main.TRUE,
2337 actual=currentLinksResult,
2338 onpass="ONOS" + controllerStr +
2339 " links view is correct",
2340 onfail="ONOS" + controllerStr +
2341 " links view is incorrect" )
2342 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2343 currentHostsResult = main.Mininet1.compareHosts(
2344 mnHosts,
2345 hosts[ controller ] )
2346 elif hosts[ controller ] == []:
2347 currentHostsResult = main.TRUE
2348 else:
2349 currentHostsResult = main.FALSE
2350 utilities.assert_equals( expect=main.TRUE,
2351 actual=currentHostsResult,
2352 onpass="ONOS" + controllerStr +
2353 " hosts exist in Mininet",
2354 onfail="ONOS" + controllerStr +
2355 " hosts don't match Mininet" )
2356 # CHECKING HOST ATTACHMENT POINTS
2357 hostAttachment = True
2358 zeroHosts = False
2359 # FIXME: topo-HA/obelisk specific mappings:
2360 # key is mac and value is dpid
2361 mappings = {}
2362 for i in range( 1, 29 ): # hosts 1 through 28
2363 # set up correct variables:
2364 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2365 if i == 1:
2366 deviceId = "1000".zfill(16)
2367 elif i == 2:
2368 deviceId = "2000".zfill(16)
2369 elif i == 3:
2370 deviceId = "3000".zfill(16)
2371 elif i == 4:
2372 deviceId = "3004".zfill(16)
2373 elif i == 5:
2374 deviceId = "5000".zfill(16)
2375 elif i == 6:
2376 deviceId = "6000".zfill(16)
2377 elif i == 7:
2378 deviceId = "6007".zfill(16)
2379 elif i >= 8 and i <= 17:
2380 dpid = '3' + str( i ).zfill( 3 )
2381 deviceId = dpid.zfill(16)
2382 elif i >= 18 and i <= 27:
2383 dpid = '6' + str( i ).zfill( 3 )
2384 deviceId = dpid.zfill(16)
2385 elif i == 28:
2386 deviceId = "2800".zfill(16)
2387 mappings[ macId ] = deviceId
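 # Worked example: for host 10, macId is "00:00:00:00:00:0A" and
 # dpid is '3' + '010' = '3010', so deviceId is "0000000000003010".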
2388 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2389 if hosts[ controller ] == []:
2390 main.log.warn( "There are no hosts discovered" )
2391 zeroHosts = True
2392 else:
2393 for host in hosts[ controller ]:
2394 mac = None
2395 location = None
2396 device = None
2397 port = None
2398 try:
2399 mac = host.get( 'mac' )
2400 assert mac, "mac field could not be found for this host object"
2401
2402 location = host.get( 'location' )
2403 assert location, "location field could not be found for this host object"
2404
2405 # Trim the protocol identifier off deviceId
2406 device = str( location.get( 'elementId' ) ).split(':')[1]
2407 assert device, "elementId field could not be found for this host location object"
2408
2409 port = location.get( 'port' )
2410 assert port, "port field could not be found for this host location object"
2411
2412 # Now check if this matches where they should be
2413 if mac and device and port:
2414 if str( port ) != "1":
2415 main.log.error( "The attachment port is incorrect for " +
2416 "host " + str( mac ) +
2417 ". Expected: 1 Actual: " + str( port) )
2418 hostAttachment = False
2419 if device != mappings[ str( mac ) ]:
2420 main.log.error( "The attachment device is incorrect for " +
2421 "host " + str( mac ) +
2422 ". Expected: " + mappings[ str( mac ) ] +
2423 " Actual: " + device )
2424 hostAttachment = False
2425 else:
2426 hostAttachment = False
2427 except AssertionError:
2428 main.log.exception( "Json object not as expected" )
2429 main.log.error( repr( host ) )
2430 hostAttachment = False
2431 else:
2432 main.log.error( "No hosts json output or \"Error\"" +
2433 " in output. hosts = " +
2434 repr( hosts[ controller ] ) )
2435 if zeroHosts is False:
2436 hostAttachment = True
2437
2438 # END CHECKING HOST ATTACHMENT POINTS
2439 devicesResults = devicesResults and currentDevicesResult
2440 linksResults = linksResults and currentLinksResult
2441 hostsResults = hostsResults and currentHostsResult
2442 hostAttachmentResults = hostAttachmentResults and\
2443 hostAttachment
2444 topoResult = ( devicesResults and linksResults
2445 and hostsResults and ipResult and
2446 hostAttachmentResults )
2447 utilities.assert_equals( expect=True,
2448 actual=topoResult,
2449 onpass="ONOS topology matches Mininet",
2450 onfail=topoFailMsg )
2451 # End of While loop to pull ONOS state
2452
2453 # Compare json objects for hosts and dataplane clusters
2454
2455 # hosts
2456 main.step( "Hosts view is consistent across all ONOS nodes" )
2457 consistentHostsResult = main.TRUE
2458 for controller in range( len( hosts ) ):
2459 controllerStr = str( main.activeNodes[controller] + 1 )
2460 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2461 if hosts[ controller ] == hosts[ 0 ]:
2462 continue
2463 else: # hosts not consistent
2464 main.log.error( "hosts from ONOS" + controllerStr +
2465 " are inconsistent with ONOS1" )
2466 main.log.warn( repr( hosts[ controller ] ) )
2467 consistentHostsResult = main.FALSE
2468
2469 else:
2470 main.log.error( "Error in getting ONOS hosts from ONOS" +
2471 controllerStr )
2472 consistentHostsResult = main.FALSE
2473 main.log.warn( "ONOS" + controllerStr +
2474 " hosts response: " +
2475 repr( hosts[ controller ] ) )
2476 utilities.assert_equals(
2477 expect=main.TRUE,
2478 actual=consistentHostsResult,
2479 onpass="Hosts view is consistent across all ONOS nodes",
2480 onfail="ONOS nodes have different views of hosts" )
2481
2482 main.step( "Hosts information is correct" )
2483 hostsResults = hostsResults and ipResult
2484 utilities.assert_equals(
2485 expect=main.TRUE,
2486 actual=hostsResults,
2487 onpass="Host information is correct",
2488 onfail="Host information is incorrect" )
2489
2490 main.step( "Host attachment points to the network" )
2491 utilities.assert_equals(
2492 expect=True,
2493 actual=hostAttachmentResults,
2494 onpass="Hosts are correctly attached to the network",
2495 onfail="ONOS did not correctly attach hosts to the network" )
2496
2497 # Strongly connected clusters of devices
2498 main.step( "Clusters view is consistent across all ONOS nodes" )
2499 consistentClustersResult = main.TRUE
2500 for controller in range( len( clusters ) ):
2501 controllerStr = str( main.activeNodes[controller] + 1 )
2502 if "Error" not in clusters[ controller ]:
2503 if clusters[ controller ] == clusters[ 0 ]:
2504 continue
2505 else: # clusters not consistent
2506 main.log.error( "clusters from ONOS" +
2507 controllerStr +
2508 " are inconsistent with ONOS1" )
2509 consistentClustersResult = main.FALSE
2510 else:
2511 main.log.error( "Error in getting dataplane clusters " +
2512 "from ONOS" + controllerStr )
2513 consistentClustersResult = main.FALSE
2514 main.log.warn( "ONOS" + controllerStr +
2515 " clusters response: " +
2516 repr( clusters[ controller ] ) )
2517 utilities.assert_equals(
2518 expect=main.TRUE,
2519 actual=consistentClustersResult,
2520 onpass="Clusters view is consistent across all ONOS nodes",
2521 onfail="ONOS nodes have different views of clusters" )
2522
2523 main.step( "There is only one SCC" )
2524 # there should always only be one cluster
2525 try:
2526 numClusters = len( json.loads( clusters[ 0 ] ) )
2527 except ( ValueError, TypeError ):
2528 main.log.exception( "Error parsing clusters[0]: " +
2529 repr( clusters[0] ) )
2530 numClusters = "ERROR"
2531 clusterResults = main.FALSE
2532 if numClusters == 1:
2533 clusterResults = main.TRUE
2534 utilities.assert_equals(
2535 expect=1,
2536 actual=numClusters,
2537 onpass="ONOS shows 1 SCC",
2538 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2539
2540 topoResult = ( devicesResults and linksResults
2541 and hostsResults and consistentHostsResult
2542 and consistentClustersResult and clusterResults
2543 and ipResult and hostAttachmentResults )
2544
2545 topoResult = topoResult and int( count <= 2 )
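 # i.e. besides all the individual checks passing, the topology must
 # have converged within the first two polls of the while loop above.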
2546 note = "note it takes about " + str( int( cliTime ) ) + \
2547 " seconds for the test to make all the cli calls to fetch " +\
2548 "the topology from each ONOS instance"
2549 main.log.info(
2550 "Very crass estimate for topology discovery/convergence( " +
2551 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2552 str( count ) + " tries" )
2553
2554 main.step( "Device information is correct" )
2555 utilities.assert_equals(
2556 expect=main.TRUE,
2557 actual=devicesResults,
2558 onpass="Device information is correct",
2559 onfail="Device information is incorrect" )
2560
2561 main.step( "Links are correct" )
2562 utilities.assert_equals(
2563 expect=main.TRUE,
2564 actual=linksResults,
2565 onpass="Link are correct",
2566 onfail="Links are incorrect" )
2567
2568 main.step( "Hosts are correct" )
2569 utilities.assert_equals(
2570 expect=main.TRUE,
2571 actual=hostsResults,
2572 onpass="Hosts are correct",
2573 onfail="Hosts are incorrect" )
2574
2575 # FIXME: move this to an ONOS state case
2576 main.step( "Checking ONOS nodes" )
2577 nodesOutput = []
2578 nodeResults = main.TRUE
2579 threads = []
2580 for i in main.activeNodes:
2581 t = main.Thread( target=main.CLIs[i].nodes,
2582 name="nodes-" + str( i ),
2583 args=[ ] )
2584 threads.append( t )
2585 t.start()
2586
2587 for t in threads:
2588 t.join()
2589 nodesOutput.append( t.result )
2590 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
2591 ips.sort()
2592 for i in nodesOutput:
2593 try:
2594 current = json.loads( i )
2595 activeIps = []
2596 currentResult = main.FALSE
2597 for node in current:
2598 if node['state'] == 'READY':
2599 activeIps.append( node['ip'] )
2600 activeIps.sort()
2601 if ips == activeIps:
2602 currentResult = main.TRUE
2603 except ( ValueError, TypeError ):
2604 main.log.error( "Error parsing nodes output" )
2605 main.log.warn( repr( i ) )
2606 currentResult = main.FALSE
2607 nodeResults = nodeResults and currentResult
2608 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2609 onpass="Nodes check successful",
2610 onfail="Nodes check NOT successful" )
2611 if not nodeResults:
2612 for cli in main.CLIs:
2613 main.log.debug( "{} components not ACTIVE: \n{}".format(
2614 cli.name,
2615 cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
2616
2617 def CASE9( self, main ):
2618 """
2619 Link s3-s28 down
2620 """
2621 import time
2622 assert main.numCtrls, "main.numCtrls not defined"
2623 assert main, "main not defined"
2624 assert utilities.assert_equals, "utilities.assert_equals not defined"
2625 assert main.CLIs, "main.CLIs not defined"
2626 assert main.nodes, "main.nodes not defined"
2627 # NOTE: You should probably run a topology check after this
2628
2629 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2630
2631 description = "Turn off a link to ensure that Link Discovery " +\
2632 "is working properly"
2633 main.case( description )
2634
2635 main.step( "Kill Link between s3 and s28" )
2636 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2637 main.log.info( "Waiting " + str( linkSleep ) +
2638 " seconds for link down to be discovered" )
2639 time.sleep( linkSleep )
2640 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2641 onpass="Link down successful",
2642 onfail="Failed to bring link down" )
2643 # TODO do some sort of check here
2644
2645 def CASE10( self, main ):
2646 """
2647 Link s3-s28 up
2648 """
2649 import time
2650 assert main.numCtrls, "main.numCtrls not defined"
2651 assert main, "main not defined"
2652 assert utilities.assert_equals, "utilities.assert_equals not defined"
2653 assert main.CLIs, "main.CLIs not defined"
2654 assert main.nodes, "main.nodes not defined"
2655 # NOTE: You should probably run a topology check after this
2656
2657 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2658
2659 description = "Restore a link to ensure that Link Discovery is " + \
2660 "working properly"
2661 main.case( description )
2662
2663 main.step( "Bring link between s3 and s28 back up" )
2664 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2665 main.log.info( "Waiting " + str( linkSleep ) +
2666 " seconds for link up to be discovered" )
2667 time.sleep( linkSleep )
2668 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2669 onpass="Link up successful",
2670 onfail="Failed to bring link up" )
2671 # TODO do some sort of check here
2672
2673 def CASE11( self, main ):
2674 """
2675 Switch Down
2676 """
2677 # NOTE: You should probably run a topology check after this
2678 import time
2679 assert main.numCtrls, "main.numCtrls not defined"
2680 assert main, "main not defined"
2681 assert utilities.assert_equals, "utilities.assert_equals not defined"
2682 assert main.CLIs, "main.CLIs not defined"
2683 assert main.nodes, "main.nodes not defined"
2684
2685 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2686
2687 description = "Killing a switch to ensure it is discovered correctly"
2688 onosCli = main.CLIs[ main.activeNodes[0] ]
2689 main.case( description )
2690 switch = main.params[ 'kill' ][ 'switch' ]
2691 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2692
2693 # TODO: Make this switch parameterizable
2694 main.step( "Kill " + switch )
2695 main.log.info( "Deleting " + switch )
2696 main.Mininet1.delSwitch( switch )
2697 main.log.info( "Waiting " + str( switchSleep ) +
2698 " seconds for switch down to be discovered" )
2699 time.sleep( switchSleep )
2700 device = onosCli.getDevice( dpid=switchDPID )
2701 # Peek at the deleted switch
2702 main.log.warn( str( device ) )
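 # NOTE: ONOS is expected to keep the deleted switch in its device
 # store but mark it unavailable, so we check that 'available' is
 # False rather than expecting the device to disappear entirely.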
2703 result = main.FALSE
2704 if device and device[ 'available' ] is False:
2705 result = main.TRUE
2706 utilities.assert_equals( expect=main.TRUE, actual=result,
2707 onpass="Kill switch successful",
2708 onfail="Failed to kill switch?" )
2709
2710 def CASE12( self, main ):
2711 """
2712 Switch Up
2713 """
2714 # NOTE: You should probably run a topology check after this
2715 import time
2716 assert main.numCtrls, "main.numCtrls not defined"
2717 assert main, "main not defined"
2718 assert utilities.assert_equals, "utilities.assert_equals not defined"
2719 assert main.CLIs, "main.CLIs not defined"
2720 assert main.nodes, "main.nodes not defined"
2721 assert ONOS1Port, "ONOS1Port not defined"
2722 assert ONOS2Port, "ONOS2Port not defined"
2723 assert ONOS3Port, "ONOS3Port not defined"
2724 assert ONOS4Port, "ONOS4Port not defined"
2725 assert ONOS5Port, "ONOS5Port not defined"
2726 assert ONOS6Port, "ONOS6Port not defined"
2727 assert ONOS7Port, "ONOS7Port not defined"
2728
2729 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2730 switch = main.params[ 'kill' ][ 'switch' ]
2731 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2732 links = main.params[ 'kill' ][ 'links' ].split()
2733 onosCli = main.CLIs[ main.activeNodes[0] ]
2734 description = "Adding a switch to ensure it is discovered correctly"
2735 main.case( description )
2736
2737 main.step( "Add back " + switch )
2738 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2739 for peer in links:
2740 main.Mininet1.addLink( switch, peer )
2741 ipList = [ node.ip_address for node in main.nodes ]
2742 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2743 main.log.info( "Waiting " + str( switchSleep ) +
2744 " seconds for switch up to be discovered" )
2745 time.sleep( switchSleep )
2746 device = onosCli.getDevice( dpid=switchDPID )
2747 # Peek at the deleted switch
2748 main.log.warn( str( device ) )
2749 result = main.FALSE
2750 if device and device[ 'available' ]:
2751 result = main.TRUE
2752 utilities.assert_equals( expect=main.TRUE, actual=result,
2753 onpass="add switch successful",
2754 onfail="Failed to add switch?" )
2755
2756 def CASE13( self, main ):
2757 """
2758 Clean up
2759 """
2760 import os
2761 import time
2762 assert main.numCtrls, "main.numCtrls not defined"
2763 assert main, "main not defined"
2764 assert utilities.assert_equals, "utilities.assert_equals not defined"
2765 assert main.CLIs, "main.CLIs not defined"
2766 assert main.nodes, "main.nodes not defined"
2767
2768 # printing colors to terminal
2769 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2770 'blue': '\033[94m', 'green': '\033[92m',
2771 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2772 main.case( "Test Cleanup" )
2773 main.step( "Killing tcpdumps" )
2774 main.Mininet2.stopTcpdump()
2775
2776 testname = main.TEST
2777 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2778 main.step( "Copying MN pcap and ONOS log files to test station" )
2779 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2780 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2781 # NOTE: MN Pcap file is being saved to logdir.
2782 # We scp this file as MN and TestON aren't necessarily the same vm
2783
2784 # FIXME: To be replaced with a Jenkin's post script
2785 # TODO: Load these from params
2786 # NOTE: must end in /
2787 logFolder = "/opt/onos/log/"
2788 logFiles = [ "karaf.log", "karaf.log.1" ]
2789 # NOTE: must end in /
2790 for f in logFiles:
2791 for node in main.nodes:
2792 dstName = main.logdir + "/" + node.name + "-" + f
2793 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2794 logFolder + f, dstName )
2795 # std*.log's
2796 # NOTE: must end in /
2797 logFolder = "/opt/onos/var/"
2798 logFiles = [ "stderr.log", "stdout.log" ]
2799 # NOTE: must end in /
2800 for f in logFiles:
2801 for node in main.nodes:
2802 dstName = main.logdir + "/" + node.name + "-" + f
2803 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2804 logFolder + f, dstName )
2805 else:
2806 main.log.debug( "skipping saving log files" )
2807
2808 main.step( "Stopping Mininet" )
2809 mnResult = main.Mininet1.stopNet()
2810 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2811 onpass="Mininet stopped",
2812 onfail="MN cleanup NOT successful" )
2813
2814 main.step( "Checking ONOS Logs for errors" )
2815 for node in main.nodes:
2816 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2817 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2818
2819 try:
2820 timerLog = open( main.logdir + "/Timers.csv", 'w')
2821 # Overwrite with empty line and close
2822 labels = "Gossip Intents"
2823 data = str( gossipTime )
2824 timerLog.write( labels + "\n" + data )
2825 timerLog.close()
2826 except NameError, e:
2827 main.log.exception(e)
2828
2829 def CASE14( self, main ):
2830 """
2831 start election app on all onos nodes
2832 """
2833 assert main.numCtrls, "main.numCtrls not defined"
2834 assert main, "main not defined"
2835 assert utilities.assert_equals, "utilities.assert_equals not defined"
2836 assert main.CLIs, "main.CLIs not defined"
2837 assert main.nodes, "main.nodes not defined"
2838
2839 main.case("Start Leadership Election app")
2840 main.step( "Install leadership election app" )
2841 onosCli = main.CLIs[ main.activeNodes[0] ]
2842 appResult = onosCli.activateApp( "org.onosproject.election" )
2843 utilities.assert_equals(
2844 expect=main.TRUE,
2845 actual=appResult,
2846 onpass="Election app installed",
2847 onfail="Something went wrong with installing Leadership election" )
2848
2849 main.step( "Run for election on each node" )
2850 leaderResult = main.TRUE
2851 leaders = []
2852 for i in main.activeNodes:
2853 main.CLIs[i].electionTestRun()
2854 for i in main.activeNodes:
2855 cli = main.CLIs[i]
2856 leader = cli.electionTestLeader()
2857 if leader is None or leader == main.FALSE:
2858 main.log.error( cli.name + ": Leader for the election app " +
2859 "should be an ONOS node, instead got '" +
2860 str( leader ) + "'" )
2861 leaderResult = main.FALSE
2862 leaders.append( leader )
2863 utilities.assert_equals(
2864 expect=main.TRUE,
2865 actual=leaderResult,
2866 onpass="Successfully ran for leadership",
2867 onfail="Failed to run for leadership" )
2868
2869 main.step( "Check that each node shows the same leader" )
2870 sameLeader = main.TRUE
2871 if len( set( leaders ) ) != 1:
2872 sameLeader = main.FALSE
2873 main.log.error( "Results of electionTestLeader in order of main.CLIs: " +
2874 str( leaders ) )
2875 utilities.assert_equals(
2876 expect=main.TRUE,
2877 actual=sameLeader,
2878 onpass="Leadership is consistent for the election topic",
2879 onfail="Nodes have different leaders" )
2880
2881 def CASE15( self, main ):
2882 """
2883 Check that Leadership Election is still functional
2884 15.1 Run election on each node
2885 15.2 Check that each node has the same leaders and candidates
2886 15.3 Find current leader and withdraw
2887 15.4 Check that a new node was elected leader
2888 15.5 Check that the new leader was the candidate of the old leader
2889 15.6 Run for election on old leader
2890 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2891 15.8 Make sure that the old leader was added to the candidate list
2892
2893 old and new variable prefixes refer to data from before vs after
2894 withdrawal, and later, before withdrawal vs after re-election
2895 """
2896 import time
2897 assert main.numCtrls, "main.numCtrls not defined"
2898 assert main, "main not defined"
2899 assert utilities.assert_equals, "utilities.assert_equals not defined"
2900 assert main.CLIs, "main.CLIs not defined"
2901 assert main.nodes, "main.nodes not defined"
2902
2903 description = "Check that Leadership Election is still functional"
2904 main.case( description )
2905 # NOTE: Need to re-run after restarts since being a candidate is not persistent
2906
2907 oldLeaders = [] # list of lists of each node's candidates before
2908 newLeaders = [] # list of lists of each node's candidates after
Jon Hall6e709752016-02-01 13:38:46 -08002909 oldLeader = '' # the old leader from oldLeaders, None if not same
2910 newLeader = '' # the new leader from newLeaders, None if not same
2911 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2912 expectNoLeader = False # True when there is only one node, since no leader remains after it withdraws
2913 if main.numCtrls == 1:
2914 expectNoLeader = True
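 # With a single node, withdrawing the only candidate should leave
 # the topic with no leader at all, hence 'none' is accepted below.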
2915
2916 main.step( "Run for election on each node" )
2917 electionResult = main.TRUE
2918
2919 for i in main.activeNodes: # run test election on each node
2920 if main.CLIs[i].electionTestRun() == main.FALSE:
2921 electionResult = main.FALSE
2922 utilities.assert_equals(
2923 expect=main.TRUE,
2924 actual=electionResult,
2925 onpass="All nodes successfully ran for leadership",
2926 onfail="At least one node failed to run for leadership" )
2927
2928 if electionResult == main.FALSE:
2929 main.log.error(
2930 "Skipping Test Case because Election Test App isn't loaded" )
2931 main.skipCase()
2932
2933 main.step( "Check that each node shows the same leader and candidates" )
2934 failMessage = "Nodes have different leaderboards"
2935 def consistentLeaderboards( nodes ):
2936 TOPIC = 'org.onosproject.election'
2937 # FIXME: use threads
2938 #FIXME: should we retry outside the function?
2939 for n in range( 5 ): # Retry in case election is still happening
2940 leaderList = []
2941 # Get all leaderboards
2942 for cli in nodes:
2943 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
2944 # Compare leaderboards
2945 result = all( i is not None and i == leaderList[ 0 ]
2946 for i in leaderList )
2947 main.log.debug( leaderList )
2948 main.log.warn( result )
2949 if result:
2950 return ( result, leaderList )
2951 time.sleep( 5 ) # TODO: parameterize
2952 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
 return ( False, leaderList )
2953 activeCLIs = [ main.CLIs[i] for i in main.activeNodes ]
2954 sameResult, oldLeaders = consistentLeaderboards( activeCLIs )
2955 if sameResult:
2956 oldLeader = oldLeaders[ 0 ][ 0 ]
2957 main.log.warn( oldLeader )
2958 else:
2959 oldLeader = None
2960 utilities.assert_equals(
2961 expect=True,
2962 actual=sameResult,
2963 onpass="Leaderboards are consistent for the election topic",
2964 onfail=failMessage )
2965
2966 main.step( "Find current leader and withdraw" )
2967 withdrawResult = main.TRUE
2968 # do some sanity checking on leader before using it
2969 if oldLeader is None:
2970 main.log.error( "Leadership isn't consistent." )
2971 withdrawResult = main.FALSE
2972 # Get the CLI of the oldLeader
2973 for i in main.activeNodes:
2974 if oldLeader == main.nodes[ i ].ip_address:
2975 oldLeaderCLI = main.CLIs[ i ]
2976 break
2977 else: # FOR/ELSE statement
2978 main.log.error( "Leader election, could not find current leader" )
2979 if oldLeader:
2980 withdrawResult = oldLeaderCLI.electionTestWithdraw()
2981 utilities.assert_equals(
2982 expect=main.TRUE,
2983 actual=withdrawResult,
2984 onpass="Node was withdrawn from election",
2985 onfail="Node was not withdrawn from election" )
2986
2987 main.step( "Check that a new node was elected leader" )
2988 failMessage = "Nodes have different leaders"
2989 # Get new leaders and candidates
2990 newLeaderResult, newLeaders = consistentLeaderboards( activeCLIs )
2991 if newLeaders[ 0 ][ 0 ] == 'none':
2992 main.log.error( "No leader was elected on at least 1 node" )
2993 if not expectNoLeader:
2994 newLeaderResult = False
2995 if newLeaderResult:
2996 newLeader = newLeaders[ 0 ][ 0 ]
2997 else:
2998 newLeader = None
2999
3000 # Check that the new leader is not the older leader, which was withdrawn
3001 if newLeader == oldLeader:
3002 newLeaderResult = False
3003 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3004 " as the current leader" )
3005 utilities.assert_equals(
3006 expect=True,
3007 actual=newLeaderResult,
3008 onpass="Leadership election passed",
3009 onfail="Something went wrong with Leadership election" )
3010
3011 main.step( "Check that the new leader was the candidate of the old leader" )
3012 # candidates[ 2 ] should become the top candidate after withdrawal
3013 correctCandidateResult = main.TRUE
3014 if expectNoLeader:
3015 if newLeader == 'none':
3016 main.log.info( "No leader expected. None found. Pass" )
3017 correctCandidateResult = main.TRUE
3018 else:
3019 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3020 correctCandidateResult = main.FALSE
3021 elif len( oldLeaders[0] ) >= 3:
3022 if newLeader == oldLeaders[ 0 ][ 2 ]:
3023 # correct leader was elected
3024 correctCandidateResult = main.TRUE
3025 else:
3026 correctCandidateResult = main.FALSE
3027 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3028 newLeader, oldLeaders[ 0 ][ 2 ] ) )
3029 else:
3030 main.log.warn( "Could not determine who should be the correct leader" )
3031 main.log.debug( oldLeaders[ 0 ] )
3032 correctCandidateResult = main.FALSE
3033 utilities.assert_equals(
3034 expect=main.TRUE,
3035 actual=correctCandidateResult,
3036 onpass="Correct Candidate Elected",
3037 onfail="Incorrect Candidate Elected" )
3038
3039 main.step( "Run for election on old leader( just so everyone " +
3040 "is in the hat )" )
3041 if oldLeaderCLI is not None:
3042 runResult = oldLeaderCLI.electionTestRun()
3043 else:
3044 main.log.error( "No old leader to re-elect" )
3045 runResult = main.FALSE
3046 utilities.assert_equals(
3047 expect=main.TRUE,
3048 actual=runResult,
3049 onpass="App re-ran for election",
3050 onfail="App failed to run for election" )
3051
3052 main.step(
3053 "Check that oldLeader is a candidate, and leader if only 1 node" )
3054 # verify leader didn't just change
3055 # Get new leaders and candidates
3056 reRunLeaders = []
3057 time.sleep( 5 ) # TODO: parameterize
3058 positionResult, reRunLeaders = consistentLeaderboards( activeCLIs )
3059
3060 # Check that the re-elected node is last on the candidate List
3061 if oldLeader != reRunLeaders[ 0 ][ -1 ]:
3062 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
3063 str( reRunLeaders[ 0 ] ) ) )
3064 positionResult = main.FALSE
3065
3066 utilities.assert_equals(
3067 expect=True,
3068 actual=positionResult,
3069 onpass="Old leader successfully re-ran for election",
3070 onfail="Something went wrong with Leadership election after " +
3071 "the old leader re-ran for election" )
3072
3073 def CASE16( self, main ):
3074 """
3075 Install Distributed Primitives app
3076 """
3077 import time
3078 assert main.numCtrls, "main.numCtrls not defined"
3079 assert main, "main not defined"
3080 assert utilities.assert_equals, "utilities.assert_equals not defined"
3081 assert main.CLIs, "main.CLIs not defined"
3082 assert main.nodes, "main.nodes not defined"
3083
3084 # Variables for the distributed primitives tests
3085 global pCounterName
3086 global pCounterValue
3087 global onosSet
3088 global onosSetName
3089 pCounterName = "TestON-Partitions"
3090 pCounterValue = 0
3091 onosSet = set([])
3092 onosSetName = "TestON-set"
3093
3094 description = "Install Primitives app"
3095 main.case( description )
3096 main.step( "Install Primitives app" )
3097 appName = "org.onosproject.distributedprimitives"
3098 node = main.activeNodes[0]
3099 appResults = main.CLIs[node].activateApp( appName )
3100 utilities.assert_equals( expect=main.TRUE,
3101 actual=appResults,
3102 onpass="Primitives app activated",
3103 onfail="Primitives app not activated" )
3104 time.sleep( 5 ) # To allow all nodes to activate
3105
3106 def CASE17( self, main ):
3107 """
3108 Check for basic functionality with distributed primitives
3109 """
3110 # Make sure variables are defined/set
3111 assert main.numCtrls, "main.numCtrls not defined"
3112 assert main, "main not defined"
3113 assert utilities.assert_equals, "utilities.assert_equals not defined"
3114 assert main.CLIs, "main.CLIs not defined"
3115 assert main.nodes, "main.nodes not defined"
3116 assert pCounterName, "pCounterName not defined"
3117 assert onosSetName, "onosSetName not defined"
3118 # NOTE: assert fails if value is 0/None/Empty/False
3119 try:
3120 pCounterValue
3121 except NameError:
3122 main.log.error( "pCounterValue not defined, setting to 0" )
3123 pCounterValue = 0
3124 try:
3125 onosSet
3126 except NameError:
3127 main.log.error( "onosSet not defined, setting to empty Set" )
3128 onosSet = set([])
3129 # Variables for the distributed primitives tests. These are local only
3130 addValue = "a"
3131 addAllValue = "a b c d e f"
3132 retainValue = "c d e f"
3133
3134 description = "Check for basic functionality with distributed " +\
3135 "primitives"
3136 main.case( description )
3137 main.caseExplanation = "Test the methods of the distributed " +\
3138 "primitives (counters and sets) throught the cli"
3139 # DISTRIBUTED ATOMIC COUNTERS
3140 # Partitioned counters
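 # Strategy for the counter steps below: every active node fires an
 # atomic add-and-get ( or get-and-add ) in its own thread while we
 # record the value each call should return; because the primitive
 # serializes updates, every expected value must appear somewhere in
 # the returned results, though thread completion order may vary.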
3141 main.step( "Increment then get a default counter on each node" )
3142 pCounters = []
3143 threads = []
3144 addedPValues = []
3145 for i in main.activeNodes:
3146 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3147 name="counterAddAndGet-" + str( i ),
3148 args=[ pCounterName ] )
3149 pCounterValue += 1
3150 addedPValues.append( pCounterValue )
3151 threads.append( t )
3152 t.start()
3153
3154 for t in threads:
3155 t.join()
3156 pCounters.append( t.result )
3157 # Check that counter incremented numController times
3158 pCounterResults = True
3159 for i in addedPValues:
3160 tmpResult = i in pCounters
3161 pCounterResults = pCounterResults and tmpResult
3162 if not tmpResult:
3163 main.log.error( str( i ) + " is not in partitioned "
3164 "counter incremented results" )
3165 utilities.assert_equals( expect=True,
3166 actual=pCounterResults,
3167 onpass="Default counter incremented",
3168 onfail="Error incrementing default" +
3169 " counter" )
3170
        main.step( "Get then increment a default counter on each node" )
        pCounters = []
        threads = []
        addedPValues = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
                             name="counterGetAndAdd-" + str( i ),
                             args=[ pCounterName ] )
            addedPValues.append( pCounterValue )
            pCounterValue += 1
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            pCounters.append( t.result )
        # Check that counter incremented numController times
        pCounterResults = True
        for i in addedPValues:
            tmpResult = i in pCounters
            pCounterResults = pCounterResults and tmpResult
            if not tmpResult:
                main.log.error( str( i ) + " is not in partitioned "
                                "counter incremented results" )
        utilities.assert_equals( expect=True,
                                 actual=pCounterResults,
                                 onpass="Default counter incremented",
                                 onfail="Error incrementing default" +
                                        " counter" )

        main.step( "Counters we added have the correct values" )
        incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=incrementCheck,
                                 onpass="Added counters are correct",
                                 onfail="Added counters are incorrect" )

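        # main.Counters.counterCheck ( from this test's dependencies ) is
        # expected to read the counters back from every active node and
        # compare the named counter against the running total tracked above.
        # A minimal sketch of that idea, assuming the CLI's counters() call
        # returns a JSON list of { "name": ..., "value": ... } entries; the
        # exact output format is an assumption here, and the real helper
        # lives in dependencies/Counters.py:
        def _sketchCounterCheck( counterName, expected ):
            import json
            correct = True
            for i in main.activeNodes:
                onosCounters = json.loads( main.CLIs[i].counters() )
                values = [ c.get( "value" ) for c in onosCounters
                           if c.get( "name" ) == counterName ]
                if not values or values[ 0 ] != expected:
                    main.log.error( "ONOS" + str( i + 1 ) + " has " +
                                    str( values ) + " for counter " +
                                    counterName + ", expected " +
                                    str( expected ) )
                    correct = False
            return main.TRUE if correct else main.FALSE
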
        main.step( "Add -8 to then get a default counter on each node" )
        pCounters = []
        threads = []
        addedPValues = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
                             name="counterIncrement-" + str( i ),
                             args=[ pCounterName ],
                             kwargs={ "delta": -8 } )
            pCounterValue += -8
            addedPValues.append( pCounterValue )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            pCounters.append( t.result )
        # Check that counter incremented numController times
        pCounterResults = True
        for i in addedPValues:
            tmpResult = i in pCounters
            pCounterResults = pCounterResults and tmpResult
            if not tmpResult:
                main.log.error( str( i ) + " is not in partitioned "
                                "counter incremented results" )
        utilities.assert_equals( expect=True,
                                 actual=pCounterResults,
                                 onpass="Default counter incremented",
                                 onfail="Error incrementing default" +
                                        " counter" )

        main.step( "Add 5 to then get a default counter on each node" )
        pCounters = []
        threads = []
        addedPValues = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
                             name="counterIncrement-" + str( i ),
                             args=[ pCounterName ],
                             kwargs={ "delta": 5 } )
            pCounterValue += 5
            addedPValues.append( pCounterValue )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            pCounters.append( t.result )
        # Check that counter incremented numController times
        pCounterResults = True
        for i in addedPValues:
            tmpResult = i in pCounters
            pCounterResults = pCounterResults and tmpResult
            if not tmpResult:
                main.log.error( str( i ) + " is not in partitioned "
                                "counter incremented results" )
        utilities.assert_equals( expect=True,
                                 actual=pCounterResults,
                                 onpass="Default counter incremented",
                                 onfail="Error incrementing default" +
                                        " counter" )

        main.step( "Get then add 5 to a default counter on each node" )
        pCounters = []
        threads = []
        addedPValues = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
                             name="counterIncrement-" + str( i ),
                             args=[ pCounterName ],
                             kwargs={ "delta": 5 } )
            addedPValues.append( pCounterValue )
            pCounterValue += 5
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            pCounters.append( t.result )
        # Check that counter incremented numController times
        pCounterResults = True
        for i in addedPValues:
            tmpResult = i in pCounters
            pCounterResults = pCounterResults and tmpResult
            if not tmpResult:
                main.log.error( str( i ) + " is not in partitioned "
                                "counter incremented results" )
        utilities.assert_equals( expect=True,
                                 actual=pCounterResults,
                                 onpass="Default counter incremented",
                                 onfail="Error incrementing default" +
                                        " counter" )

        main.step( "Counters we added have the correct values" )
        incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=incrementCheck,
                                 onpass="Added counters are correct",
                                 onfail="Added counters are incorrect" )

        # DISTRIBUTED SETS
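        # NOTE: every set check below repeats the same pattern: fan one CLI
        # call out to all active nodes in parallel, join the threads, then
        # validate the collected results. A consolidated sketch of that
        # fan-out ( _sketchFanOut is illustrative only, not part of the
        # TestON API; the steps below intentionally keep their inline
        # copies so that each step logs and asserts independently ):
        def _sketchFanOut( cliMethodName, namePrefix, args=None, kwargs=None ):
            threads = []
            results = []
            for i in main.activeNodes:
                t = main.Thread( target=getattr( main.CLIs[i], cliMethodName ),
                                 name=namePrefix + "-" + str( i ),
                                 args=args if args else [],
                                 kwargs=kwargs if kwargs else {} )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                results.append( t.result )
            return results
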
        main.step( "Distributed Set get" )
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )

        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        utilities.assert_equals( expect=main.TRUE,
                                 actual=getResults,
                                 onpass="Set elements are correct",
                                 onfail="Set elements are incorrect" )

        main.step( "Distributed Set size" )
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )

        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=sizeResults,
                                 onpass="Set sizes are correct",
                                 onfail="Set sizes are incorrect" )

        main.step( "Distributed Set add()" )
        onosSet.add( addValue )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAdd-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addResults = main.FALSE
            else:
                # unexpected result
                addResults = main.FALSE
        if addResults != main.TRUE:
            main.log.error( "Error executing set add" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node + " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node + " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addResults = addResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addResults,
                                 onpass="Set add correct",
                                 onfail="Set add was incorrect" )

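        # The "check if set is still correct" block above is repeated after
        # every mutating step below. A consolidated sketch of that check
        # ( _sketchCheckSet is illustrative only and is not wired into this
        # case ), assuming setTestGet returns a list of elements or
        # main.ERROR and setTestSize returns an integer:
        def _sketchCheckSet( expectedSet, setName ):
            correct = True
            for i in main.activeNodes:
                resp = main.CLIs[i].setTestGet( setName )
                if not isinstance( resp, list ):
                    correct = False  # CLI error or unexpected response
                elif len( resp ) != len( set( resp ) ):
                    correct = False  # repeated elements in the set
                elif set( resp ) != expectedSet:
                    correct = False  # wrong contents
                if main.CLIs[i].setTestSize( setName ) != len( expectedSet ):
                    correct = False  # wrong size
            return main.TRUE if correct else main.FALSE
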
        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

        main.step( "Distributed Set contains()" )
        containsResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setContains-" + str( i ),
                             args=[ onosSetName ],
                             kwargs={ "values": addValue } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            # NOTE: This is the tuple
            containsResponses.append( t.result )

        containsResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if containsResponses[ i ] == main.ERROR:
                containsResults = main.FALSE
            else:
                containsResults = containsResults and\
                                  containsResponses[ i ][ 1 ]
        utilities.assert_equals( expect=main.TRUE,
                                 actual=containsResults,
                                 onpass="Set contains is functional",
                                 onfail="Set contains failed" )

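        # setTestGet with a "values" kwarg is expected to return a tuple of
        # ( current set contents, boolean contains result ), which is why
        # the contains checks above and below index [ 1 ]. A minimal sketch
        # of that validation under the assumed tuple shape ( illustrative
        # only, not used by the steps themselves ):
        def _sketchContainsCheck( responses ):
            result = main.TRUE
            for resp in responses:
                if resp == main.ERROR:
                    result = main.FALSE
                else:
                    contents, found = resp  # assumed ( set, boolean ) tuple
                    result = result and found
            return result
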
        main.step( "Distributed Set containsAll()" )
        containsAllResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setContainsAll-" + str( i ),
                             args=[ onosSetName ],
                             kwargs={ "values": addAllValue } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            # NOTE: This is the tuple
            containsAllResponses.append( t.result )

        containsAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if containsAllResponses[ i ] == main.ERROR:
                containsAllResults = main.FALSE
            else:
                containsAllResults = containsAllResults and\
                                     containsAllResponses[ i ][ 1 ]
        utilities.assert_equals( expect=main.TRUE,
                                 actual=containsAllResults,
                                 onpass="Set containsAll is functional",
                                 onfail="Set containsAll failed" )

        main.step( "Distributed Set remove()" )
        onosSet.remove( addValue )
        removeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRemove-" + str( i ),
                             args=[ onosSetName, addValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            removeResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeResponses[ i ] == main.ERROR:
                # Error in execution
                removeResults = main.FALSE
            else:
                # unexpected result
                removeResults = main.FALSE
        if removeResults != main.TRUE:
            main.log.error( "Error executing set remove" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeResults = removeResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeResults,
                                 onpass="Set remove correct",
                                 onfail="Set remove was incorrect" )

        main.step( "Distributed Set removeAll()" )
        onosSet.difference_update( addAllValue.split() )
        removeAllResponses = []
        threads = []
        try:
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].setTestRemove,
                                 name="setTestRemoveAll-" + str( i ),
                                 args=[ onosSetName, addAllValue ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                removeAllResponses.append( t.result )
        except Exception as e:
            main.log.exception( e )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        removeAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if removeAllResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif removeAllResponses[ i ] == main.FALSE:
                # not in set, probably fine
                pass
            elif removeAllResponses[ i ] == main.ERROR:
                # Error in execution
                removeAllResults = main.FALSE
            else:
                # unexpected result
                removeAllResults = main.FALSE
        if removeAllResults != main.TRUE:
            main.log.error( "Error executing set removeAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        removeAllResults = removeAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=removeAllResults,
                                 onpass="Set removeAll correct",
                                 onfail="Set removeAll was incorrect" )

        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

        main.step( "Distributed Set clear()" )
        onosSet.clear()
        clearResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestClear-" + str( i ),
                             args=[ onosSetName, " " ],  # value is ignored when clearing
                             kwargs={ "clear": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            clearResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        clearResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Set was already empty, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )

        main.step( "Distributed Set addAll()" )
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

        main.step( "Distributed Set retain()" )
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if isinstance( getResponses[ i ], list ):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + node +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + node +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
        sizeResponses = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( len( main.activeNodes ) ):
            node = str( main.activeNodes[i] + 1 )
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + node + " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )

        # Transactional maps
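        # transactionalMapPut is expected to open a transaction, put numKeys
        # entries ( "Key1" .. "Key<numKeys>" ) with the same value, commit,
        # and return a dict of per-key responses. A minimal sketch of the
        # validation applied below, assuming that response shape ( the shape
        # is an assumption of this test, not a documented API contract ):
        def _sketchValidatePuts( responses, expectedValue, expectedCount ):
            if not responses or len( responses ) != expectedCount:
                return False
            return all( responses[ key ][ 'value' ] == expectedValue
                        for key in responses )
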
        main.step( "Partitioned Transactional maps put" )
        tMapValue = "Testing"
        numKeys = 100
        putResult = True
        node = main.activeNodes[0]
        putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
        if putResponses and len( putResponses ) == numKeys:
            for i in putResponses:
                if putResponses[ i ][ 'value' ] != tMapValue:
                    putResult = False
        else:
            putResult = False
        if not putResult:
            main.log.debug( "Put response values: " + str( putResponses ) )
        utilities.assert_equals( expect=True,
                                 actual=putResult,
                                 onpass="Partitioned Transactional Map put successful",
                                 onfail="Partitioned Transactional Map put values are incorrect" )

        main.step( "Partitioned Transactional maps get" )
        getCheck = True
        for n in range( 1, numKeys + 1 ):
            getResponses = []
            threads = []
            valueCheck = True
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[i].transactionalMapGet,
                                 name="TMap-get-" + str( i ),
                                 args=[ "Key" + str( n ) ] )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                getResponses.append( t.result )
            for resp in getResponses:
                if resp != tMapValue:
                    valueCheck = False
            if not valueCheck:
                main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
                main.log.warn( getResponses )
            getCheck = getCheck and valueCheck
        utilities.assert_equals( expect=True,
                                 actual=getCheck,
                                 onpass="Partitioned Transactional Map get values were correct",
                                 onfail="Partitioned Transactional Map values incorrect" )