blob: 5a6de1a80044f8bd6d4978314693c7f8374fff58 [file] [log] [blame]
Jon Hall6e709752016-02-01 13:38:46 -08001"""
2Description: This test is to determine if ONOS can handle
 3 a full network partition
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE61: The Failure inducing case.
13CASE62: The Failure recovery case.
14CASE7: Check state after control plane failure
15CASE8: Compare topo
16CASE9: Link s3-s28 down
17CASE10: Link s3-s28 up
18CASE11: Switch down
19CASE12: Switch up
20CASE13: Clean up
21CASE14: start election app on all onos nodes
22CASE15: Check that Leadership Election is still functional
23CASE16: Install Distributed Primitives app
24CASE17: Check for basic functionality with distributed primitives
25"""
26
27
28class HAfullNetPartition:
29
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
51 import imp
52 import pexpect
53 import time
54 main.log.info( "ONOS HA test: Partition ONOS nodes into two sub-clusters - " +
55 "initialization" )
56 main.case( "Setting up test environment" )
57 main.caseExplanation = "Setup the test environment including " +\
58 "installing ONOS, starting Mininet and ONOS" +\
59 "cli sessions."
60
61 # load some variables from the params file
62 PULLCODE = False
63 if main.params[ 'Git' ] == 'True':
64 PULLCODE = True
65 gitBranch = main.params[ 'branch' ]
66 cellName = main.params[ 'ENV' ][ 'cellName' ]
67
68 main.numCtrls = int( main.params[ 'num_controllers' ] )
69 if main.ONOSbench.maxNodes:
70 if main.ONOSbench.maxNodes < main.numCtrls:
71 main.numCtrls = int( main.ONOSbench.maxNodes )
72 # set global variables
73 global ONOS1Port
74 global ONOS2Port
75 global ONOS3Port
76 global ONOS4Port
77 global ONOS5Port
78 global ONOS6Port
79 global ONOS7Port
80
81 # FIXME: just get controller port from params?
82 # TODO: do we really need all these?
83 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
84 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
85 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
86 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
87 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
88 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
89 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
90
91 try:
92 fileName = "Counters"
93 # TODO: Maybe make a library folder somewhere?
94 path = main.params[ 'imports' ][ 'path' ]
95 main.Counters = imp.load_source( fileName,
96 path + fileName + ".py" )
97 except Exception as e:
98 main.log.exception( e )
99 main.cleanup()
100 main.exit()
101
102 main.CLIs = []
103 main.nodes = []
104 ipList = []
105 for i in range( 1, main.numCtrls + 1 ):
106 try:
107 main.CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
108 main.nodes.append( getattr( main, 'ONOS' + str( i ) ) )
109 ipList.append( main.nodes[ -1 ].ip_address )
110 except AttributeError:
111 break
112
113 main.step( "Create cell file" )
114 cellAppString = main.params[ 'ENV' ][ 'appString' ]
115 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
116 main.Mininet1.ip_address,
117 cellAppString, ipList )
118 main.step( "Applying cell variable to environment" )
119 cellResult = main.ONOSbench.setCell( cellName )
120 verifyResult = main.ONOSbench.verifyCell()
121
122 # FIXME:this is short term fix
123 main.log.info( "Removing raft logs" )
124 main.ONOSbench.onosRemoveRaftLogs()
125
126 main.log.info( "Uninstalling ONOS" )
127 for node in main.nodes:
128 main.ONOSbench.onosUninstall( node.ip_address )
129
130 # Make sure ONOS is DEAD
131 main.log.info( "Killing any ONOS processes" )
132 killResults = main.TRUE
133 for node in main.nodes:
134 killed = main.ONOSbench.onosKill( node.ip_address )
135 killResults = killResults and killed
136
137 cleanInstallResult = main.TRUE
138 gitPullResult = main.TRUE
139
140 main.step( "Starting Mininet" )
141 # scp topo file to mininet
142 # TODO: move to params?
143 topoName = "obelisk.py"
144 filePath = main.ONOSbench.home + "/tools/test/topos/"
145 main.ONOSbench.scp( main.Mininet1,
146 filePath + topoName,
147 main.Mininet1.home,
148 direction="to" )
149 mnResult = main.Mininet1.startNet( )
150 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
151 onpass="Mininet Started",
152 onfail="Error starting Mininet" )
153
154 main.step( "Git checkout and pull " + gitBranch )
155 if PULLCODE:
156 main.ONOSbench.gitCheckout( gitBranch )
157 gitPullResult = main.ONOSbench.gitPull()
158 # values of 1 or 3 are good
159 utilities.assert_lesser( expect=0, actual=gitPullResult,
160 onpass="Git pull successful",
161 onfail="Git pull failed" )
162 main.ONOSbench.getVersion( report=True )
163
164 main.step( "Using mvn clean install" )
165 cleanInstallResult = main.TRUE
166 if PULLCODE and gitPullResult == main.TRUE:
167 cleanInstallResult = main.ONOSbench.cleanInstall()
168 else:
169 main.log.warn( "Did not pull new code so skipping mvn " +
170 "clean install" )
171 utilities.assert_equals( expect=main.TRUE,
172 actual=cleanInstallResult,
173 onpass="MCI successful",
174 onfail="MCI failed" )
175 # GRAPHS
176 # NOTE: important params here:
177 # job = name of Jenkins job
178 # Plot Name = Plot-HA, only can be used if multiple plots
179 # index = The number of the graph under plot name
180 job = "HAfullNetPartition"
181 plotName = "Plot-HA"
Jon Hall843f8bc2016-03-18 14:28:13 -0700182 index = "1"
Jon Hall6e709752016-02-01 13:38:46 -0800183 graphs = '<ac:structured-macro ac:name="html">\n'
184 graphs += '<ac:plain-text-body><![CDATA[\n'
185 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
186 '/plot/' + plotName + '/getPlot?index=' + index +\
187 '&width=500&height=300"' +\
188 'noborder="0" width="500" height="300" scrolling="yes" ' +\
189 'seamless="seamless"></iframe>\n'
190 graphs += ']]></ac:plain-text-body>\n'
191 graphs += '</ac:structured-macro>\n'
192 main.log.wiki(graphs)
193
194 main.step( "Creating ONOS package" )
195 # copy gen-partions file to ONOS
196 # NOTE: this assumes TestON and ONOS are on the same machine
197 srcFile = main.testDir + "/" + main.TEST + "/dependencies/onos-gen-partitions"
198 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
199 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
200 main.ONOSbench.ip_address,
201 srcFile,
202 dstDir,
203 pwd=main.ONOSbench.pwd,
204 direction="from" )
205 packageResult = main.ONOSbench.onosPackage()
206 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
207 onpass="ONOS package successful",
208 onfail="ONOS package failed" )
209
210 main.step( "Installing ONOS package" )
211 onosInstallResult = main.TRUE
212 for node in main.nodes:
213 tmpResult = main.ONOSbench.onosInstall( options="-f",
214 node=node.ip_address )
215 onosInstallResult = onosInstallResult and tmpResult
216 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
217 onpass="ONOS install successful",
218 onfail="ONOS install failed" )
219 # clean up gen-partitions file
220 try:
221 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
222 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
223 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
224 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
225 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
226 str( main.ONOSbench.handle.before ) )
227 except ( pexpect.TIMEOUT, pexpect.EOF ):
228 main.log.exception( "ONOSbench: pexpect exception found:" +
229 main.ONOSbench.handle.before )
230 main.cleanup()
231 main.exit()
232
233 main.step( "Checking if ONOS is up yet" )
234 for i in range( 2 ):
235 onosIsupResult = main.TRUE
236 for node in main.nodes:
237 started = main.ONOSbench.isup( node.ip_address )
238 if not started:
239 main.log.error( node.name + " hasn't started" )
240 onosIsupResult = onosIsupResult and started
241 if onosIsupResult == main.TRUE:
242 break
243 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
244 onpass="ONOS startup successful",
245 onfail="ONOS startup failed" )
246
247 main.log.step( "Starting ONOS CLI sessions" )
248 cliResults = main.TRUE
249 threads = []
250 for i in range( main.numCtrls ):
251 t = main.Thread( target=main.CLIs[i].startOnosCli,
252 name="startOnosCli-" + str( i ),
253 args=[main.nodes[i].ip_address] )
254 threads.append( t )
255 t.start()
256
257 for t in threads:
258 t.join()
259 cliResults = cliResults and t.result
260 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
261 onpass="ONOS cli startup successful",
262 onfail="ONOS cli startup failed" )
263
264 # Create a list of active nodes for use when some nodes are stopped
265 main.activeNodes = [ i for i in range( 0, len( main.CLIs ) ) ]
266
267 if main.params[ 'tcpdump' ].lower() == "true":
268 main.step( "Start Packet Capture MN" )
269 main.Mininet2.startTcpdump(
270 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
271 + "-MN.pcap",
272 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
273 port=main.params[ 'MNtcpdump' ][ 'port' ] )
274
275 main.step( "App Ids check" )
276 time.sleep(60)
277 appCheck = main.TRUE
278 threads = []
279 for i in main.activeNodes:
280 t = main.Thread( target=main.CLIs[i].appToIDCheck,
281 name="appToIDCheck-" + str( i ),
282 args=[] )
283 threads.append( t )
284 t.start()
285
286 for t in threads:
287 t.join()
288 appCheck = appCheck and t.result
289 if appCheck != main.TRUE:
290 node = main.activeNodes[0]
291 main.log.warn( main.CLIs[node].apps() )
292 main.log.warn( main.CLIs[node].appIDs() )
293 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
294 onpass="App Ids seem to be correct",
295 onfail="Something is wrong with app Ids" )
296
297 if cliResults == main.FALSE:
298 main.log.error( "Failed to start ONOS, stopping test" )
299 main.cleanup()
300 main.exit()
301
302 def CASE2( self, main ):
303 """
304 Assign devices to controllers
305 """
306 import re
307 assert main.numCtrls, "main.numCtrls not defined"
308 assert main, "main not defined"
309 assert utilities.assert_equals, "utilities.assert_equals not defined"
310 assert main.CLIs, "main.CLIs not defined"
311 assert main.nodes, "main.nodes not defined"
312 assert ONOS1Port, "ONOS1Port not defined"
313 assert ONOS2Port, "ONOS2Port not defined"
314 assert ONOS3Port, "ONOS3Port not defined"
315 assert ONOS4Port, "ONOS4Port not defined"
316 assert ONOS5Port, "ONOS5Port not defined"
317 assert ONOS6Port, "ONOS6Port not defined"
318 assert ONOS7Port, "ONOS7Port not defined"
319
320 main.case( "Assigning devices to controllers" )
321 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " +\
322 "and check that an ONOS node becomes the " +\
323 "master of the device."
324 main.step( "Assign switches to controllers" )
325
326 ipList = []
327 for i in range( main.numCtrls ):
328 ipList.append( main.nodes[ i ].ip_address )
329 swList = []
330 for i in range( 1, 29 ):
331 swList.append( "s" + str( i ) )
332 main.Mininet1.assignSwController( sw=swList, ip=ipList )
333
334 mastershipCheck = main.TRUE
335 for i in range( 1, 29 ):
336 response = main.Mininet1.getSwController( "s" + str( i ) )
337 try:
338 main.log.info( str( response ) )
339 except Exception:
340 main.log.info( repr( response ) )
341 for node in main.nodes:
342 if re.search( "tcp:" + node.ip_address, response ):
343 mastershipCheck = mastershipCheck and main.TRUE
344 else:
345 main.log.error( "Error, node " + node.ip_address + " is " +
346 "not in the list of controllers s" +
347 str( i ) + " is connecting to." )
348 mastershipCheck = main.FALSE
349 utilities.assert_equals(
350 expect=main.TRUE,
351 actual=mastershipCheck,
352 onpass="Switch mastership assigned correctly",
353 onfail="Switches not assigned correctly to controllers" )
354
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually pins each of the 28 switches to a specific ONOS node with
        'device-role', then re-reads the mastership of each device to
        confirm the reassignment took effect.
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        # ipList[k] / deviceList[k] pair up the intended master and the
        # device it should own, for the verification pass below
        ipList = [ ]
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[0] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster thanks to the "% main.numCtrls" wrap-around below
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # NOTE: the "1000"/"2000"/... arguments are dpid fragments
                #       looked up through getDevice, which can return None
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # AttributeError: getDevice returned None; AssertionError: no id
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
476
    def CASE3( self, main ):
        """
        Assign intents

        Uses reactive forwarding + pingall to discover hosts, then adds
        host-to-host intents between h8..h17 and h18..h27, distributing the
        add calls across the active nodes. Finally waits for anti-entropy
        to disperse the intents to every node and checks they are all
        INSTALLED.
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                                "assign predetermined host-to-host intents." +\
                                " After installation, check that the intent" +\
                                " is distributed to all nodes and the state" +\
                                " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[0] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        # Verify app-name <-> app-id mappings agree on every active node
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            # one retry; forwarding may not have been ready yet
            main.log.warn("First pingall failed. Trying again...")
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass= passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[0]
        uninstallResult = main.CLIs[node].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[0]
            main.log.warn( main.CLIs[node].apps() )
            main.log.warn( main.CLIs[node].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        # Pair h8..h17 with h18..h27; host MACs follow the mininet scheme
        # 00:00:00:00:00:<host number in hex>
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                # round-robin the intent adds across the active nodes
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[nodeNum]
                tmpId = main.CLIs[node].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                     repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[0]
                hosts = main.CLIs[node].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        # Dump leadership info for debugging if topics are missing
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[i].leaders( jsonFormat=False)
                main.log.warn( str( main.CLIs[i].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        # Poll (up to 100s) until every node reports the same INSTALLED
        # intent set, to measure the eventual-consistency gossip time
        for j in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[i].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[i].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[i].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        # gossipTime is exported for later cases / reporting
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        gossipPeriod = int( main.params['timers']['gossip'] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        # NOTE(review): pendingMap is a JSON string here, so this is a
        #               substring test for "key" — confirm intent
        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
872
    def CASE4( self, main ):
        """
        Ping across added host intents

        Verifies end-to-end connectivity for the host intents added in
        CASE3 by pinging h<i> -> h<i+10> for i in 8..17 through Mininet.
        Then polls intent state until all intents are INSTALLED, checks
        topic leadership, partitions and the pending map, and -- if any
        intent was still not INSTALLED -- waits 60 seconds and repeats
        the pings.  Results are reported via utilities.assert_equals.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"
        main.step( "Ping across added host intents" )
        # Use the first active node's CLI for all single-node queries below
        onosCli = main.CLIs[ main.activeNodes[0] ]
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll for up to ~40 seconds ( 1 s between tries ) until every
        # intent reports an INSTALLED state
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                # NOTE(review): assumes exactly 14 intent partitions --
                #               confirm against the cluster configuration
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
            # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[i]
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost " )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        # If anything was still not INSTALLED, give the cluster another
        # minute and dump the same diagnostics again before re-pinging
        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[i]
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
        # Print flowrules
        node = main.activeNodes[0]
        main.log.debug( main.CLIs[node].flows( jsonFormat=False ) )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )
1153
    def CASE5( self, main ):
        """
        Reading state of ONOS

        Gathers the full cluster state ( mastership, intents, flows, OF
        tables, topology ) from every active node in parallel threads,
        checks the views are consistent across nodes and match Mininet,
        and stashes snapshots in the module-level globals
        mastershipState / intentState / flowState / flows so later cases
        ( e.g. CASE7 ) can compare against the pre-failure state.  Also
        kicks off long-running background pings used during the failure.
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents,flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        # Snapshot of switch mastership, consumed by later cases
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        mastershipCheck = main.FALSE
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[i] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            # Inconsistent: dump each node's view for debugging
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        # Snapshot of installed intents, consumed by later cases
        global intentState
        intentState = []
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[i] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id', "Error" ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[-1] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[i] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        # Snapshot of flow rules, consumed by later cases
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep(30)
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[i] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        # NOTE(review): if any entry is None ( parse failure above ),
        #               len( None ) raises TypeError here -- verify
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[i] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        # Snapshot of raw switch flow tables ( s1..s28 ), for later diffing
        global flows
        flows = []
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        # Background pings keep dataplane traffic flowing during the
        # upcoming failure case; endpoints come from the params file
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source1' ],
            target=main.params[ 'PING' ][ 'target1' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source2' ],
            target=main.params[ 'PING' ][ 'target2' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source3' ],
            target=main.params[ 'PING' ][ 'target3' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source4' ],
            target=main.params[ 'PING' ][ 'target4' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source5' ],
            target=main.params[ 'PING' ][ 'target5' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source6' ],
            target=main.params[ 'PING' ][ 'target6' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source7' ],
            target=main.params[ 'PING' ][ 'target7' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source8' ],
            target=main.params[ 'PING' ][ 'target8' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source9' ],
            target=main.params[ 'PING' ][ 'target9' ],
            pingTime=500 )
        main.Mininet2.pingLong(
            src=main.params[ 'PING' ][ 'source10' ],
            target=main.params[ 'PING' ][ 'target10' ],
            pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
        devices = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].devices,
                             name="devices-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            devices.append( t.result )
        hosts = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].hosts,
                             name="hosts-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            try:
                hosts.append( json.loads( t.result ) )
            except ( ValueError, TypeError ):
                # FIXME: better handling of this, print which node
                #        Maybe use thread name?
                main.log.exception( "Error parsing json output of hosts" )
                main.log.warn( repr( t.result ) )
                hosts.append( None )

        ports = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].ports,
                             name="ports-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ports.append( t.result )
        links = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].links,
                             name="links-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            links.append( t.result )
        clusters = []
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[i].clusters,
                             name="clusters-" + str( i ),
                             args=[ ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            clusters.append( t.result )
        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Host view is consistent across ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Each host has an IP address" )
        ipResult = main.TRUE
        for controller in range( 0, len( hosts ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if hosts[ controller ]:
                for host in hosts[ controller ]:
                    if not host.get( 'ipAddresses', [ ] ):
                        main.log.error( "Error with host ips on controller" +
                                        controllerStr + ": " + str( host ) )
                        ipResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=ipResult,
            onpass="The ips of the hosts aren't empty",
            onfail="The ip of at least one host is missing" )

        # Strongly connected clusters of devices
        main.step( "Cluster view is consistent across ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[controller] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE

            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        # there should always only be one cluster
        main.step( "Cluster view correct across ONOS nodes" )
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        main.step( "Comparing ONOS topology to MN" )
        devicesResults = main.TRUE
        linksResults = main.TRUE
        hostsResults = main.TRUE
        mnSwitches = main.Mininet1.getSwitches()
        mnLinks = main.Mininet1.getLinks()
        mnHosts = main.Mininet1.getHosts()
        # NOTE(review): this loop iterates node ids but then uses them as
        #               indexes into main.activeNodes and the result lists;
        #               only correct while activeNodes == [ 0..n-1 ] -- confirm
        for controller in main.activeNodes:
            controllerStr = str( main.activeNodes[controller] + 1 )
            if devices[ controller ] and ports[ controller ] and\
                    "Error" not in devices[ controller ] and\
                    "Error" not in ports[ controller ]:
                currentDevicesResult = main.Mininet1.compareSwitches(
                    mnSwitches,
                    json.loads( devices[ controller ] ),
                    json.loads( ports[ controller ] ) )
            else:
                currentDevicesResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentDevicesResult,
                                     onpass="ONOS" + controllerStr +
                                     " Switches view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " Switches view is incorrect" )
            if links[ controller ] and "Error" not in links[ controller ]:
                currentLinksResult = main.Mininet1.compareLinks(
                    mnSwitches, mnLinks,
                    json.loads( links[ controller ] ) )
            else:
                currentLinksResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentLinksResult,
                                     onpass="ONOS" + controllerStr +
                                     " links view is correct",
                                     onfail="ONOS" + controllerStr +
                                     " links view is incorrect" )

            if hosts[ controller ] and "Error" not in hosts[ controller ]:
                currentHostsResult = main.Mininet1.compareHosts(
                    mnHosts,
                    hosts[ controller ] )
            else:
                currentHostsResult = main.FALSE
            utilities.assert_equals( expect=main.TRUE,
                                     actual=currentHostsResult,
                                     onpass="ONOS" + controllerStr +
                                     " hosts exist in Mininet",
                                     onfail="ONOS" + controllerStr +
                                     " hosts don't match Mininet" )

            devicesResults = devicesResults and currentDevicesResult
            linksResults = linksResults and currentLinksResult
            hostsResults = hostsResults and currentHostsResult

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )
1708
1709 def CASE61( self, main ):
1710 """
1711 The Failure case.
1712 """
1713 import math
1714 assert main.numCtrls, "main.numCtrls not defined"
1715 assert main, "main not defined"
1716 assert utilities.assert_equals, "utilities.assert_equals not defined"
1717 assert main.CLIs, "main.CLIs not defined"
1718 assert main.nodes, "main.nodes not defined"
1719 main.case( "Partition ONOS nodes into two distinct partitions" )
1720
1721 main.step( "Checking ONOS Logs for errors" )
1722 for node in main.nodes:
1723 main.log.debug( "Checking logs for errors on " + node.name + ":" )
1724 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
1725
1726 n = len( main.nodes ) # Number of nodes
1727 p = ( ( n + 1 ) / 2 ) + 1 # Number of partitions
1728 main.partition = [ 0 ] # ONOS node to partition, listed by index in main.nodes
1729 if n > 3:
1730 main.partition.append( p - 1 )
1731 # NOTE: This only works for cluster sizes of 3,5, or 7.
1732
1733 main.step( "Partitioning ONOS nodes" )
1734 nodeList = [ str( i + 1 ) for i in main.partition ]
1735 main.log.info( "Nodes to be partitioned: " + str( nodeList ) )
1736 partitionResults = main.TRUE
1737 for i in range( 0, n ):
1738 this = main.nodes[i]
1739 if i not in main.partition:
1740 for j in main.partition:
1741 foe = main.nodes[j]
1742 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1743 #CMD HERE
1744 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1745 this.handle.sendline( cmdStr )
1746 this.handle.expect( "\$" )
1747 main.log.debug( this.handle.before )
1748 else:
1749 for j in range( 0, n ):
1750 if j not in main.partition:
1751 foe = main.nodes[j]
1752 main.log.warn( "Setting IP Tables rule from {} to {}. ".format( this.ip_address, foe.ip_address ) )
1753 #CMD HERE
1754 cmdStr = "sudo iptables -A {} -d {} -s {} -j DROP".format( "INPUT", this.ip_address, foe.ip_address )
1755 this.handle.sendline( cmdStr )
1756 this.handle.expect( "\$" )
1757 main.log.debug( this.handle.before )
1758 main.activeNodes.remove( i )
1759 # NOTE: When dynamic clustering is finished, we need to start checking
1760 # main.partion nodes still work when partitioned
1761 utilities.assert_equals( expect=main.TRUE, actual=partitionResults,
1762 onpass="Firewall rules set successfully",
1763 onfail="Error setting firewall rules" )
1764
1765 main.log.step( "Sleeping 60 seconds" )
1766 time.sleep( 60 )
1767
1768 def CASE62( self, main ):
1769 """
1770 Healing Partition
1771 """
1772 import time
1773 assert main.numCtrls, "main.numCtrls not defined"
1774 assert main, "main not defined"
1775 assert utilities.assert_equals, "utilities.assert_equals not defined"
1776 assert main.CLIs, "main.CLIs not defined"
1777 assert main.nodes, "main.nodes not defined"
1778 assert main.partition, "main.partition not defined"
1779 main.case( "Healing Partition" )
1780
1781 main.step( "Deleteing firewall rules" )
1782 healResults = main.TRUE
1783 for node in main.nodes:
1784 cmdStr = "sudo iptables -F"
1785 node.handle.sendline( cmdStr )
1786 node.handle.expect( "\$" )
1787 main.log.debug( node.handle.before )
1788 utilities.assert_equals( expect=main.TRUE, actual=healResults,
1789 onpass="Firewall rules removed",
1790 onfail="Error removing firewall rules" )
1791
1792 for node in main.partition:
1793 main.activeNodes.append( node )
1794 main.activeNodes.sort()
1795 try:
1796 assert list( set( main.activeNodes ) ) == main.activeNodes,\
1797 "List of active nodes has duplicates, this likely indicates something was run out of order"
1798 except AssertionError:
1799 main.log.exception( "" )
1800 main.cleanup()
1801 main.exit()
1802
1803 def CASE7( self, main ):
1804 """
1805 Check state after ONOS failure
1806 """
1807 import json
1808 assert main.numCtrls, "main.numCtrls not defined"
1809 assert main, "main not defined"
1810 assert utilities.assert_equals, "utilities.assert_equals not defined"
1811 assert main.CLIs, "main.CLIs not defined"
1812 assert main.nodes, "main.nodes not defined"
1813 try:
1814 main.partition
1815 except AttributeError:
1816 main.partition = []
1817
1818 main.case( "Running ONOS Constant State Tests" )
1819
1820 main.step( "Check that each switch has a master" )
1821 # Assert that each device has a master
1822 rolesNotNull = main.TRUE
1823 threads = []
1824 for i in main.activeNodes:
1825 t = main.Thread( target=main.CLIs[i].rolesNotNull,
1826 name="rolesNotNull-" + str( i ),
1827 args=[ ] )
1828 threads.append( t )
1829 t.start()
1830
1831 for t in threads:
1832 t.join()
1833 rolesNotNull = rolesNotNull and t.result
1834 utilities.assert_equals(
1835 expect=main.TRUE,
1836 actual=rolesNotNull,
1837 onpass="Each device has a master",
1838 onfail="Some devices don't have a master assigned" )
1839
1840 main.step( "Read device roles from ONOS" )
1841 ONOSMastership = []
1842 mastershipCheck = main.FALSE
1843 consistentMastership = True
1844 rolesResults = True
1845 threads = []
1846 for i in main.activeNodes:
1847 t = main.Thread( target=main.CLIs[i].roles,
1848 name="roles-" + str( i ),
1849 args=[] )
1850 threads.append( t )
1851 t.start()
1852
1853 for t in threads:
1854 t.join()
1855 ONOSMastership.append( t.result )
1856
1857 for i in range( len( ONOSMastership ) ):
1858 node = str( main.activeNodes[i] + 1 )
1859 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1860 main.log.error( "Error in getting ONOS" + node + " roles" )
1861 main.log.warn( "ONOS" + node + " mastership response: " +
1862 repr( ONOSMastership[i] ) )
1863 rolesResults = False
1864 utilities.assert_equals(
1865 expect=True,
1866 actual=rolesResults,
1867 onpass="No error in reading roles output",
1868 onfail="Error in reading roles from ONOS" )
1869
1870 main.step( "Check for consistency in roles from each controller" )
1871 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1872 main.log.info(
1873 "Switch roles are consistent across all ONOS nodes" )
1874 else:
1875 consistentMastership = False
1876 utilities.assert_equals(
1877 expect=True,
1878 actual=consistentMastership,
1879 onpass="Switch roles are consistent across all ONOS nodes",
1880 onfail="ONOS nodes have different views of switch roles" )
1881
1882 if rolesResults and not consistentMastership:
1883 for i in range( len( ONOSMastership ) ):
1884 node = str( main.activeNodes[i] + 1 )
1885 main.log.warn( "ONOS" + node + " roles: ",
1886 json.dumps( json.loads( ONOSMastership[ i ] ),
1887 sort_keys=True,
1888 indent=4,
1889 separators=( ',', ': ' ) ) )
1890
1891 # NOTE: we expect mastership to change on controller failure
1892
1893 main.step( "Get the intents and compare across all nodes" )
1894 ONOSIntents = []
1895 intentCheck = main.FALSE
1896 consistentIntents = True
1897 intentsResults = True
1898 threads = []
1899 for i in main.activeNodes:
1900 t = main.Thread( target=main.CLIs[i].intents,
1901 name="intents-" + str( i ),
1902 args=[],
1903 kwargs={ 'jsonFormat': True } )
1904 threads.append( t )
1905 t.start()
1906
1907 for t in threads:
1908 t.join()
1909 ONOSIntents.append( t.result )
1910
1911 for i in range( len( ONOSIntents) ):
1912 node = str( main.activeNodes[i] + 1 )
1913 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1914 main.log.error( "Error in getting ONOS" + node + " intents" )
1915 main.log.warn( "ONOS" + node + " intents response: " +
1916 repr( ONOSIntents[ i ] ) )
1917 intentsResults = False
1918 utilities.assert_equals(
1919 expect=True,
1920 actual=intentsResults,
1921 onpass="No error in reading intents output",
1922 onfail="Error in reading intents from ONOS" )
1923
1924 main.step( "Check for consistency in Intents from each controller" )
1925 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1926 main.log.info( "Intents are consistent across all ONOS " +
1927 "nodes" )
1928 else:
1929 consistentIntents = False
1930
1931 # Try to make it easy to figure out what is happening
1932 #
1933 # Intent ONOS1 ONOS2 ...
1934 # 0x01 INSTALLED INSTALLING
1935 # ... ... ...
1936 # ... ... ...
1937 title = " ID"
1938 for n in main.activeNodes:
1939 title += " " * 10 + "ONOS" + str( n + 1 )
1940 main.log.warn( title )
1941 # get all intent keys in the cluster
1942 keys = []
1943 for nodeStr in ONOSIntents:
1944 node = json.loads( nodeStr )
1945 for intent in node:
1946 keys.append( intent.get( 'id' ) )
1947 keys = set( keys )
1948 for key in keys:
1949 row = "%-13s" % key
1950 for nodeStr in ONOSIntents:
1951 node = json.loads( nodeStr )
1952 for intent in node:
1953 if intent.get( 'id' ) == key:
1954 row += "%-15s" % intent.get( 'state' )
1955 main.log.warn( row )
1956 # End table view
1957
1958 utilities.assert_equals(
1959 expect=True,
1960 actual=consistentIntents,
1961 onpass="Intents are consistent across all ONOS nodes",
1962 onfail="ONOS nodes have different views of intents" )
1963 intentStates = []
1964 for node in ONOSIntents: # Iter through ONOS nodes
1965 nodeStates = []
1966 # Iter through intents of a node
1967 try:
1968 for intent in json.loads( node ):
1969 nodeStates.append( intent[ 'state' ] )
1970 except ( ValueError, TypeError ):
1971 main.log.exception( "Error in parsing intents" )
1972 main.log.error( repr( node ) )
1973 intentStates.append( nodeStates )
1974 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
1975 main.log.info( dict( out ) )
1976
1977 if intentsResults and not consistentIntents:
1978 for i in range( len( main.activeNodes ) ):
1979 node = str( main.activeNodes[i] + 1 )
1980 main.log.warn( "ONOS" + node + " intents: " )
1981 main.log.warn( json.dumps(
1982 json.loads( ONOSIntents[ i ] ),
1983 sort_keys=True,
1984 indent=4,
1985 separators=( ',', ': ' ) ) )
1986 elif intentsResults and consistentIntents:
1987 intentCheck = main.TRUE
1988
1989 # NOTE: Store has no durability, so intents are lost across system
1990 # restarts
1991 main.step( "Compare current intents with intents before the failure" )
1992 # NOTE: this requires case 5 to pass for intentState to be set.
1993 # maybe we should stop the test if that fails?
1994 sameIntents = main.FALSE
1995 try:
1996 intentState
1997 except NameError:
1998 main.log.warn( "No previous intent state was saved" )
1999 else:
2000 if intentState and intentState == ONOSIntents[ 0 ]:
2001 sameIntents = main.TRUE
2002 main.log.info( "Intents are consistent with before failure" )
2003 # TODO: possibly the states have changed? we may need to figure out
2004 # what the acceptable states are
2005 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2006 sameIntents = main.TRUE
2007 try:
2008 before = json.loads( intentState )
2009 after = json.loads( ONOSIntents[ 0 ] )
2010 for intent in before:
2011 if intent not in after:
2012 sameIntents = main.FALSE
2013 main.log.debug( "Intent is not currently in ONOS " +
2014 "(at least in the same form):" )
2015 main.log.debug( json.dumps( intent ) )
2016 except ( ValueError, TypeError ):
2017 main.log.exception( "Exception printing intents" )
2018 main.log.debug( repr( ONOSIntents[0] ) )
2019 main.log.debug( repr( intentState ) )
2020 if sameIntents == main.FALSE:
2021 try:
2022 main.log.debug( "ONOS intents before: " )
2023 main.log.debug( json.dumps( json.loads( intentState ),
2024 sort_keys=True, indent=4,
2025 separators=( ',', ': ' ) ) )
2026 main.log.debug( "Current ONOS intents: " )
2027 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2028 sort_keys=True, indent=4,
2029 separators=( ',', ': ' ) ) )
2030 except ( ValueError, TypeError ):
2031 main.log.exception( "Exception printing intents" )
2032 main.log.debug( repr( ONOSIntents[0] ) )
2033 main.log.debug( repr( intentState ) )
2034 utilities.assert_equals(
2035 expect=main.TRUE,
2036 actual=sameIntents,
2037 onpass="Intents are consistent with before failure",
2038 onfail="The Intents changed during failure" )
2039 intentCheck = intentCheck and sameIntents
2040
2041 main.step( "Get the OF Table entries and compare to before " +
2042 "component failure" )
2043 FlowTables = main.TRUE
2044 for i in range( 28 ):
2045 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2046 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2047 FlowTables = FlowTables and main.Mininet1.flowTableComp( flows[i], tmpFlows )
2048 if FlowTables == main.FALSE:
2049 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2050 utilities.assert_equals(
2051 expect=main.TRUE,
2052 actual=FlowTables,
2053 onpass="No changes were found in the flow tables",
2054 onfail="Changes were found in the flow tables" )
2055
2056 main.Mininet2.pingLongKill()
2057 '''
2058 main.step( "Check the continuous pings to ensure that no packets " +
2059 "were dropped during component failure" )
2060 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2061 main.params[ 'TESTONIP' ] )
2062 LossInPings = main.FALSE
2063 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2064 for i in range( 8, 18 ):
2065 main.log.info(
2066 "Checking for a loss in pings along flow from s" +
2067 str( i ) )
2068 LossInPings = main.Mininet2.checkForLoss(
2069 "/tmp/ping.h" +
2070 str( i ) ) or LossInPings
2071 if LossInPings == main.TRUE:
2072 main.log.info( "Loss in ping detected" )
2073 elif LossInPings == main.ERROR:
2074 main.log.info( "There are multiple mininet process running" )
2075 elif LossInPings == main.FALSE:
2076 main.log.info( "No Loss in the pings" )
2077 main.log.info( "No loss of dataplane connectivity" )
2078 utilities.assert_equals(
2079 expect=main.FALSE,
2080 actual=LossInPings,
2081 onpass="No Loss of connectivity",
2082 onfail="Loss of dataplane connectivity detected" )
2083 '''
2084
2085 main.step( "Leadership Election is still functional" )
2086 # Test of LeadershipElection
2087 leaderList = []
2088
2089 partitioned = []
2090 for i in main.partition:
2091 partitioned.append( main.nodes[i].ip_address )
2092 leaderResult = main.TRUE
2093
2094 for i in main.activeNodes:
2095 cli = main.CLIs[i]
2096 leaderN = cli.electionTestLeader()
2097 leaderList.append( leaderN )
2098 if leaderN == main.FALSE:
2099 # error in response
2100 main.log.error( "Something is wrong with " +
2101 "electionTestLeader function, check the" +
2102 " error logs" )
2103 leaderResult = main.FALSE
2104 elif leaderN is None:
2105 main.log.error( cli.name +
2106 " shows no leader for the election-app was" +
2107 " elected after the old one died" )
2108 leaderResult = main.FALSE
2109 elif leaderN in partitioned:
2110 main.log.error( cli.name + " shows " + str( leaderN ) +
2111 " as leader for the election-app, but it " +
2112 "was partitioned" )
2113 leaderResult = main.FALSE
2114 if len( set( leaderList ) ) != 1:
2115 leaderResult = main.FALSE
2116 main.log.error(
2117 "Inconsistent view of leader for the election test app" )
2118 # TODO: print the list
2119 utilities.assert_equals(
2120 expect=main.TRUE,
2121 actual=leaderResult,
2122 onpass="Leadership election passed",
2123 onfail="Something went wrong with Leadership election" )
2124
2125 def CASE8( self, main ):
2126 """
2127 Compare topo
2128 """
2129 import json
2130 import time
2131 assert main.numCtrls, "main.numCtrls not defined"
2132 assert main, "main not defined"
2133 assert utilities.assert_equals, "utilities.assert_equals not defined"
2134 assert main.CLIs, "main.CLIs not defined"
2135 assert main.nodes, "main.nodes not defined"
2136
2137 main.case( "Compare ONOS Topology view to Mininet topology" )
2138 main.caseExplanation = "Compare topology objects between Mininet" +\
2139 " and ONOS"
2140 topoResult = main.FALSE
2141 topoFailMsg = "ONOS topology don't match Mininet"
2142 elapsed = 0
2143 count = 0
2144 main.step( "Comparing ONOS topology to MN topology" )
2145 startTime = time.time()
2146 # Give time for Gossip to work
2147 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
2148 devicesResults = main.TRUE
2149 linksResults = main.TRUE
2150 hostsResults = main.TRUE
2151 hostAttachmentResults = True
2152 count += 1
2153 cliStart = time.time()
2154 devices = []
2155 threads = []
2156 for i in main.activeNodes:
2157 t = main.Thread( target=utilities.retry,
2158 name="devices-" + str( i ),
2159 args=[ main.CLIs[i].devices, [ None ] ],
2160 kwargs= { 'sleep': 5, 'attempts': 5,
2161 'randomTime': True } )
2162 threads.append( t )
2163 t.start()
2164
2165 for t in threads:
2166 t.join()
2167 devices.append( t.result )
2168 hosts = []
2169 ipResult = main.TRUE
2170 threads = []
2171 for i in main.activeNodes:
2172 t = main.Thread( target=utilities.retry,
2173 name="hosts-" + str( i ),
2174 args=[ main.CLIs[i].hosts, [ None ] ],
2175 kwargs= { 'sleep': 5, 'attempts': 5,
2176 'randomTime': True } )
2177 threads.append( t )
2178 t.start()
2179
2180 for t in threads:
2181 t.join()
2182 try:
2183 hosts.append( json.loads( t.result ) )
2184 except ( ValueError, TypeError ):
2185 main.log.exception( "Error parsing hosts results" )
2186 main.log.error( repr( t.result ) )
2187 hosts.append( None )
2188 for controller in range( 0, len( hosts ) ):
2189 controllerStr = str( main.activeNodes[controller] + 1 )
2190 if hosts[ controller ]:
2191 for host in hosts[ controller ]:
2192 if host is None or host.get( 'ipAddresses', [] ) == []:
2193 main.log.error(
2194 "Error with host ipAddresses on controller" +
2195 controllerStr + ": " + str( host ) )
2196 ipResult = main.FALSE
2197 ports = []
2198 threads = []
2199 for i in main.activeNodes:
2200 t = main.Thread( target=utilities.retry,
2201 name="ports-" + str( i ),
2202 args=[ main.CLIs[i].ports, [ None ] ],
2203 kwargs= { 'sleep': 5, 'attempts': 5,
2204 'randomTime': True } )
2205 threads.append( t )
2206 t.start()
2207
2208 for t in threads:
2209 t.join()
2210 ports.append( t.result )
2211 links = []
2212 threads = []
2213 for i in main.activeNodes:
2214 t = main.Thread( target=utilities.retry,
2215 name="links-" + str( i ),
2216 args=[ main.CLIs[i].links, [ None ] ],
2217 kwargs= { 'sleep': 5, 'attempts': 5,
2218 'randomTime': True } )
2219 threads.append( t )
2220 t.start()
2221
2222 for t in threads:
2223 t.join()
2224 links.append( t.result )
2225 clusters = []
2226 threads = []
2227 for i in main.activeNodes:
2228 t = main.Thread( target=utilities.retry,
2229 name="clusters-" + str( i ),
2230 args=[ main.CLIs[i].clusters, [ None ] ],
2231 kwargs= { 'sleep': 5, 'attempts': 5,
2232 'randomTime': True } )
2233 threads.append( t )
2234 t.start()
2235
2236 for t in threads:
2237 t.join()
2238 clusters.append( t.result )
2239
2240 elapsed = time.time() - startTime
2241 cliTime = time.time() - cliStart
2242 print "Elapsed time: " + str( elapsed )
2243 print "CLI time: " + str( cliTime )
2244
2245 if all( e is None for e in devices ) and\
2246 all( e is None for e in hosts ) and\
2247 all( e is None for e in ports ) and\
2248 all( e is None for e in links ) and\
2249 all( e is None for e in clusters ):
2250 topoFailMsg = "Could not get topology from ONOS"
2251 main.log.error( topoFailMsg )
2252 continue # Try again, No use trying to compare
2253
2254 mnSwitches = main.Mininet1.getSwitches()
2255 mnLinks = main.Mininet1.getLinks()
2256 mnHosts = main.Mininet1.getHosts()
2257 for controller in range( len( main.activeNodes ) ):
2258 controllerStr = str( main.activeNodes[controller] + 1 )
2259 if devices[ controller ] and ports[ controller ] and\
2260 "Error" not in devices[ controller ] and\
2261 "Error" not in ports[ controller ]:
2262
2263 try:
2264 currentDevicesResult = main.Mininet1.compareSwitches(
2265 mnSwitches,
2266 json.loads( devices[ controller ] ),
2267 json.loads( ports[ controller ] ) )
2268 except ( TypeError, ValueError ) as e:
2269 main.log.exception( "Object not as expected; devices={!r}\nports={!r}".format(
2270 devices[ controller ], ports[ controller ] ) )
2271 else:
2272 currentDevicesResult = main.FALSE
2273 utilities.assert_equals( expect=main.TRUE,
2274 actual=currentDevicesResult,
2275 onpass="ONOS" + controllerStr +
2276 " Switches view is correct",
2277 onfail="ONOS" + controllerStr +
2278 " Switches view is incorrect" )
2279
2280 if links[ controller ] and "Error" not in links[ controller ]:
2281 currentLinksResult = main.Mininet1.compareLinks(
2282 mnSwitches, mnLinks,
2283 json.loads( links[ controller ] ) )
2284 else:
2285 currentLinksResult = main.FALSE
2286 utilities.assert_equals( expect=main.TRUE,
2287 actual=currentLinksResult,
2288 onpass="ONOS" + controllerStr +
2289 " links view is correct",
2290 onfail="ONOS" + controllerStr +
2291 " links view is incorrect" )
2292 if hosts[ controller ] and "Error" not in hosts[ controller ]:
2293 currentHostsResult = main.Mininet1.compareHosts(
2294 mnHosts,
2295 hosts[ controller ] )
2296 elif hosts[ controller ] == []:
2297 currentHostsResult = main.TRUE
2298 else:
2299 currentHostsResult = main.FALSE
2300 utilities.assert_equals( expect=main.TRUE,
2301 actual=currentHostsResult,
2302 onpass="ONOS" + controllerStr +
2303 " hosts exist in Mininet",
2304 onfail="ONOS" + controllerStr +
2305 " hosts don't match Mininet" )
2306 # CHECKING HOST ATTACHMENT POINTS
2307 hostAttachment = True
2308 zeroHosts = False
2309 # FIXME: topo-HA/obelisk specific mappings:
2310 # key is mac and value is dpid
2311 mappings = {}
2312 for i in range( 1, 29 ): # hosts 1 through 28
2313 # set up correct variables:
2314 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2315 if i == 1:
2316 deviceId = "1000".zfill(16)
2317 elif i == 2:
2318 deviceId = "2000".zfill(16)
2319 elif i == 3:
2320 deviceId = "3000".zfill(16)
2321 elif i == 4:
2322 deviceId = "3004".zfill(16)
2323 elif i == 5:
2324 deviceId = "5000".zfill(16)
2325 elif i == 6:
2326 deviceId = "6000".zfill(16)
2327 elif i == 7:
2328 deviceId = "6007".zfill(16)
2329 elif i >= 8 and i <= 17:
2330 dpid = '3' + str( i ).zfill( 3 )
2331 deviceId = dpid.zfill(16)
2332 elif i >= 18 and i <= 27:
2333 dpid = '6' + str( i ).zfill( 3 )
2334 deviceId = dpid.zfill(16)
2335 elif i == 28:
2336 deviceId = "2800".zfill(16)
2337 mappings[ macId ] = deviceId
2338 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2339 if hosts[ controller ] == []:
2340 main.log.warn( "There are no hosts discovered" )
2341 zeroHosts = True
2342 else:
2343 for host in hosts[ controller ]:
2344 mac = None
2345 location = None
2346 device = None
2347 port = None
2348 try:
2349 mac = host.get( 'mac' )
2350 assert mac, "mac field could not be found for this host object"
2351
2352 location = host.get( 'location' )
2353 assert location, "location field could not be found for this host object"
2354
2355 # Trim the protocol identifier off deviceId
2356 device = str( location.get( 'elementId' ) ).split(':')[1]
2357 assert device, "elementId field could not be found for this host location object"
2358
2359 port = location.get( 'port' )
2360 assert port, "port field could not be found for this host location object"
2361
2362 # Now check if this matches where they should be
2363 if mac and device and port:
2364 if str( port ) != "1":
2365 main.log.error( "The attachment port is incorrect for " +
2366 "host " + str( mac ) +
2367 ". Expected: 1 Actual: " + str( port) )
2368 hostAttachment = False
2369 if device != mappings[ str( mac ) ]:
2370 main.log.error( "The attachment device is incorrect for " +
2371 "host " + str( mac ) +
2372 ". Expected: " + mappings[ str( mac ) ] +
2373 " Actual: " + device )
2374 hostAttachment = False
2375 else:
2376 hostAttachment = False
2377 except AssertionError:
2378 main.log.exception( "Json object not as expected" )
2379 main.log.error( repr( host ) )
2380 hostAttachment = False
2381 else:
2382 main.log.error( "No hosts json output or \"Error\"" +
2383 " in output. hosts = " +
2384 repr( hosts[ controller ] ) )
2385 if zeroHosts is False:
2386 hostAttachment = True
2387
2388 # END CHECKING HOST ATTACHMENT POINTS
2389 devicesResults = devicesResults and currentDevicesResult
2390 linksResults = linksResults and currentLinksResult
2391 hostsResults = hostsResults and currentHostsResult
2392 hostAttachmentResults = hostAttachmentResults and\
2393 hostAttachment
2394 topoResult = devicesResults and linksResults and\
2395 hostsResults and hostAttachmentResults
2396 utilities.assert_equals( expect=True,
2397 actual=topoResult,
2398 onpass="ONOS topology matches Mininet",
2399 onfail=topoFailMsg )
2400 # End of While loop to pull ONOS state
2401
2402 # Compare json objects for hosts and dataplane clusters
2403
2404 # hosts
2405 main.step( "Hosts view is consistent across all ONOS nodes" )
2406 consistentHostsResult = main.TRUE
2407 for controller in range( len( hosts ) ):
2408 controllerStr = str( main.activeNodes[controller] + 1 )
2409 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2410 if hosts[ controller ] == hosts[ 0 ]:
2411 continue
2412 else: # hosts not consistent
2413 main.log.error( "hosts from ONOS" + controllerStr +
2414 " is inconsistent with ONOS1" )
2415 main.log.warn( repr( hosts[ controller ] ) )
2416 consistentHostsResult = main.FALSE
2417
2418 else:
2419 main.log.error( "Error in getting ONOS hosts from ONOS" +
2420 controllerStr )
2421 consistentHostsResult = main.FALSE
2422 main.log.warn( "ONOS" + controllerStr +
2423 " hosts response: " +
2424 repr( hosts[ controller ] ) )
2425 utilities.assert_equals(
2426 expect=main.TRUE,
2427 actual=consistentHostsResult,
2428 onpass="Hosts view is consistent across all ONOS nodes",
2429 onfail="ONOS nodes have different views of hosts" )
2430
2431 main.step( "Hosts information is correct" )
2432 hostsResults = hostsResults and ipResult
2433 utilities.assert_equals(
2434 expect=main.TRUE,
2435 actual=hostsResults,
2436 onpass="Host information is correct",
2437 onfail="Host information is incorrect" )
2438
2439 main.step( "Host attachment points to the network" )
2440 utilities.assert_equals(
2441 expect=True,
2442 actual=hostAttachmentResults,
2443 onpass="Hosts are correctly attached to the network",
2444 onfail="ONOS did not correctly attach hosts to the network" )
2445
2446 # Strongly connected clusters of devices
2447 main.step( "Clusters view is consistent across all ONOS nodes" )
2448 consistentClustersResult = main.TRUE
2449 for controller in range( len( clusters ) ):
2450 controllerStr = str( main.activeNodes[controller] + 1 )
2451 if "Error" not in clusters[ controller ]:
2452 if clusters[ controller ] == clusters[ 0 ]:
2453 continue
2454 else: # clusters not consistent
2455 main.log.error( "clusters from ONOS" +
2456 controllerStr +
2457 " is inconsistent with ONOS1" )
2458 consistentClustersResult = main.FALSE
2459
2460 else:
2461 main.log.error( "Error in getting dataplane clusters " +
2462 "from ONOS" + controllerStr )
2463 consistentClustersResult = main.FALSE
2464 main.log.warn( "ONOS" + controllerStr +
2465 " clusters response: " +
2466 repr( clusters[ controller ] ) )
2467 utilities.assert_equals(
2468 expect=main.TRUE,
2469 actual=consistentClustersResult,
2470 onpass="Clusters view is consistent across all ONOS nodes",
2471 onfail="ONOS nodes have different views of clusters" )
2472
2473 main.step( "There is only one SCC" )
2474 # there should always only be one cluster
2475 try:
2476 numClusters = len( json.loads( clusters[ 0 ] ) )
2477 except ( ValueError, TypeError ):
2478 main.log.exception( "Error parsing clusters[0]: " +
2479 repr( clusters[0] ) )
2480 clusterResults = main.FALSE
2481 if numClusters == 1:
2482 clusterResults = main.TRUE
2483 utilities.assert_equals(
2484 expect=1,
2485 actual=numClusters,
2486 onpass="ONOS shows 1 SCC",
2487 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2488
2489 topoResult = ( devicesResults and linksResults
2490 and hostsResults and consistentHostsResult
2491 and consistentClustersResult and clusterResults
2492 and ipResult and hostAttachmentResults )
2493
2494 topoResult = topoResult and int( count <= 2 )
2495 note = "note it takes about " + str( int( cliTime ) ) + \
2496 " seconds for the test to make all the cli calls to fetch " +\
2497 "the topology from each ONOS instance"
2498 main.log.info(
2499 "Very crass estimate for topology discovery/convergence( " +
2500 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2501 str( count ) + " tries" )
2502
2503 main.step( "Device information is correct" )
2504 utilities.assert_equals(
2505 expect=main.TRUE,
2506 actual=devicesResults,
2507 onpass="Device information is correct",
2508 onfail="Device information is incorrect" )
2509
2510 main.step( "Links are correct" )
2511 utilities.assert_equals(
2512 expect=main.TRUE,
2513 actual=linksResults,
2514 onpass="Link are correct",
2515 onfail="Links are incorrect" )
2516
2517 # FIXME: move this to an ONOS state case
2518 main.step( "Checking ONOS nodes" )
2519 nodesOutput = []
2520 nodeResults = main.TRUE
2521 threads = []
2522 for i in main.activeNodes:
2523 t = main.Thread( target=main.CLIs[i].nodes,
2524 name="nodes-" + str( i ),
2525 args=[ ] )
2526 threads.append( t )
2527 t.start()
2528
2529 for t in threads:
2530 t.join()
2531 nodesOutput.append( t.result )
2532 ips = [ main.nodes[node].ip_address for node in main.activeNodes ]
2533 ips.sort()
2534 for i in nodesOutput:
2535 try:
2536 current = json.loads( i )
2537 activeIps = []
2538 currentResult = main.FALSE
2539 for node in current:
Jon Hallbd182782016-03-28 16:42:22 -07002540 if node['state'] == 'READY':
Jon Hall6e709752016-02-01 13:38:46 -08002541 activeIps.append( node['ip'] )
2542 activeIps.sort()
2543 if ips == activeIps:
2544 currentResult = main.TRUE
2545 except ( ValueError, TypeError ):
2546 main.log.error( "Error parsing nodes output" )
2547 main.log.warn( repr( i ) )
2548 currentResult = main.FALSE
2549 nodeResults = nodeResults and currentResult
2550 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2551 onpass="Nodes check successful",
2552 onfail="Nodes check NOT successful" )
2553
2554 def CASE9( self, main ):
2555 """
2556 Link s3-s28 down
2557 """
2558 import time
2559 assert main.numCtrls, "main.numCtrls not defined"
2560 assert main, "main not defined"
2561 assert utilities.assert_equals, "utilities.assert_equals not defined"
2562 assert main.CLIs, "main.CLIs not defined"
2563 assert main.nodes, "main.nodes not defined"
2564 # NOTE: You should probably run a topology check after this
2565
2566 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2567
2568 description = "Turn off a link to ensure that Link Discovery " +\
2569 "is working properly"
2570 main.case( description )
2571
2572 main.step( "Kill Link between s3 and s28" )
2573 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2574 main.log.info( "Waiting " + str( linkSleep ) +
2575 " seconds for link down to be discovered" )
2576 time.sleep( linkSleep )
2577 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2578 onpass="Link down successful",
2579 onfail="Failed to bring link down" )
2580 # TODO do some sort of check here
2581
2582 def CASE10( self, main ):
2583 """
2584 Link s3-s28 up
2585 """
2586 import time
2587 assert main.numCtrls, "main.numCtrls not defined"
2588 assert main, "main not defined"
2589 assert utilities.assert_equals, "utilities.assert_equals not defined"
2590 assert main.CLIs, "main.CLIs not defined"
2591 assert main.nodes, "main.nodes not defined"
2592 # NOTE: You should probably run a topology check after this
2593
2594 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2595
2596 description = "Restore a link to ensure that Link Discovery is " + \
2597 "working properly"
2598 main.case( description )
2599
2600 main.step( "Bring link between s3 and s28 back up" )
2601 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2602 main.log.info( "Waiting " + str( linkSleep ) +
2603 " seconds for link up to be discovered" )
2604 time.sleep( linkSleep )
2605 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2606 onpass="Link up successful",
2607 onfail="Failed to bring link up" )
2608 # TODO do some sort of check here
2609
2610 def CASE11( self, main ):
2611 """
2612 Switch Down
2613 """
2614 # NOTE: You should probably run a topology check after this
2615 import time
2616 assert main.numCtrls, "main.numCtrls not defined"
2617 assert main, "main not defined"
2618 assert utilities.assert_equals, "utilities.assert_equals not defined"
2619 assert main.CLIs, "main.CLIs not defined"
2620 assert main.nodes, "main.nodes not defined"
2621
2622 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2623
2624 description = "Killing a switch to ensure it is discovered correctly"
2625 onosCli = main.CLIs[ main.activeNodes[0] ]
2626 main.case( description )
2627 switch = main.params[ 'kill' ][ 'switch' ]
2628 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2629
2630 # TODO: Make this switch parameterizable
2631 main.step( "Kill " + switch )
2632 main.log.info( "Deleting " + switch )
2633 main.Mininet1.delSwitch( switch )
2634 main.log.info( "Waiting " + str( switchSleep ) +
2635 " seconds for switch down to be discovered" )
2636 time.sleep( switchSleep )
2637 device = onosCli.getDevice( dpid=switchDPID )
2638 # Peek at the deleted switch
2639 main.log.warn( str( device ) )
2640 result = main.FALSE
2641 if device and device[ 'available' ] is False:
2642 result = main.TRUE
2643 utilities.assert_equals( expect=main.TRUE, actual=result,
2644 onpass="Kill switch successful",
2645 onfail="Failed to kill switch?" )
2646
2647 def CASE12( self, main ):
2648 """
2649 Switch Up
2650 """
2651 # NOTE: You should probably run a topology check after this
2652 import time
2653 assert main.numCtrls, "main.numCtrls not defined"
2654 assert main, "main not defined"
2655 assert utilities.assert_equals, "utilities.assert_equals not defined"
2656 assert main.CLIs, "main.CLIs not defined"
2657 assert main.nodes, "main.nodes not defined"
2658 assert ONOS1Port, "ONOS1Port not defined"
2659 assert ONOS2Port, "ONOS2Port not defined"
2660 assert ONOS3Port, "ONOS3Port not defined"
2661 assert ONOS4Port, "ONOS4Port not defined"
2662 assert ONOS5Port, "ONOS5Port not defined"
2663 assert ONOS6Port, "ONOS6Port not defined"
2664 assert ONOS7Port, "ONOS7Port not defined"
2665
2666 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2667 switch = main.params[ 'kill' ][ 'switch' ]
2668 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2669 links = main.params[ 'kill' ][ 'links' ].split()
2670 onosCli = main.CLIs[ main.activeNodes[0] ]
2671 description = "Adding a switch to ensure it is discovered correctly"
2672 main.case( description )
2673
2674 main.step( "Add back " + switch )
2675 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2676 for peer in links:
2677 main.Mininet1.addLink( switch, peer )
2678 ipList = [ node.ip_address for node in main.nodes ]
2679 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2680 main.log.info( "Waiting " + str( switchSleep ) +
2681 " seconds for switch up to be discovered" )
2682 time.sleep( switchSleep )
2683 device = onosCli.getDevice( dpid=switchDPID )
2684 # Peek at the deleted switch
2685 main.log.warn( str( device ) )
2686 result = main.FALSE
2687 if device and device[ 'available' ]:
2688 result = main.TRUE
2689 utilities.assert_equals( expect=main.TRUE, actual=result,
2690 onpass="add switch successful",
2691 onfail="Failed to add switch?" )
2692
2693 def CASE13( self, main ):
2694 """
2695 Clean up
2696 """
2697 import os
2698 import time
2699 assert main.numCtrls, "main.numCtrls not defined"
2700 assert main, "main not defined"
2701 assert utilities.assert_equals, "utilities.assert_equals not defined"
2702 assert main.CLIs, "main.CLIs not defined"
2703 assert main.nodes, "main.nodes not defined"
2704
2705 # printing colors to terminal
2706 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2707 'blue': '\033[94m', 'green': '\033[92m',
2708 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2709 main.case( "Test Cleanup" )
2710 main.step( "Killing tcpdumps" )
2711 main.Mininet2.stopTcpdump()
2712
2713 testname = main.TEST
2714 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2715 main.step( "Copying MN pcap and ONOS log files to test station" )
2716 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2717 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2718 # NOTE: MN Pcap file is being saved to logdir.
2719 # We scp this file as MN and TestON aren't necessarily the same vm
2720
2721 # FIXME: To be replaced with a Jenkin's post script
2722 # TODO: Load these from params
2723 # NOTE: must end in /
2724 logFolder = "/opt/onos/log/"
2725 logFiles = [ "karaf.log", "karaf.log.1" ]
2726 # NOTE: must end in /
2727 for f in logFiles:
2728 for node in main.nodes:
2729 dstName = main.logdir + "/" + node.name + "-" + f
2730 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2731 logFolder + f, dstName )
2732 # std*.log's
2733 # NOTE: must end in /
2734 logFolder = "/opt/onos/var/"
2735 logFiles = [ "stderr.log", "stdout.log" ]
2736 # NOTE: must end in /
2737 for f in logFiles:
2738 for node in main.nodes:
2739 dstName = main.logdir + "/" + node.name + "-" + f
2740 main.ONOSbench.secureCopy( node.user_name, node.ip_address,
2741 logFolder + f, dstName )
2742 else:
2743 main.log.debug( "skipping saving log files" )
2744
2745 main.step( "Stopping Mininet" )
2746 mnResult = main.Mininet1.stopNet()
2747 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2748 onpass="Mininet stopped",
2749 onfail="MN cleanup NOT successful" )
2750
2751 main.step( "Checking ONOS Logs for errors" )
2752 for node in main.nodes:
2753 main.log.debug( "Checking logs for errors on " + node.name + ":" )
2754 main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )
2755
2756 try:
2757 timerLog = open( main.logdir + "/Timers.csv", 'w')
2758 # Overwrite with empty line and close
2759 labels = "Gossip Intents"
2760 data = str( gossipTime )
2761 timerLog.write( labels + "\n" + data )
2762 timerLog.close()
2763 except NameError, e:
2764 main.log.exception(e)
2765
2766 def CASE14( self, main ):
2767 """
2768 start election app on all onos nodes
2769 """
2770 assert main.numCtrls, "main.numCtrls not defined"
2771 assert main, "main not defined"
2772 assert utilities.assert_equals, "utilities.assert_equals not defined"
2773 assert main.CLIs, "main.CLIs not defined"
2774 assert main.nodes, "main.nodes not defined"
2775
2776 main.case("Start Leadership Election app")
2777 main.step( "Install leadership election app" )
2778 onosCli = main.CLIs[ main.activeNodes[0] ]
2779 appResult = onosCli.activateApp( "org.onosproject.election" )
2780 utilities.assert_equals(
2781 expect=main.TRUE,
2782 actual=appResult,
2783 onpass="Election app installed",
2784 onfail="Something went wrong with installing Leadership election" )
2785
2786 main.step( "Run for election on each node" )
2787 leaderResult = main.TRUE
2788 leaders = []
2789 for i in main.activeNodes:
2790 main.CLIs[i].electionTestRun()
2791 for i in main.activeNodes:
2792 cli = main.CLIs[i]
2793 leader = cli.electionTestLeader()
2794 if leader is None or leader == main.FALSE:
2795 main.log.error( cli.name + ": Leader for the election app " +
2796 "should be an ONOS node, instead got '" +
2797 str( leader ) + "'" )
2798 leaderResult = main.FALSE
2799 leaders.append( leader )
2800 utilities.assert_equals(
2801 expect=main.TRUE,
2802 actual=leaderResult,
2803 onpass="Successfully ran for leadership",
2804 onfail="Failed to run for leadership" )
2805
2806 main.step( "Check that each node shows the same leader" )
2807 sameLeader = main.TRUE
2808 if len( set( leaders ) ) != 1:
2809 sameLeader = main.FALSE
2810 main.log.error( "Results of electionTestLeader is order of main.CLIs:" +
2811 str( leaders ) )
2812 utilities.assert_equals(
2813 expect=main.TRUE,
2814 actual=sameLeader,
2815 onpass="Leadership is consistent for the election topic",
2816 onfail="Nodes have different leaders" )
2817
2818 def CASE15( self, main ):
2819 """
2820 Check that Leadership Election is still functional
2821 15.1 Run election on each node
2822 15.2 Check that each node has the same leaders and candidates
2823 15.3 Find current leader and withdraw
2824 15.4 Check that a new node was elected leader
2825 15.5 Check that that new leader was the candidate of old leader
2826 15.6 Run for election on old leader
2827 15.7 Check that oldLeader is a candidate, and leader if only 1 node
2828 15.8 Make sure that the old leader was added to the candidate list
2829
2830 old and new variable prefixes refer to data from before vs after
2831 withdrawl and later before withdrawl vs after re-election
2832 """
2833 import time
2834 assert main.numCtrls, "main.numCtrls not defined"
2835 assert main, "main not defined"
2836 assert utilities.assert_equals, "utilities.assert_equals not defined"
2837 assert main.CLIs, "main.CLIs not defined"
2838 assert main.nodes, "main.nodes not defined"
2839
2840 description = "Check that Leadership Election is still functional"
2841 main.case( description )
2842 # NOTE: Need to re-run since being a canidate is not persistant
2843 # TODO: add check for "Command not found:" in the driver, this
2844 # means the election test app isn't loaded
2845
2846 oldLeaders = [] # leaders by node before withdrawl from candidates
2847 newLeaders = [] # leaders by node after withdrawl from candidates
2848 oldAllCandidates = [] # list of lists of each nodes' candidates before
2849 newAllCandidates = [] # list of lists of each nodes' candidates after
2850 oldCandidates = [] # list of candidates from node 0 before withdrawl
2851 newCandidates = [] # list of candidates from node 0 after withdrawl
2852 oldLeader = '' # the old leader from oldLeaders, None if not same
2853 newLeader = '' # the new leaders fron newLoeaders, None if not same
2854 oldLeaderCLI = None # the CLI of the old leader used for re-electing
2855 expectNoLeader = False # True when there is only one leader
2856 if main.numCtrls == 1:
2857 expectNoLeader = True
2858
2859 main.step( "Run for election on each node" )
2860 electionResult = main.TRUE
2861
2862 for i in main.activeNodes: # run test election on each node
2863 if main.CLIs[i].electionTestRun() == main.FALSE:
2864 electionResult = main.FALSE
2865 utilities.assert_equals(
2866 expect=main.TRUE,
2867 actual=electionResult,
2868 onpass="All nodes successfully ran for leadership",
2869 onfail="At least one node failed to run for leadership" )
2870
2871 if electionResult == main.FALSE:
2872 main.log.error(
2873 "Skipping Test Case because Election Test App isn't loaded" )
2874 main.skipCase()
2875
2876 main.step( "Check that each node shows the same leader and candidates" )
2877 sameResult = main.TRUE
2878 failMessage = "Nodes have different leaders"
2879 for i in main.activeNodes:
2880 cli = main.CLIs[i]
2881 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2882 oldAllCandidates.append( node )
2883 if node:
2884 oldLeaders.append( node[ 0 ] )
2885 else:
2886 oldLeaders.append( None )
2887 oldCandidates = oldAllCandidates[ 0 ]
2888 if oldCandidates is None:
2889 oldCandidates = [ None ]
2890
2891 # Check that each node has the same leader. Defines oldLeader
2892 if len( set( oldLeaders ) ) != 1:
2893 sameResult = main.FALSE
2894 main.log.error( "More than one leader present:" + str( oldLeaders ) )
2895 # FIXME: for split brain, we will likely have 2. WHat should we do here?
2896 oldLeader = None
2897 else:
2898 oldLeader = oldLeaders[ 0 ]
2899
2900 # Check that each node's candidate list is the same
2901 candidateDiscrepancy = False # Boolean of candidate mismatches
2902 for candidates in oldAllCandidates:
2903 if candidates is None:
2904 main.log.warn( "Error getting candidates" )
2905 candidates = [ None ]
2906 if set( candidates ) != set( oldCandidates ):
2907 sameResult = main.FALSE
2908 candidateDiscrepancy = True
2909 if candidateDiscrepancy:
2910 failMessage += " and candidates"
2911 utilities.assert_equals(
2912 expect=main.TRUE,
2913 actual=sameResult,
2914 onpass="Leadership is consistent for the election topic",
2915 onfail=failMessage )
2916
2917 main.step( "Find current leader and withdraw" )
2918 withdrawResult = main.TRUE
2919 # do some sanity checking on leader before using it
2920 if oldLeader is None:
2921 main.log.error( "Leadership isn't consistent." )
2922 withdrawResult = main.FALSE
2923 # Get the CLI of the oldLeader
2924 for i in main.activeNodes:
2925 if oldLeader == main.nodes[ i ].ip_address:
2926 oldLeaderCLI = main.CLIs[ i ]
2927 break
2928 else: # FOR/ELSE statement
2929 main.log.error( "Leader election, could not find current leader" )
2930 if oldLeader:
2931 withdrawResult = oldLeaderCLI.electionTestWithdraw()
2932 utilities.assert_equals(
2933 expect=main.TRUE,
2934 actual=withdrawResult,
2935 onpass="Node was withdrawn from election",
2936 onfail="Node was not withdrawn from election" )
2937
2938 main.step( "Check that a new node was elected leader" )
2939 # FIXME: use threads
2940 newLeaderResult = main.TRUE
2941 failMessage = "Nodes have different leaders"
2942
2943 # Get new leaders and candidates
2944 for i in main.activeNodes:
2945 cli = main.CLIs[i]
2946 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2947 # elections might no have finished yet
2948 if node[ 0 ] == 'none' and not expectNoLeader:
2949 main.log.info( "Node has no leader, waiting 5 seconds to be " +
2950 "sure elections are complete." )
2951 time.sleep(5)
2952 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
2953 # election still isn't done or there is a problem
2954 if node[ 0 ] == 'none':
2955 main.log.error( "No leader was elected on at least 1 node" )
2956 newLeaderResult = main.FALSE
2957 newAllCandidates.append( node )
2958 newLeaders.append( node[ 0 ] )
2959 newCandidates = newAllCandidates[ 0 ]
2960
2961 # Check that each node has the same leader. Defines newLeader
2962 if len( set( newLeaders ) ) != 1:
2963 newLeaderResult = main.FALSE
2964 main.log.error( "Nodes have different leaders: " +
2965 str( newLeaders ) )
2966 newLeader = None
2967 else:
2968 newLeader = newLeaders[ 0 ]
2969
2970 # Check that each node's candidate list is the same
2971 for candidates in newAllCandidates:
2972 if set( candidates ) != set( newCandidates ):
2973 newLeaderResult = main.FALSE
2974 main.log.error( "Discrepancy in candidate lists detected" )
2975
2976 # Check that the new leader is not the older leader, which was withdrawn
2977 if newLeader == oldLeader:
2978 newLeaderResult = main.FALSE
2979 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
2980 " as the current leader" )
2981
2982 utilities.assert_equals(
2983 expect=main.TRUE,
2984 actual=newLeaderResult,
2985 onpass="Leadership election passed",
2986 onfail="Something went wrong with Leadership election" )
2987
2988 main.step( "Check that that new leader was the candidate of old leader")
2989 # candidates[ 2 ] should become the top candidate after withdrawl
2990 correctCandidateResult = main.TRUE
2991 if expectNoLeader:
2992 if newLeader == 'none':
2993 main.log.info( "No leader expected. None found. Pass" )
2994 correctCandidateResult = main.TRUE
2995 else:
2996 main.log.info( "Expected no leader, got: " + str( newLeader ) )
2997 correctCandidateResult = main.FALSE
2998 elif len( oldCandidates ) >= 3 and newLeader != oldCandidates[ 2 ]:
2999 correctCandidateResult = main.FALSE
3000 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3001 newLeader, oldCandidates[ 2 ] ) )
3002 else:
3003 main.log.warn( "Could not determine who should be the correct leader" )
3004 correctCandidateResult = main.FALSE
3005 utilities.assert_equals(
3006 expect=main.TRUE,
3007 actual=correctCandidateResult,
3008 onpass="Correct Candidate Elected",
3009 onfail="Incorrect Candidate Elected" )
3010
3011 main.step( "Run for election on old leader( just so everyone " +
3012 "is in the hat )" )
3013 if oldLeaderCLI is not None:
3014 runResult = oldLeaderCLI.electionTestRun()
3015 else:
3016 main.log.error( "No old leader to re-elect" )
3017 runResult = main.FALSE
3018 utilities.assert_equals(
3019 expect=main.TRUE,
3020 actual=runResult,
3021 onpass="App re-ran for election",
3022 onfail="App failed to run for election" )
3023 main.step(
3024 "Check that oldLeader is a candidate, and leader if only 1 node" )
3025 # verify leader didn't just change
3026 positionResult = main.TRUE
3027 # Get new leaders and candidates, wait if oldLeader is not a candidate yet
3028
3029 # Reset and reuse the new candidate and leaders lists
3030 newAllCandidates = []
3031 newCandidates = []
3032 newLeaders = []
3033 for i in main.activeNodes:
3034 cli = main.CLIs[i]
3035 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3036 if oldLeader not in node: # election might no have finished yet
3037 main.log.info( "Old Leader not elected, waiting 5 seconds to " +
3038 "be sure elections are complete" )
3039 time.sleep(5)
3040 node = cli.specificLeaderCandidate( 'org.onosproject.election' )
3041 if oldLeader not in node: # election still isn't done, errors
3042 main.log.error(
3043 "Old leader was not elected on at least one node" )
3044 positionResult = main.FALSE
3045 newAllCandidates.append( node )
3046 newLeaders.append( node[ 0 ] )
3047 newCandidates = newAllCandidates[ 0 ]
3048
3049 # Check that each node has the same leader. Defines newLeader
3050 if len( set( newLeaders ) ) != 1:
3051 positionResult = main.FALSE
3052 main.log.error( "Nodes have different leaders: " +
3053 str( newLeaders ) )
3054 newLeader = None
3055 else:
3056 newLeader = newLeaders[ 0 ]
3057
3058 # Check that each node's candidate list is the same
3059 for candidates in newAllCandidates:
3060 if set( candidates ) != set( newCandidates ):
3061 newLeaderResult = main.FALSE
3062 main.log.error( "Discrepancy in candidate lists detected" )
3063
3064 # Check that the re-elected node is last on the candidate List
3065 if oldLeader != newCandidates[ -1 ]:
3066 main.log.error( "Old Leader (" + str( oldLeader ) + ") not in the proper position " +
3067 str( newCandidates ) )
3068 positionResult = main.FALSE
3069
3070 utilities.assert_equals(
3071 expect=main.TRUE,
3072 actual=positionResult,
3073 onpass="Old leader successfully re-ran for election",
3074 onfail="Something went wrong with Leadership election after " +
3075 "the old leader re-ran for election" )
3076
3077 def CASE16( self, main ):
3078 """
3079 Install Distributed Primitives app
3080 """
3081 import time
3082 assert main.numCtrls, "main.numCtrls not defined"
3083 assert main, "main not defined"
3084 assert utilities.assert_equals, "utilities.assert_equals not defined"
3085 assert main.CLIs, "main.CLIs not defined"
3086 assert main.nodes, "main.nodes not defined"
3087
3088 # Variables for the distributed primitives tests
3089 global pCounterName
3090 global iCounterName
3091 global pCounterValue
3092 global iCounterValue
3093 global onosSet
3094 global onosSetName
3095 pCounterName = "TestON-Partitions"
3096 iCounterName = "TestON-inMemory"
3097 pCounterValue = 0
3098 iCounterValue = 0
3099 onosSet = set([])
3100 onosSetName = "TestON-set"
3101
3102 description = "Install Primitives app"
3103 main.case( description )
3104 main.step( "Install Primitives app" )
3105 appName = "org.onosproject.distributedprimitives"
3106 node = main.activeNodes[0]
3107 appResults = main.CLIs[node].activateApp( appName )
3108 utilities.assert_equals( expect=main.TRUE,
3109 actual=appResults,
3110 onpass="Primitives app activated",
3111 onfail="Primitives app not activated" )
3112 time.sleep( 5 ) # To allow all nodes to activate
3113
3114 def CASE17( self, main ):
3115 """
3116 Check for basic functionality with distributed primitives
3117 """
3118 # Make sure variables are defined/set
3119 assert main.numCtrls, "main.numCtrls not defined"
3120 assert main, "main not defined"
3121 assert utilities.assert_equals, "utilities.assert_equals not defined"
3122 assert main.CLIs, "main.CLIs not defined"
3123 assert main.nodes, "main.nodes not defined"
3124 assert pCounterName, "pCounterName not defined"
3125 assert iCounterName, "iCounterName not defined"
3126 assert onosSetName, "onosSetName not defined"
3127 # NOTE: assert fails if value is 0/None/Empty/False
3128 try:
3129 pCounterValue
3130 except NameError:
3131 main.log.error( "pCounterValue not defined, setting to 0" )
3132 pCounterValue = 0
3133 try:
3134 iCounterValue
3135 except NameError:
3136 main.log.error( "iCounterValue not defined, setting to 0" )
3137 iCounterValue = 0
3138 try:
3139 onosSet
3140 except NameError:
3141 main.log.error( "onosSet not defined, setting to empty Set" )
3142 onosSet = set([])
3143 # Variables for the distributed primitives tests. These are local only
3144 addValue = "a"
3145 addAllValue = "a b c d e f"
3146 retainValue = "c d e f"
3147
3148 description = "Check for basic functionality with distributed " +\
3149 "primitives"
3150 main.case( description )
3151 main.caseExplanation = "Test the methods of the distributed " +\
3152 "primitives (counters and sets) throught the cli"
3153 # DISTRIBUTED ATOMIC COUNTERS
3154 # Partitioned counters
3155 main.step( "Increment then get a default counter on each node" )
3156 pCounters = []
3157 threads = []
3158 addedPValues = []
3159 for i in main.activeNodes:
3160 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3161 name="counterAddAndGet-" + str( i ),
3162 args=[ pCounterName ] )
3163 pCounterValue += 1
3164 addedPValues.append( pCounterValue )
3165 threads.append( t )
3166 t.start()
3167
3168 for t in threads:
3169 t.join()
3170 pCounters.append( t.result )
3171 # Check that counter incremented numController times
3172 pCounterResults = True
3173 for i in addedPValues:
3174 tmpResult = i in pCounters
3175 pCounterResults = pCounterResults and tmpResult
3176 if not tmpResult:
3177 main.log.error( str( i ) + " is not in partitioned "
3178 "counter incremented results" )
3179 utilities.assert_equals( expect=True,
3180 actual=pCounterResults,
3181 onpass="Default counter incremented",
3182 onfail="Error incrementing default" +
3183 " counter" )
3184
3185 main.step( "Get then Increment a default counter on each node" )
3186 pCounters = []
3187 threads = []
3188 addedPValues = []
3189 for i in main.activeNodes:
3190 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3191 name="counterGetAndAdd-" + str( i ),
3192 args=[ pCounterName ] )
3193 addedPValues.append( pCounterValue )
3194 pCounterValue += 1
3195 threads.append( t )
3196 t.start()
3197
3198 for t in threads:
3199 t.join()
3200 pCounters.append( t.result )
3201 # Check that counter incremented numController times
3202 pCounterResults = True
3203 for i in addedPValues:
3204 tmpResult = i in pCounters
3205 pCounterResults = pCounterResults and tmpResult
3206 if not tmpResult:
3207 main.log.error( str( i ) + " is not in partitioned "
3208 "counter incremented results" )
3209 utilities.assert_equals( expect=True,
3210 actual=pCounterResults,
3211 onpass="Default counter incremented",
3212 onfail="Error incrementing default" +
3213 " counter" )
3214
3215 main.step( "Counters we added have the correct values" )
3216 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3217 utilities.assert_equals( expect=main.TRUE,
3218 actual=incrementCheck,
3219 onpass="Added counters are correct",
3220 onfail="Added counters are incorrect" )
3221
3222 main.step( "Add -8 to then get a default counter on each node" )
3223 pCounters = []
3224 threads = []
3225 addedPValues = []
3226 for i in main.activeNodes:
3227 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3228 name="counterIncrement-" + str( i ),
3229 args=[ pCounterName ],
3230 kwargs={ "delta": -8 } )
3231 pCounterValue += -8
3232 addedPValues.append( pCounterValue )
3233 threads.append( t )
3234 t.start()
3235
3236 for t in threads:
3237 t.join()
3238 pCounters.append( t.result )
3239 # Check that counter incremented numController times
3240 pCounterResults = True
3241 for i in addedPValues:
3242 tmpResult = i in pCounters
3243 pCounterResults = pCounterResults and tmpResult
3244 if not tmpResult:
3245 main.log.error( str( i ) + " is not in partitioned "
3246 "counter incremented results" )
3247 utilities.assert_equals( expect=True,
3248 actual=pCounterResults,
3249 onpass="Default counter incremented",
3250 onfail="Error incrementing default" +
3251 " counter" )
3252
3253 main.step( "Add 5 to then get a default counter on each node" )
3254 pCounters = []
3255 threads = []
3256 addedPValues = []
3257 for i in main.activeNodes:
3258 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3259 name="counterIncrement-" + str( i ),
3260 args=[ pCounterName ],
3261 kwargs={ "delta": 5 } )
3262 pCounterValue += 5
3263 addedPValues.append( pCounterValue )
3264 threads.append( t )
3265 t.start()
3266
3267 for t in threads:
3268 t.join()
3269 pCounters.append( t.result )
3270 # Check that counter incremented numController times
3271 pCounterResults = True
3272 for i in addedPValues:
3273 tmpResult = i in pCounters
3274 pCounterResults = pCounterResults and tmpResult
3275 if not tmpResult:
3276 main.log.error( str( i ) + " is not in partitioned "
3277 "counter incremented results" )
3278 utilities.assert_equals( expect=True,
3279 actual=pCounterResults,
3280 onpass="Default counter incremented",
3281 onfail="Error incrementing default" +
3282 " counter" )
3283
3284 main.step( "Get then add 5 to a default counter on each node" )
3285 pCounters = []
3286 threads = []
3287 addedPValues = []
3288 for i in main.activeNodes:
3289 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3290 name="counterIncrement-" + str( i ),
3291 args=[ pCounterName ],
3292 kwargs={ "delta": 5 } )
3293 addedPValues.append( pCounterValue )
3294 pCounterValue += 5
3295 threads.append( t )
3296 t.start()
3297
3298 for t in threads:
3299 t.join()
3300 pCounters.append( t.result )
3301 # Check that counter incremented numController times
3302 pCounterResults = True
3303 for i in addedPValues:
3304 tmpResult = i in pCounters
3305 pCounterResults = pCounterResults and tmpResult
3306 if not tmpResult:
3307 main.log.error( str( i ) + " is not in partitioned "
3308 "counter incremented results" )
3309 utilities.assert_equals( expect=True,
3310 actual=pCounterResults,
3311 onpass="Default counter incremented",
3312 onfail="Error incrementing default" +
3313 " counter" )
3314
3315 main.step( "Counters we added have the correct values" )
3316 incrementCheck = main.Counters.counterCheck( pCounterName, pCounterValue )
3317 utilities.assert_equals( expect=main.TRUE,
3318 actual=incrementCheck,
3319 onpass="Added counters are correct",
3320 onfail="Added counters are incorrect" )
3321
3322 # In-Memory counters
3323 main.step( "Increment and get an in-memory counter on each node" )
3324 iCounters = []
3325 addedIValues = []
3326 threads = []
3327 for i in main.activeNodes:
3328 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3329 name="icounterIncrement-" + str( i ),
3330 args=[ iCounterName ],
3331 kwargs={ "inMemory": True } )
3332 iCounterValue += 1
3333 addedIValues.append( iCounterValue )
3334 threads.append( t )
3335 t.start()
3336
3337 for t in threads:
3338 t.join()
3339 iCounters.append( t.result )
3340 # Check that counter incremented numController times
3341 iCounterResults = True
3342 for i in addedIValues:
3343 tmpResult = i in iCounters
3344 iCounterResults = iCounterResults and tmpResult
3345 if not tmpResult:
3346 main.log.error( str( i ) + " is not in the in-memory "
3347 "counter incremented results" )
3348 utilities.assert_equals( expect=True,
3349 actual=iCounterResults,
3350 onpass="In-memory counter incremented",
3351 onfail="Error incrementing in-memory" +
3352 " counter" )
3353
3354 main.step( "Get then Increment a in-memory counter on each node" )
3355 iCounters = []
3356 threads = []
3357 addedIValues = []
3358 for i in main.activeNodes:
3359 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3360 name="counterGetAndAdd-" + str( i ),
3361 args=[ iCounterName ],
3362 kwargs={ "inMemory": True } )
3363 addedIValues.append( iCounterValue )
3364 iCounterValue += 1
3365 threads.append( t )
3366 t.start()
3367
3368 for t in threads:
3369 t.join()
3370 iCounters.append( t.result )
3371 # Check that counter incremented numController times
3372 iCounterResults = True
3373 for i in addedIValues:
3374 tmpResult = i in iCounters
3375 iCounterResults = iCounterResults and tmpResult
3376 if not tmpResult:
3377 main.log.error( str( i ) + " is not in in-memory "
3378 "counter incremented results" )
3379 utilities.assert_equals( expect=True,
3380 actual=iCounterResults,
3381 onpass="In-memory counter incremented",
3382 onfail="Error incrementing in-memory" +
3383 " counter" )
3384
3385 main.step( "Counters we added have the correct values" )
3386 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3387 utilities.assert_equals( expect=main.TRUE,
3388 actual=incrementCheck,
3389 onpass="Added counters are correct",
3390 onfail="Added counters are incorrect" )
3391
3392 main.step( "Add -8 to then get a in-memory counter on each node" )
3393 iCounters = []
3394 threads = []
3395 addedIValues = []
3396 for i in main.activeNodes:
3397 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3398 name="counterIncrement-" + str( i ),
3399 args=[ iCounterName ],
3400 kwargs={ "delta": -8, "inMemory": True } )
3401 iCounterValue += -8
3402 addedIValues.append( iCounterValue )
3403 threads.append( t )
3404 t.start()
3405
3406 for t in threads:
3407 t.join()
3408 iCounters.append( t.result )
3409 # Check that counter incremented numController times
3410 iCounterResults = True
3411 for i in addedIValues:
3412 tmpResult = i in iCounters
3413 iCounterResults = iCounterResults and tmpResult
3414 if not tmpResult:
3415 main.log.error( str( i ) + " is not in in-memory "
3416 "counter incremented results" )
3417 utilities.assert_equals( expect=True,
3418 actual=pCounterResults,
3419 onpass="In-memory counter incremented",
3420 onfail="Error incrementing in-memory" +
3421 " counter" )
3422
3423 main.step( "Add 5 to then get a in-memory counter on each node" )
3424 iCounters = []
3425 threads = []
3426 addedIValues = []
3427 for i in main.activeNodes:
3428 t = main.Thread( target=main.CLIs[i].counterTestAddAndGet,
3429 name="counterIncrement-" + str( i ),
3430 args=[ iCounterName ],
3431 kwargs={ "delta": 5, "inMemory": True } )
3432 iCounterValue += 5
3433 addedIValues.append( iCounterValue )
3434 threads.append( t )
3435 t.start()
3436
3437 for t in threads:
3438 t.join()
3439 iCounters.append( t.result )
3440 # Check that counter incremented numController times
3441 iCounterResults = True
3442 for i in addedIValues:
3443 tmpResult = i in iCounters
3444 iCounterResults = iCounterResults and tmpResult
3445 if not tmpResult:
3446 main.log.error( str( i ) + " is not in in-memory "
3447 "counter incremented results" )
3448 utilities.assert_equals( expect=True,
3449 actual=pCounterResults,
3450 onpass="In-memory counter incremented",
3451 onfail="Error incrementing in-memory" +
3452 " counter" )
3453
3454 main.step( "Get then add 5 to a in-memory counter on each node" )
3455 iCounters = []
3456 threads = []
3457 addedIValues = []
3458 for i in main.activeNodes:
3459 t = main.Thread( target=main.CLIs[i].counterTestGetAndAdd,
3460 name="counterIncrement-" + str( i ),
3461 args=[ iCounterName ],
3462 kwargs={ "delta": 5, "inMemory": True } )
3463 addedIValues.append( iCounterValue )
3464 iCounterValue += 5
3465 threads.append( t )
3466 t.start()
3467
3468 for t in threads:
3469 t.join()
3470 iCounters.append( t.result )
3471 # Check that counter incremented numController times
3472 iCounterResults = True
3473 for i in addedIValues:
3474 tmpResult = i in iCounters
3475 iCounterResults = iCounterResults and tmpResult
3476 if not tmpResult:
3477 main.log.error( str( i ) + " is not in in-memory "
3478 "counter incremented results" )
3479 utilities.assert_equals( expect=True,
3480 actual=iCounterResults,
3481 onpass="In-memory counter incremented",
3482 onfail="Error incrementing in-memory" +
3483 " counter" )
3484
3485 main.step( "Counters we added have the correct values" )
3486 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3487 utilities.assert_equals( expect=main.TRUE,
3488 actual=incrementCheck,
3489 onpass="Added counters are correct",
3490 onfail="Added counters are incorrect" )
3491
3492 main.step( "Check counters are consistant across nodes" )
3493 onosCounters, consistentCounterResults = main.Counters.consistentCheck()
3494 utilities.assert_equals( expect=main.TRUE,
3495 actual=consistentCounterResults,
3496 onpass="ONOS counters are consistent " +
3497 "across nodes",
3498 onfail="ONOS Counters are inconsistent " +
3499 "across nodes" )
3500
3501 main.step( "Counters we added have the correct values" )
3502 incrementCheck = main.Counters.counterCheck( iCounterName, iCounterValue )
3503 incrementCheck = incrementCheck and \
3504 main.Counters.counterCheck( iCounterName, iCounterValue )
3505 utilities.assert_equals( expect=main.TRUE,
3506 actual=incrementCheck,
3507 onpass="Added counters are correct",
3508 onfail="Added counters are incorrect" )
3509 # DISTRIBUTED SETS
3510 main.step( "Distributed Set get" )
3511 size = len( onosSet )
3512 getResponses = []
3513 threads = []
3514 for i in main.activeNodes:
3515 t = main.Thread( target=main.CLIs[i].setTestGet,
3516 name="setTestGet-" + str( i ),
3517 args=[ onosSetName ] )
3518 threads.append( t )
3519 t.start()
3520 for t in threads:
3521 t.join()
3522 getResponses.append( t.result )
3523
3524 getResults = main.TRUE
3525 for i in range( len( main.activeNodes ) ):
3526 node = str( main.activeNodes[i] + 1 )
3527 if isinstance( getResponses[ i ], list):
3528 current = set( getResponses[ i ] )
3529 if len( current ) == len( getResponses[ i ] ):
3530 # no repeats
3531 if onosSet != current:
3532 main.log.error( "ONOS" + node +
3533 " has incorrect view" +
3534 " of set " + onosSetName + ":\n" +
3535 str( getResponses[ i ] ) )
3536 main.log.debug( "Expected: " + str( onosSet ) )
3537 main.log.debug( "Actual: " + str( current ) )
3538 getResults = main.FALSE
3539 else:
3540 # error, set is not a set
3541 main.log.error( "ONOS" + node +
3542 " has repeat elements in" +
3543 " set " + onosSetName + ":\n" +
3544 str( getResponses[ i ] ) )
3545 getResults = main.FALSE
3546 elif getResponses[ i ] == main.ERROR:
3547 getResults = main.FALSE
3548 utilities.assert_equals( expect=main.TRUE,
3549 actual=getResults,
3550 onpass="Set elements are correct",
3551 onfail="Set elements are incorrect" )
3552
3553 main.step( "Distributed Set size" )
3554 sizeResponses = []
3555 threads = []
3556 for i in main.activeNodes:
3557 t = main.Thread( target=main.CLIs[i].setTestSize,
3558 name="setTestSize-" + str( i ),
3559 args=[ onosSetName ] )
3560 threads.append( t )
3561 t.start()
3562 for t in threads:
3563 t.join()
3564 sizeResponses.append( t.result )
3565
3566 sizeResults = main.TRUE
3567 for i in range( len( main.activeNodes ) ):
3568 node = str( main.activeNodes[i] + 1 )
3569 if size != sizeResponses[ i ]:
3570 sizeResults = main.FALSE
3571 main.log.error( "ONOS" + node +
3572 " expected a size of " + str( size ) +
3573 " for set " + onosSetName +
3574 " but got " + str( sizeResponses[ i ] ) )
3575 utilities.assert_equals( expect=main.TRUE,
3576 actual=sizeResults,
3577 onpass="Set sizes are correct",
3578 onfail="Set sizes are incorrect" )
3579
3580 main.step( "Distributed Set add()" )
3581 onosSet.add( addValue )
3582 addResponses = []
3583 threads = []
3584 for i in main.activeNodes:
3585 t = main.Thread( target=main.CLIs[i].setTestAdd,
3586 name="setTestAdd-" + str( i ),
3587 args=[ onosSetName, addValue ] )
3588 threads.append( t )
3589 t.start()
3590 for t in threads:
3591 t.join()
3592 addResponses.append( t.result )
3593
3594 # main.TRUE = successfully changed the set
3595 # main.FALSE = action resulted in no change in set
3596 # main.ERROR - Some error in executing the function
3597 addResults = main.TRUE
3598 for i in range( len( main.activeNodes ) ):
3599 if addResponses[ i ] == main.TRUE:
3600 # All is well
3601 pass
3602 elif addResponses[ i ] == main.FALSE:
3603 # Already in set, probably fine
3604 pass
3605 elif addResponses[ i ] == main.ERROR:
3606 # Error in execution
3607 addResults = main.FALSE
3608 else:
3609 # unexpected result
3610 addResults = main.FALSE
3611 if addResults != main.TRUE:
3612 main.log.error( "Error executing set add" )
3613
3614 # Check if set is still correct
3615 size = len( onosSet )
3616 getResponses = []
3617 threads = []
3618 for i in main.activeNodes:
3619 t = main.Thread( target=main.CLIs[i].setTestGet,
3620 name="setTestGet-" + str( i ),
3621 args=[ onosSetName ] )
3622 threads.append( t )
3623 t.start()
3624 for t in threads:
3625 t.join()
3626 getResponses.append( t.result )
3627 getResults = main.TRUE
3628 for i in range( len( main.activeNodes ) ):
3629 node = str( main.activeNodes[i] + 1 )
3630 if isinstance( getResponses[ i ], list):
3631 current = set( getResponses[ i ] )
3632 if len( current ) == len( getResponses[ i ] ):
3633 # no repeats
3634 if onosSet != current:
3635 main.log.error( "ONOS" + node + " has incorrect view" +
3636 " of set " + onosSetName + ":\n" +
3637 str( getResponses[ i ] ) )
3638 main.log.debug( "Expected: " + str( onosSet ) )
3639 main.log.debug( "Actual: " + str( current ) )
3640 getResults = main.FALSE
3641 else:
3642 # error, set is not a set
3643 main.log.error( "ONOS" + node + " has repeat elements in" +
3644 " set " + onosSetName + ":\n" +
3645 str( getResponses[ i ] ) )
3646 getResults = main.FALSE
3647 elif getResponses[ i ] == main.ERROR:
3648 getResults = main.FALSE
3649 sizeResponses = []
3650 threads = []
3651 for i in main.activeNodes:
3652 t = main.Thread( target=main.CLIs[i].setTestSize,
3653 name="setTestSize-" + str( i ),
3654 args=[ onosSetName ] )
3655 threads.append( t )
3656 t.start()
3657 for t in threads:
3658 t.join()
3659 sizeResponses.append( t.result )
3660 sizeResults = main.TRUE
3661 for i in range( len( main.activeNodes ) ):
3662 node = str( main.activeNodes[i] + 1 )
3663 if size != sizeResponses[ i ]:
3664 sizeResults = main.FALSE
3665 main.log.error( "ONOS" + node +
3666 " expected a size of " + str( size ) +
3667 " for set " + onosSetName +
3668 " but got " + str( sizeResponses[ i ] ) )
3669 addResults = addResults and getResults and sizeResults
3670 utilities.assert_equals( expect=main.TRUE,
3671 actual=addResults,
3672 onpass="Set add correct",
3673 onfail="Set add was incorrect" )
3674
3675 main.step( "Distributed Set addAll()" )
3676 onosSet.update( addAllValue.split() )
3677 addResponses = []
3678 threads = []
3679 for i in main.activeNodes:
3680 t = main.Thread( target=main.CLIs[i].setTestAdd,
3681 name="setTestAddAll-" + str( i ),
3682 args=[ onosSetName, addAllValue ] )
3683 threads.append( t )
3684 t.start()
3685 for t in threads:
3686 t.join()
3687 addResponses.append( t.result )
3688
3689 # main.TRUE = successfully changed the set
3690 # main.FALSE = action resulted in no change in set
3691 # main.ERROR - Some error in executing the function
3692 addAllResults = main.TRUE
3693 for i in range( len( main.activeNodes ) ):
3694 if addResponses[ i ] == main.TRUE:
3695 # All is well
3696 pass
3697 elif addResponses[ i ] == main.FALSE:
3698 # Already in set, probably fine
3699 pass
3700 elif addResponses[ i ] == main.ERROR:
3701 # Error in execution
3702 addAllResults = main.FALSE
3703 else:
3704 # unexpected result
3705 addAllResults = main.FALSE
3706 if addAllResults != main.TRUE:
3707 main.log.error( "Error executing set addAll" )
3708
3709 # Check if set is still correct
3710 size = len( onosSet )
3711 getResponses = []
3712 threads = []
3713 for i in main.activeNodes:
3714 t = main.Thread( target=main.CLIs[i].setTestGet,
3715 name="setTestGet-" + str( i ),
3716 args=[ onosSetName ] )
3717 threads.append( t )
3718 t.start()
3719 for t in threads:
3720 t.join()
3721 getResponses.append( t.result )
3722 getResults = main.TRUE
3723 for i in range( len( main.activeNodes ) ):
3724 node = str( main.activeNodes[i] + 1 )
3725 if isinstance( getResponses[ i ], list):
3726 current = set( getResponses[ i ] )
3727 if len( current ) == len( getResponses[ i ] ):
3728 # no repeats
3729 if onosSet != current:
3730 main.log.error( "ONOS" + node +
3731 " has incorrect view" +
3732 " of set " + onosSetName + ":\n" +
3733 str( getResponses[ i ] ) )
3734 main.log.debug( "Expected: " + str( onosSet ) )
3735 main.log.debug( "Actual: " + str( current ) )
3736 getResults = main.FALSE
3737 else:
3738 # error, set is not a set
3739 main.log.error( "ONOS" + node +
3740 " has repeat elements in" +
3741 " set " + onosSetName + ":\n" +
3742 str( getResponses[ i ] ) )
3743 getResults = main.FALSE
3744 elif getResponses[ i ] == main.ERROR:
3745 getResults = main.FALSE
3746 sizeResponses = []
3747 threads = []
3748 for i in main.activeNodes:
3749 t = main.Thread( target=main.CLIs[i].setTestSize,
3750 name="setTestSize-" + str( i ),
3751 args=[ onosSetName ] )
3752 threads.append( t )
3753 t.start()
3754 for t in threads:
3755 t.join()
3756 sizeResponses.append( t.result )
3757 sizeResults = main.TRUE
3758 for i in range( len( main.activeNodes ) ):
3759 node = str( main.activeNodes[i] + 1 )
3760 if size != sizeResponses[ i ]:
3761 sizeResults = main.FALSE
3762 main.log.error( "ONOS" + node +
3763 " expected a size of " + str( size ) +
3764 " for set " + onosSetName +
3765 " but got " + str( sizeResponses[ i ] ) )
3766 addAllResults = addAllResults and getResults and sizeResults
3767 utilities.assert_equals( expect=main.TRUE,
3768 actual=addAllResults,
3769 onpass="Set addAll correct",
3770 onfail="Set addAll was incorrect" )
3771
3772 main.step( "Distributed Set contains()" )
3773 containsResponses = []
3774 threads = []
3775 for i in main.activeNodes:
3776 t = main.Thread( target=main.CLIs[i].setTestGet,
3777 name="setContains-" + str( i ),
3778 args=[ onosSetName ],
3779 kwargs={ "values": addValue } )
3780 threads.append( t )
3781 t.start()
3782 for t in threads:
3783 t.join()
3784 # NOTE: This is the tuple
3785 containsResponses.append( t.result )
3786
3787 containsResults = main.TRUE
3788 for i in range( len( main.activeNodes ) ):
3789 if containsResponses[ i ] == main.ERROR:
3790 containsResults = main.FALSE
3791 else:
3792 containsResults = containsResults and\
3793 containsResponses[ i ][ 1 ]
3794 utilities.assert_equals( expect=main.TRUE,
3795 actual=containsResults,
3796 onpass="Set contains is functional",
3797 onfail="Set contains failed" )
3798
3799 main.step( "Distributed Set containsAll()" )
3800 containsAllResponses = []
3801 threads = []
3802 for i in main.activeNodes:
3803 t = main.Thread( target=main.CLIs[i].setTestGet,
3804 name="setContainsAll-" + str( i ),
3805 args=[ onosSetName ],
3806 kwargs={ "values": addAllValue } )
3807 threads.append( t )
3808 t.start()
3809 for t in threads:
3810 t.join()
3811 # NOTE: This is the tuple
3812 containsAllResponses.append( t.result )
3813
3814 containsAllResults = main.TRUE
3815 for i in range( len( main.activeNodes ) ):
3816 if containsResponses[ i ] == main.ERROR:
3817 containsResults = main.FALSE
3818 else:
3819 containsResults = containsResults and\
3820 containsResponses[ i ][ 1 ]
3821 utilities.assert_equals( expect=main.TRUE,
3822 actual=containsAllResults,
3823 onpass="Set containsAll is functional",
3824 onfail="Set containsAll failed" )
3825
3826 main.step( "Distributed Set remove()" )
3827 onosSet.remove( addValue )
3828 removeResponses = []
3829 threads = []
3830 for i in main.activeNodes:
3831 t = main.Thread( target=main.CLIs[i].setTestRemove,
3832 name="setTestRemove-" + str( i ),
3833 args=[ onosSetName, addValue ] )
3834 threads.append( t )
3835 t.start()
3836 for t in threads:
3837 t.join()
3838 removeResponses.append( t.result )
3839
3840 # main.TRUE = successfully changed the set
3841 # main.FALSE = action resulted in no change in set
3842 # main.ERROR - Some error in executing the function
3843 removeResults = main.TRUE
3844 for i in range( len( main.activeNodes ) ):
3845 if removeResponses[ i ] == main.TRUE:
3846 # All is well
3847 pass
3848 elif removeResponses[ i ] == main.FALSE:
3849 # not in set, probably fine
3850 pass
3851 elif removeResponses[ i ] == main.ERROR:
3852 # Error in execution
3853 removeResults = main.FALSE
3854 else:
3855 # unexpected result
3856 removeResults = main.FALSE
3857 if removeResults != main.TRUE:
3858 main.log.error( "Error executing set remove" )
3859
3860 # Check if set is still correct
3861 size = len( onosSet )
3862 getResponses = []
3863 threads = []
3864 for i in main.activeNodes:
3865 t = main.Thread( target=main.CLIs[i].setTestGet,
3866 name="setTestGet-" + str( i ),
3867 args=[ onosSetName ] )
3868 threads.append( t )
3869 t.start()
3870 for t in threads:
3871 t.join()
3872 getResponses.append( t.result )
3873 getResults = main.TRUE
3874 for i in range( len( main.activeNodes ) ):
3875 node = str( main.activeNodes[i] + 1 )
3876 if isinstance( getResponses[ i ], list):
3877 current = set( getResponses[ i ] )
3878 if len( current ) == len( getResponses[ i ] ):
3879 # no repeats
3880 if onosSet != current:
3881 main.log.error( "ONOS" + node +
3882 " has incorrect view" +
3883 " of set " + onosSetName + ":\n" +
3884 str( getResponses[ i ] ) )
3885 main.log.debug( "Expected: " + str( onosSet ) )
3886 main.log.debug( "Actual: " + str( current ) )
3887 getResults = main.FALSE
3888 else:
3889 # error, set is not a set
3890 main.log.error( "ONOS" + node +
3891 " has repeat elements in" +
3892 " set " + onosSetName + ":\n" +
3893 str( getResponses[ i ] ) )
3894 getResults = main.FALSE
3895 elif getResponses[ i ] == main.ERROR:
3896 getResults = main.FALSE
3897 sizeResponses = []
3898 threads = []
3899 for i in main.activeNodes:
3900 t = main.Thread( target=main.CLIs[i].setTestSize,
3901 name="setTestSize-" + str( i ),
3902 args=[ onosSetName ] )
3903 threads.append( t )
3904 t.start()
3905 for t in threads:
3906 t.join()
3907 sizeResponses.append( t.result )
3908 sizeResults = main.TRUE
3909 for i in range( len( main.activeNodes ) ):
3910 node = str( main.activeNodes[i] + 1 )
3911 if size != sizeResponses[ i ]:
3912 sizeResults = main.FALSE
3913 main.log.error( "ONOS" + node +
3914 " expected a size of " + str( size ) +
3915 " for set " + onosSetName +
3916 " but got " + str( sizeResponses[ i ] ) )
3917 removeResults = removeResults and getResults and sizeResults
3918 utilities.assert_equals( expect=main.TRUE,
3919 actual=removeResults,
3920 onpass="Set remove correct",
3921 onfail="Set remove was incorrect" )
3922
3923 main.step( "Distributed Set removeAll()" )
3924 onosSet.difference_update( addAllValue.split() )
3925 removeAllResponses = []
3926 threads = []
3927 try:
3928 for i in main.activeNodes:
3929 t = main.Thread( target=main.CLIs[i].setTestRemove,
3930 name="setTestRemoveAll-" + str( i ),
3931 args=[ onosSetName, addAllValue ] )
3932 threads.append( t )
3933 t.start()
3934 for t in threads:
3935 t.join()
3936 removeAllResponses.append( t.result )
3937 except Exception, e:
3938 main.log.exception(e)
3939
3940 # main.TRUE = successfully changed the set
3941 # main.FALSE = action resulted in no change in set
3942 # main.ERROR - Some error in executing the function
3943 removeAllResults = main.TRUE
3944 for i in range( len( main.activeNodes ) ):
3945 if removeAllResponses[ i ] == main.TRUE:
3946 # All is well
3947 pass
3948 elif removeAllResponses[ i ] == main.FALSE:
3949 # not in set, probably fine
3950 pass
3951 elif removeAllResponses[ i ] == main.ERROR:
3952 # Error in execution
3953 removeAllResults = main.FALSE
3954 else:
3955 # unexpected result
3956 removeAllResults = main.FALSE
3957 if removeAllResults != main.TRUE:
3958 main.log.error( "Error executing set removeAll" )
3959
3960 # Check if set is still correct
3961 size = len( onosSet )
3962 getResponses = []
3963 threads = []
3964 for i in main.activeNodes:
3965 t = main.Thread( target=main.CLIs[i].setTestGet,
3966 name="setTestGet-" + str( i ),
3967 args=[ onosSetName ] )
3968 threads.append( t )
3969 t.start()
3970 for t in threads:
3971 t.join()
3972 getResponses.append( t.result )
3973 getResults = main.TRUE
3974 for i in range( len( main.activeNodes ) ):
3975 node = str( main.activeNodes[i] + 1 )
3976 if isinstance( getResponses[ i ], list):
3977 current = set( getResponses[ i ] )
3978 if len( current ) == len( getResponses[ i ] ):
3979 # no repeats
3980 if onosSet != current:
3981 main.log.error( "ONOS" + node +
3982 " has incorrect view" +
3983 " of set " + onosSetName + ":\n" +
3984 str( getResponses[ i ] ) )
3985 main.log.debug( "Expected: " + str( onosSet ) )
3986 main.log.debug( "Actual: " + str( current ) )
3987 getResults = main.FALSE
3988 else:
3989 # error, set is not a set
3990 main.log.error( "ONOS" + node +
3991 " has repeat elements in" +
3992 " set " + onosSetName + ":\n" +
3993 str( getResponses[ i ] ) )
3994 getResults = main.FALSE
3995 elif getResponses[ i ] == main.ERROR:
3996 getResults = main.FALSE
3997 sizeResponses = []
3998 threads = []
3999 for i in main.activeNodes:
4000 t = main.Thread( target=main.CLIs[i].setTestSize,
4001 name="setTestSize-" + str( i ),
4002 args=[ onosSetName ] )
4003 threads.append( t )
4004 t.start()
4005 for t in threads:
4006 t.join()
4007 sizeResponses.append( t.result )
4008 sizeResults = main.TRUE
4009 for i in range( len( main.activeNodes ) ):
4010 node = str( main.activeNodes[i] + 1 )
4011 if size != sizeResponses[ i ]:
4012 sizeResults = main.FALSE
4013 main.log.error( "ONOS" + node +
4014 " expected a size of " + str( size ) +
4015 " for set " + onosSetName +
4016 " but got " + str( sizeResponses[ i ] ) )
4017 removeAllResults = removeAllResults and getResults and sizeResults
4018 utilities.assert_equals( expect=main.TRUE,
4019 actual=removeAllResults,
4020 onpass="Set removeAll correct",
4021 onfail="Set removeAll was incorrect" )
4022
4023 main.step( "Distributed Set addAll()" )
4024 onosSet.update( addAllValue.split() )
4025 addResponses = []
4026 threads = []
4027 for i in main.activeNodes:
4028 t = main.Thread( target=main.CLIs[i].setTestAdd,
4029 name="setTestAddAll-" + str( i ),
4030 args=[ onosSetName, addAllValue ] )
4031 threads.append( t )
4032 t.start()
4033 for t in threads:
4034 t.join()
4035 addResponses.append( t.result )
4036
4037 # main.TRUE = successfully changed the set
4038 # main.FALSE = action resulted in no change in set
4039 # main.ERROR - Some error in executing the function
4040 addAllResults = main.TRUE
4041 for i in range( len( main.activeNodes ) ):
4042 if addResponses[ i ] == main.TRUE:
4043 # All is well
4044 pass
4045 elif addResponses[ i ] == main.FALSE:
4046 # Already in set, probably fine
4047 pass
4048 elif addResponses[ i ] == main.ERROR:
4049 # Error in execution
4050 addAllResults = main.FALSE
4051 else:
4052 # unexpected result
4053 addAllResults = main.FALSE
4054 if addAllResults != main.TRUE:
4055 main.log.error( "Error executing set addAll" )
4056
4057 # Check if set is still correct
4058 size = len( onosSet )
4059 getResponses = []
4060 threads = []
4061 for i in main.activeNodes:
4062 t = main.Thread( target=main.CLIs[i].setTestGet,
4063 name="setTestGet-" + str( i ),
4064 args=[ onosSetName ] )
4065 threads.append( t )
4066 t.start()
4067 for t in threads:
4068 t.join()
4069 getResponses.append( t.result )
4070 getResults = main.TRUE
4071 for i in range( len( main.activeNodes ) ):
4072 node = str( main.activeNodes[i] + 1 )
4073 if isinstance( getResponses[ i ], list):
4074 current = set( getResponses[ i ] )
4075 if len( current ) == len( getResponses[ i ] ):
4076 # no repeats
4077 if onosSet != current:
4078 main.log.error( "ONOS" + node +
4079 " has incorrect view" +
4080 " of set " + onosSetName + ":\n" +
4081 str( getResponses[ i ] ) )
4082 main.log.debug( "Expected: " + str( onosSet ) )
4083 main.log.debug( "Actual: " + str( current ) )
4084 getResults = main.FALSE
4085 else:
4086 # error, set is not a set
4087 main.log.error( "ONOS" + node +
4088 " has repeat elements in" +
4089 " set " + onosSetName + ":\n" +
4090 str( getResponses[ i ] ) )
4091 getResults = main.FALSE
4092 elif getResponses[ i ] == main.ERROR:
4093 getResults = main.FALSE
4094 sizeResponses = []
4095 threads = []
4096 for i in main.activeNodes:
4097 t = main.Thread( target=main.CLIs[i].setTestSize,
4098 name="setTestSize-" + str( i ),
4099 args=[ onosSetName ] )
4100 threads.append( t )
4101 t.start()
4102 for t in threads:
4103 t.join()
4104 sizeResponses.append( t.result )
4105 sizeResults = main.TRUE
4106 for i in range( len( main.activeNodes ) ):
4107 node = str( main.activeNodes[i] + 1 )
4108 if size != sizeResponses[ i ]:
4109 sizeResults = main.FALSE
4110 main.log.error( "ONOS" + node +
4111 " expected a size of " + str( size ) +
4112 " for set " + onosSetName +
4113 " but got " + str( sizeResponses[ i ] ) )
4114 addAllResults = addAllResults and getResults and sizeResults
4115 utilities.assert_equals( expect=main.TRUE,
4116 actual=addAllResults,
4117 onpass="Set addAll correct",
4118 onfail="Set addAll was incorrect" )
4119
4120 main.step( "Distributed Set clear()" )
4121 onosSet.clear()
4122 clearResponses = []
4123 threads = []
4124 for i in main.activeNodes:
4125 t = main.Thread( target=main.CLIs[i].setTestRemove,
4126 name="setTestClear-" + str( i ),
4127 args=[ onosSetName, " "], # Values doesn't matter
4128 kwargs={ "clear": True } )
4129 threads.append( t )
4130 t.start()
4131 for t in threads:
4132 t.join()
4133 clearResponses.append( t.result )
4134
4135 # main.TRUE = successfully changed the set
4136 # main.FALSE = action resulted in no change in set
4137 # main.ERROR - Some error in executing the function
4138 clearResults = main.TRUE
4139 for i in range( len( main.activeNodes ) ):
4140 if clearResponses[ i ] == main.TRUE:
4141 # All is well
4142 pass
4143 elif clearResponses[ i ] == main.FALSE:
4144 # Nothing set, probably fine
4145 pass
4146 elif clearResponses[ i ] == main.ERROR:
4147 # Error in execution
4148 clearResults = main.FALSE
4149 else:
4150 # unexpected result
4151 clearResults = main.FALSE
4152 if clearResults != main.TRUE:
4153 main.log.error( "Error executing set clear" )
4154
4155 # Check if set is still correct
4156 size = len( onosSet )
4157 getResponses = []
4158 threads = []
4159 for i in main.activeNodes:
4160 t = main.Thread( target=main.CLIs[i].setTestGet,
4161 name="setTestGet-" + str( i ),
4162 args=[ onosSetName ] )
4163 threads.append( t )
4164 t.start()
4165 for t in threads:
4166 t.join()
4167 getResponses.append( t.result )
4168 getResults = main.TRUE
4169 for i in range( len( main.activeNodes ) ):
4170 node = str( main.activeNodes[i] + 1 )
4171 if isinstance( getResponses[ i ], list):
4172 current = set( getResponses[ i ] )
4173 if len( current ) == len( getResponses[ i ] ):
4174 # no repeats
4175 if onosSet != current:
4176 main.log.error( "ONOS" + node +
4177 " has incorrect view" +
4178 " of set " + onosSetName + ":\n" +
4179 str( getResponses[ i ] ) )
4180 main.log.debug( "Expected: " + str( onosSet ) )
4181 main.log.debug( "Actual: " + str( current ) )
4182 getResults = main.FALSE
4183 else:
4184 # error, set is not a set
4185 main.log.error( "ONOS" + node +
4186 " has repeat elements in" +
4187 " set " + onosSetName + ":\n" +
4188 str( getResponses[ i ] ) )
4189 getResults = main.FALSE
4190 elif getResponses[ i ] == main.ERROR:
4191 getResults = main.FALSE
4192 sizeResponses = []
4193 threads = []
4194 for i in main.activeNodes:
4195 t = main.Thread( target=main.CLIs[i].setTestSize,
4196 name="setTestSize-" + str( i ),
4197 args=[ onosSetName ] )
4198 threads.append( t )
4199 t.start()
4200 for t in threads:
4201 t.join()
4202 sizeResponses.append( t.result )
4203 sizeResults = main.TRUE
4204 for i in range( len( main.activeNodes ) ):
4205 node = str( main.activeNodes[i] + 1 )
4206 if size != sizeResponses[ i ]:
4207 sizeResults = main.FALSE
4208 main.log.error( "ONOS" + node +
4209 " expected a size of " + str( size ) +
4210 " for set " + onosSetName +
4211 " but got " + str( sizeResponses[ i ] ) )
4212 clearResults = clearResults and getResults and sizeResults
4213 utilities.assert_equals( expect=main.TRUE,
4214 actual=clearResults,
4215 onpass="Set clear correct",
4216 onfail="Set clear was incorrect" )
4217
4218 main.step( "Distributed Set addAll()" )
4219 onosSet.update( addAllValue.split() )
4220 addResponses = []
4221 threads = []
4222 for i in main.activeNodes:
4223 t = main.Thread( target=main.CLIs[i].setTestAdd,
4224 name="setTestAddAll-" + str( i ),
4225 args=[ onosSetName, addAllValue ] )
4226 threads.append( t )
4227 t.start()
4228 for t in threads:
4229 t.join()
4230 addResponses.append( t.result )
4231
4232 # main.TRUE = successfully changed the set
4233 # main.FALSE = action resulted in no change in set
4234 # main.ERROR - Some error in executing the function
4235 addAllResults = main.TRUE
4236 for i in range( len( main.activeNodes ) ):
4237 if addResponses[ i ] == main.TRUE:
4238 # All is well
4239 pass
4240 elif addResponses[ i ] == main.FALSE:
4241 # Already in set, probably fine
4242 pass
4243 elif addResponses[ i ] == main.ERROR:
4244 # Error in execution
4245 addAllResults = main.FALSE
4246 else:
4247 # unexpected result
4248 addAllResults = main.FALSE
4249 if addAllResults != main.TRUE:
4250 main.log.error( "Error executing set addAll" )
4251
4252 # Check if set is still correct
4253 size = len( onosSet )
4254 getResponses = []
4255 threads = []
4256 for i in main.activeNodes:
4257 t = main.Thread( target=main.CLIs[i].setTestGet,
4258 name="setTestGet-" + str( i ),
4259 args=[ onosSetName ] )
4260 threads.append( t )
4261 t.start()
4262 for t in threads:
4263 t.join()
4264 getResponses.append( t.result )
4265 getResults = main.TRUE
4266 for i in range( len( main.activeNodes ) ):
4267 node = str( main.activeNodes[i] + 1 )
4268 if isinstance( getResponses[ i ], list):
4269 current = set( getResponses[ i ] )
4270 if len( current ) == len( getResponses[ i ] ):
4271 # no repeats
4272 if onosSet != current:
4273 main.log.error( "ONOS" + node +
4274 " has incorrect view" +
4275 " of set " + onosSetName + ":\n" +
4276 str( getResponses[ i ] ) )
4277 main.log.debug( "Expected: " + str( onosSet ) )
4278 main.log.debug( "Actual: " + str( current ) )
4279 getResults = main.FALSE
4280 else:
4281 # error, set is not a set
4282 main.log.error( "ONOS" + node +
4283 " has repeat elements in" +
4284 " set " + onosSetName + ":\n" +
4285 str( getResponses[ i ] ) )
4286 getResults = main.FALSE
4287 elif getResponses[ i ] == main.ERROR:
4288 getResults = main.FALSE
4289 sizeResponses = []
4290 threads = []
4291 for i in main.activeNodes:
4292 t = main.Thread( target=main.CLIs[i].setTestSize,
4293 name="setTestSize-" + str( i ),
4294 args=[ onosSetName ] )
4295 threads.append( t )
4296 t.start()
4297 for t in threads:
4298 t.join()
4299 sizeResponses.append( t.result )
4300 sizeResults = main.TRUE
4301 for i in range( len( main.activeNodes ) ):
4302 node = str( main.activeNodes[i] + 1 )
4303 if size != sizeResponses[ i ]:
4304 sizeResults = main.FALSE
4305 main.log.error( "ONOS" + node +
4306 " expected a size of " + str( size ) +
4307 " for set " + onosSetName +
4308 " but got " + str( sizeResponses[ i ] ) )
4309 addAllResults = addAllResults and getResults and sizeResults
4310 utilities.assert_equals( expect=main.TRUE,
4311 actual=addAllResults,
4312 onpass="Set addAll correct",
4313 onfail="Set addAll was incorrect" )
4314
4315 main.step( "Distributed Set retain()" )
4316 onosSet.intersection_update( retainValue.split() )
4317 retainResponses = []
4318 threads = []
4319 for i in main.activeNodes:
4320 t = main.Thread( target=main.CLIs[i].setTestRemove,
4321 name="setTestRetain-" + str( i ),
4322 args=[ onosSetName, retainValue ],
4323 kwargs={ "retain": True } )
4324 threads.append( t )
4325 t.start()
4326 for t in threads:
4327 t.join()
4328 retainResponses.append( t.result )
4329
4330 # main.TRUE = successfully changed the set
4331 # main.FALSE = action resulted in no change in set
4332 # main.ERROR - Some error in executing the function
4333 retainResults = main.TRUE
4334 for i in range( len( main.activeNodes ) ):
4335 if retainResponses[ i ] == main.TRUE:
4336 # All is well
4337 pass
4338 elif retainResponses[ i ] == main.FALSE:
4339 # Already in set, probably fine
4340 pass
4341 elif retainResponses[ i ] == main.ERROR:
4342 # Error in execution
4343 retainResults = main.FALSE
4344 else:
4345 # unexpected result
4346 retainResults = main.FALSE
4347 if retainResults != main.TRUE:
4348 main.log.error( "Error executing set retain" )
4349
4350 # Check if set is still correct
4351 size = len( onosSet )
4352 getResponses = []
4353 threads = []
4354 for i in main.activeNodes:
4355 t = main.Thread( target=main.CLIs[i].setTestGet,
4356 name="setTestGet-" + str( i ),
4357 args=[ onosSetName ] )
4358 threads.append( t )
4359 t.start()
4360 for t in threads:
4361 t.join()
4362 getResponses.append( t.result )
4363 getResults = main.TRUE
4364 for i in range( len( main.activeNodes ) ):
4365 node = str( main.activeNodes[i] + 1 )
4366 if isinstance( getResponses[ i ], list):
4367 current = set( getResponses[ i ] )
4368 if len( current ) == len( getResponses[ i ] ):
4369 # no repeats
4370 if onosSet != current:
4371 main.log.error( "ONOS" + node +
4372 " has incorrect view" +
4373 " of set " + onosSetName + ":\n" +
4374 str( getResponses[ i ] ) )
4375 main.log.debug( "Expected: " + str( onosSet ) )
4376 main.log.debug( "Actual: " + str( current ) )
4377 getResults = main.FALSE
4378 else:
4379 # error, set is not a set
4380 main.log.error( "ONOS" + node +
4381 " has repeat elements in" +
4382 " set " + onosSetName + ":\n" +
4383 str( getResponses[ i ] ) )
4384 getResults = main.FALSE
4385 elif getResponses[ i ] == main.ERROR:
4386 getResults = main.FALSE
4387 sizeResponses = []
4388 threads = []
4389 for i in main.activeNodes:
4390 t = main.Thread( target=main.CLIs[i].setTestSize,
4391 name="setTestSize-" + str( i ),
4392 args=[ onosSetName ] )
4393 threads.append( t )
4394 t.start()
4395 for t in threads:
4396 t.join()
4397 sizeResponses.append( t.result )
4398 sizeResults = main.TRUE
4399 for i in range( len( main.activeNodes ) ):
4400 node = str( main.activeNodes[i] + 1 )
4401 if size != sizeResponses[ i ]:
4402 sizeResults = main.FALSE
4403 main.log.error( "ONOS" + node + " expected a size of " +
4404 str( size ) + " for set " + onosSetName +
4405 " but got " + str( sizeResponses[ i ] ) )
4406 retainResults = retainResults and getResults and sizeResults
4407 utilities.assert_equals( expect=main.TRUE,
4408 actual=retainResults,
4409 onpass="Set retain correct",
4410 onfail="Set retain was incorrect" )
4411
4412 # Transactional maps
4413 main.step( "Partitioned Transactional maps put" )
4414 tMapValue = "Testing"
4415 numKeys = 100
4416 putResult = True
4417 node = main.activeNodes[0]
4418 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue )
4419 if putResponses and len( putResponses ) == 100:
4420 for i in putResponses:
4421 if putResponses[ i ][ 'value' ] != tMapValue:
4422 putResult = False
4423 else:
4424 putResult = False
4425 if not putResult:
4426 main.log.debug( "Put response values: " + str( putResponses ) )
4427 utilities.assert_equals( expect=True,
4428 actual=putResult,
4429 onpass="Partitioned Transactional Map put successful",
4430 onfail="Partitioned Transactional Map put values are incorrect" )
4431
4432 main.step( "Partitioned Transactional maps get" )
4433 getCheck = True
4434 for n in range( 1, numKeys + 1 ):
4435 getResponses = []
4436 threads = []
4437 valueCheck = True
4438 for i in main.activeNodes:
4439 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4440 name="TMap-get-" + str( i ),
4441 args=[ "Key" + str( n ) ] )
4442 threads.append( t )
4443 t.start()
4444 for t in threads:
4445 t.join()
4446 getResponses.append( t.result )
4447 for node in getResponses:
4448 if node != tMapValue:
4449 valueCheck = False
4450 if not valueCheck:
4451 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4452 main.log.warn( getResponses )
4453 getCheck = getCheck and valueCheck
4454 utilities.assert_equals( expect=True,
4455 actual=getCheck,
4456 onpass="Partitioned Transactional Map get values were correct",
4457 onfail="Partitioned Transactional Map values incorrect" )
4458
4459 main.step( "In-memory Transactional maps put" )
4460 tMapValue = "Testing"
4461 numKeys = 100
4462 putResult = True
4463 node = main.activeNodes[0]
4464 putResponses = main.CLIs[node].transactionalMapPut( numKeys, tMapValue, inMemory=True )
4465 if len( putResponses ) == 100:
4466 for i in putResponses:
4467 if putResponses[ i ][ 'value' ] != tMapValue:
4468 putResult = False
4469 else:
4470 putResult = False
4471 if not putResult:
4472 main.log.debug( "Put response values: " + str( putResponses ) )
4473 utilities.assert_equals( expect=True,
4474 actual=putResult,
4475 onpass="In-Memory Transactional Map put successful",
4476 onfail="In-Memory Transactional Map put values are incorrect" )
4477
4478 main.step( "In-Memory Transactional maps get" )
4479 getCheck = True
4480 for n in range( 1, numKeys + 1 ):
4481 getResponses = []
4482 threads = []
4483 valueCheck = True
4484 for i in main.activeNodes:
4485 t = main.Thread( target=main.CLIs[i].transactionalMapGet,
4486 name="TMap-get-" + str( i ),
4487 args=[ "Key" + str( n ) ],
4488 kwargs={ "inMemory": True } )
4489 threads.append( t )
4490 t.start()
4491 for t in threads:
4492 t.join()
4493 getResponses.append( t.result )
4494 for node in getResponses:
4495 if node != tMapValue:
4496 valueCheck = False
4497 if not valueCheck:
4498 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
4499 main.log.warn( getResponses )
4500 getCheck = getCheck and valueCheck
4501 utilities.assert_equals( expect=True,
4502 actual=getCheck,
4503 onpass="In-Memory Transactional Map get values were correct",
4504 onfail="In-Memory Transactional Map values incorrect" )