blob: a7f7303bc101d345a93132f721f99a12aff4b689 [file] [log] [blame]
Jon Hall5cf14d52015-07-16 12:15:19 -07001"""
Description: This test is to determine if ONOS can handle
    a minority of its nodes restarting
4
5List of test cases:
6CASE1: Compile ONOS and push it to the test machines
7CASE2: Assign devices to controllers
8CASE21: Assign mastership to controllers
9CASE3: Assign intents
10CASE4: Ping across added host intents
11CASE5: Reading state of ONOS
12CASE6: The Failure case.
13CASE7: Check state after control plane failure
14CASE8: Compare topo
15CASE9: Link s3-s28 down
16CASE10: Link s3-s28 up
17CASE11: Switch down
18CASE12: Switch up
19CASE13: Clean up
20CASE14: start election app on all onos nodes
21CASE15: Check that Leadership Election is still functional
22CASE16: Install Distributed Primitives app
23CASE17: Check for basic functionality with distributed primitives
24"""
25
26
27class HAminorityRestart:
28
29 def __init__( self ):
30 self.default = ''
31
32 def CASE1( self, main ):
33 """
34 CASE1 is to compile ONOS and push it to the test machines
35
36 Startup sequence:
37 cell <name>
38 onos-verify-cell
39 NOTE: temporary - onos-remove-raft-logs
40 onos-uninstall
41 start mininet
42 git pull
43 mvn clean install
44 onos-package
45 onos-install -f
46 onos-wait-for-start
47 start cli sessions
48 start tcpdump
49 """
50 main.log.info( "ONOS HA test: Restart minority of ONOS nodes - " +
51 "initialization" )
52 main.case( "Setting up test environment" )
53 main.caseExplaination = "Setup the test environment including " +\
54 "installing ONOS, starting Mininet and ONOS" +\
55 "cli sessions."
56 # TODO: save all the timers and output them for plotting
57
58 # load some variables from the params file
59 PULLCODE = False
60 if main.params[ 'Git' ] == 'True':
61 PULLCODE = True
62 gitBranch = main.params[ 'branch' ]
63 cellName = main.params[ 'ENV' ][ 'cellName' ]
64
65 # set global variables
66 global numControllers
67 numControllers = int( main.params[ 'num_controllers' ] )
68 if main.ONOSbench.maxNodes:
69 if main.ONOSbench.maxNodes < numControllers:
70 numControllers = int( main.ONOSbench.maxNodes )
71 global ONOS1Port
72 global ONOS2Port
73 global ONOS3Port
74 global ONOS4Port
75 global ONOS5Port
76 global ONOS6Port
77 global ONOS7Port
78
79 # FIXME: just get controller port from params?
80 # TODO: do we really need all these?
81 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
82 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
83 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
84 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
85 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
86 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
87 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
88
89 global CLIs
90 CLIs = []
91 global nodes
92 nodes = []
93 ipList = []
94 for i in range( 1, numControllers + 1 ):
95 CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
96 nodes.append( getattr( main, 'ONOS' + str( i ) ) )
97 ipList.append( nodes[ -1 ].ip_address )
98
99 main.step( "Create cell file" )
100 cellAppString = main.params[ 'ENV' ][ 'appString' ]
101 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
102 main.Mininet1.ip_address,
103 cellAppString, ipList )
104 main.step( "Applying cell variable to environment" )
105 cellResult = main.ONOSbench.setCell( cellName )
106 verifyResult = main.ONOSbench.verifyCell()
107
108 # FIXME:this is short term fix
109 main.log.info( "Removing raft logs" )
110 main.ONOSbench.onosRemoveRaftLogs()
111
112 main.log.info( "Uninstalling ONOS" )
113 for node in nodes:
114 main.ONOSbench.onosUninstall( node.ip_address )
115
116 # Make sure ONOS is DEAD
117 main.log.info( "Killing any ONOS processes" )
118 killResults = main.TRUE
119 for node in nodes:
120 killed = main.ONOSbench.onosKill( node.ip_address )
121 killResults = killResults and killed
122
123 cleanInstallResult = main.TRUE
124 gitPullResult = main.TRUE
125
126 main.step( "Starting Mininet" )
127 # scp topo file to mininet
128 # TODO: move to params?
129 topoName = "obelisk.py"
130 filePath = main.ONOSbench.home + "/tools/test/topos/"
131 main.ONOSbench.copyMininetFile( topoName, filePath,
132 main.Mininet1.user_name,
133 main.Mininet1.ip_address )
134 mnResult = main.Mininet1.startNet( )
135 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
136 onpass="Mininet Started",
137 onfail="Error starting Mininet" )
138
139 main.step( "Git checkout and pull " + gitBranch )
140 if PULLCODE:
141 main.ONOSbench.gitCheckout( gitBranch )
142 gitPullResult = main.ONOSbench.gitPull()
143 # values of 1 or 3 are good
144 utilities.assert_lesser( expect=0, actual=gitPullResult,
145 onpass="Git pull successful",
146 onfail="Git pull failed" )
147 main.ONOSbench.getVersion( report=True )
148
149 main.step( "Using mvn clean install" )
150 cleanInstallResult = main.TRUE
151 if PULLCODE and gitPullResult == main.TRUE:
152 cleanInstallResult = main.ONOSbench.cleanInstall()
153 else:
154 main.log.warn( "Did not pull new code so skipping mvn " +
155 "clean install" )
156 utilities.assert_equals( expect=main.TRUE,
157 actual=cleanInstallResult,
158 onpass="MCI successful",
159 onfail="MCI failed" )
160 # GRAPHS
161 # NOTE: important params here:
162 # job = name of Jenkins job
163 # Plot Name = Plot-HA, only can be used if multiple plots
164 # index = The number of the graph under plot name
165 job = "HAminorityRestart"
166 plotName = "Plot-HA"
167 graphs = '<ac:structured-macro ac:name="html">\n'
168 graphs += '<ac:plain-text-body><![CDATA[\n'
169 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
170 '/plot/' + plotName + '/getPlot?index=0' +\
171 '&width=500&height=300"' +\
172 'noborder="0" width="500" height="300" scrolling="yes" ' +\
173 'seamless="seamless"></iframe>\n'
174 graphs += ']]></ac:plain-text-body>\n'
175 graphs += '</ac:structured-macro>\n'
176 main.log.wiki(graphs)
177
178 main.step( "Creating ONOS package" )
179 packageResult = main.ONOSbench.onosPackage()
180 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
181 onpass="ONOS package successful",
182 onfail="ONOS package failed" )
183
184 main.step( "Installing ONOS package" )
185 onosInstallResult = main.TRUE
186 for node in nodes:
187 tmpResult = main.ONOSbench.onosInstall( options="-f",
188 node=node.ip_address )
189 onosInstallResult = onosInstallResult and tmpResult
190 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
191 onpass="ONOS install successful",
192 onfail="ONOS install failed" )
193
194 main.step( "Checking if ONOS is up yet" )
195 for i in range( 2 ):
196 onosIsupResult = main.TRUE
197 for node in nodes:
198 started = main.ONOSbench.isup( node.ip_address )
199 if not started:
200 main.log.error( node.name + " didn't start!" )
201 main.ONOSbench.onosStop( node.ip_address )
202 main.ONOSbench.onosStart( node.ip_address )
203 onosIsupResult = onosIsupResult and started
204 if onosIsupResult == main.TRUE:
205 break
206 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
207 onpass="ONOS startup successful",
208 onfail="ONOS startup failed" )
209
210 main.log.step( "Starting ONOS CLI sessions" )
211 cliResults = main.TRUE
212 threads = []
213 for i in range( numControllers ):
214 t = main.Thread( target=CLIs[i].startOnosCli,
215 name="startOnosCli-" + str( i ),
216 args=[nodes[i].ip_address] )
217 threads.append( t )
218 t.start()
219
220 for t in threads:
221 t.join()
222 cliResults = cliResults and t.result
223 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
224 onpass="ONOS cli startup successful",
225 onfail="ONOS cli startup failed" )
226
227 if main.params[ 'tcpdump' ].lower() == "true":
228 main.step( "Start Packet Capture MN" )
229 main.Mininet2.startTcpdump(
230 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
231 + "-MN.pcap",
232 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
233 port=main.params[ 'MNtcpdump' ][ 'port' ] )
234
235 main.step( "App Ids check" )
236 appCheck = main.TRUE
237 threads = []
238 for i in range( numControllers ):
239 t = main.Thread( target=CLIs[i].appToIDCheck,
240 name="appToIDCheck-" + str( i ),
241 args=[] )
242 threads.append( t )
243 t.start()
244
245 for t in threads:
246 t.join()
247 appCheck = appCheck and t.result
248 if appCheck != main.TRUE:
249 main.log.warn( CLIs[0].apps() )
250 main.log.warn( CLIs[0].appIDs() )
251 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
252 onpass="App Ids seem to be correct",
253 onfail="Something is wrong with app Ids" )
254
255 if cliResults == main.FALSE:
256 main.log.error( "Failed to start ONOS, stopping test" )
257 main.cleanup()
258 main.exit()
259
    def CASE2( self, main ):
        """
        Assign devices to controllers

        Points every Mininet switch (s1-s28) at all ONOS controllers via
        the Mininet driver (ovs-vsctl), then verifies from each switch's
        controller list that every node IP is present.
        """
        import re
        import time
        # Globals below are populated by CASE1; fail fast if it did not run
        assert numControllers, "numControllers not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert CLIs, "CLIs not defined"
        assert nodes, "nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplaination = "Assign switches to ONOS using 'ovs-vsctl' " +\
                                "and check that an ONOS node becomes the " +\
                                "master of the device."
        main.step( "Assign switches to controllers" )

        # Build the full controller IP list and the s1..s28 switch list
        ipList = []
        for i in range( numControllers ):
            ipList.append( nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        # Every switch must list a tcp:<ip> target for every controller
        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                # response may not be str()-able; fall back to repr()
                main.log.info( repr( response ) )
            for node in nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )
313
    def CASE21( self, main ):
        """
        Assign mastership to controllers

        Manually assigns mastership of each of the 28 switches to a
        designated ONOS node via 'device-role', then re-reads each device's
        role to confirm the designated node became master.
        """
        import re
        import time
        # Globals below are populated by CASE1; fail fast if it did not run
        assert numControllers, "numControllers not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert CLIs, "CLIs not defined"
        assert nodes, "nodes not defined"
        assert ONOS1Port, "ONOS1Port not defined"
        assert ONOS2Port, "ONOS2Port not defined"
        assert ONOS3Port, "ONOS3Port not defined"
        assert ONOS4Port, "ONOS4Port not defined"
        assert ONOS5Port, "ONOS5Port not defined"
        assert ONOS6Port, "ONOS6Port not defined"
        assert ONOS7Port, "ONOS7Port not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplaination = "Check that ONOS is connected to each " +\
                                "device. Then manually assign" +\
                                " mastership to specific ONOS nodes using" +\
                                " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        ipList = [ ]
        deviceList = []
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster (the "% numControllers" wraps indices around)
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c = controller index, ip = that node's address,
                # deviceId = ONOS id looked up from the switch dpid suffix
                if i == 1:
                    c = 0
                    ip = nodes[ c ].ip_address  # ONOS1
                    deviceId = main.ONOScli1.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % numControllers
                    ip = nodes[ c ].ip_address  # ONOS2
                    deviceId = main.ONOScli1.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % numControllers
                    ip = nodes[ c ].ip_address  # ONOS2
                    deviceId = main.ONOScli1.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % numControllers
                    ip = nodes[ c ].ip_address  # ONOS4
                    deviceId = main.ONOScli1.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % numControllers
                    ip = nodes[ c ].ip_address  # ONOS3
                    deviceId = main.ONOScli1.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % numControllers
                    ip = nodes[ c ].ip_address  # ONOS3
                    deviceId = main.ONOScli1.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % numControllers
                    ip = nodes[ c ].ip_address  # ONOS6
                    deviceId = main.ONOScli1.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % numControllers
                    ip = nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % numControllers
                    ip = nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = nodes[ c ].ip_address  # ONOS1
                    deviceId = main.ONOScli1.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and main.ONOScli1.deviceRole( deviceId,
                                                                  ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice() returned None (AttributeError on .get) or the
            # deviceId assert fired: dump the device view for debugging
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( main.ONOScli1.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[i]
            deviceId = deviceList[i]
            # Check assignment: the designated ip must be the reported master
            master = main.ONOScli1.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
436
    def CASE3( self, main ):
        """
        Assign intents

        Workflow: activate reactive forwarding, pingall so hosts are
        discovered, deactivate forwarding, add h<i><->h<i+10> host intents
        for i in 8..17 (spread round-robin across the CLIs), then verify the
        intents reach INSTALLED on all nodes and measure how long the
        anti-entropy dispersion takes (stored in the global gossipTime).
        """
        import time
        import json
        # Globals below are populated by CASE1; fail fast if it did not run
        assert numControllers, "numControllers not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert CLIs, "CLIs not defined"
        assert nodes, "nodes not defined"
        main.case( "Adding host Intents" )
        main.caseExplaination = "Discover hosts by using pingall then " +\
                                "assign predetermined host-to-host intents." +\
                                " After installation, check that the intent" +\
                                " is distributed to all nodes and the state" +\
                                " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        installResults = CLIs[0].activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        # Run appToIDCheck on every node in parallel and AND the results
        appCheck = main.TRUE
        threads = []
        for i in range( numControllers ):
            t = main.Thread( target=CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( CLIs[0].apps() )
            main.log.warn( CLIs[0].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        for i in range(2):  # Retry if pingall fails first time
            time1 = time.time()
            pingResult = main.Mininet1.pingall()
            if i == 0:
                # Only report the first attempt; the retry is best-effort
                utilities.assert_equals(
                    expect=main.TRUE,
                    actual=pingResult,
                    onpass="Reactive Pingall test passed",
                    onfail="Reactive Pingall failed, " +
                           "one or more ping pairs failed" )
            time2 = time.time()
            main.log.info( "Time for pingall: %2f seconds" %
                           ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        uninstallResult = CLIs[0].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in range( numControllers ):
            t = main.Thread( target=CLIs[i].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            main.log.warn( CLIs[0].apps() )
            main.log.warn( CLIs[0].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            # Host MACs follow the Mininet convention: last octet == host
            # number in hex
            host1 = "00:00:00:00:00:" + \
                str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = main.ONOScli1.getHost( host1 )
            host2Dict = main.ONOScli1.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                # Round-robin which node submits the intent
                nodeNum = ( i % numControllers )
                tmpId = CLIs[ nodeNum ].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                hosts = CLIs[ 0 ].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = main.ONOScli1.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = main.ONOScli1.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        # Debug dump: leadership of the intent partition topics
        leaders = main.ONOScli1.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "intent-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j['topic'] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for node in CLIs:
                response = node.leaders( jsonFormat=False)
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        partitions = main.ONOScli1.partitions()
        try:
            if partitions :
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all paritions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = main.ONOScli1.pendingMap()
        try:
            if pendingMap :
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        # Poll (up to ~100s) until every node reports the same intent IDs,
        # all in INSTALLED state
        for i in range(100):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for cli in CLIs:
                onosIds = []
                ids = cli.getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + cli.name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( cli.intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep(1)
        if not intentStop:
            intentStop = time.time()
        # gossipTime is read by later cases / reporting
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        # FIXME: make this time configurable/calculate based off of number of
        #        nodes and gossip rounds
        utilities.assert_greater_equals(
            expect=40, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long" )
        if gossipTime <= 40:
            intentAddResult = True

        # NOTE(review): pendingMap is the raw JSON string here, so this is a
        # substring test for the literal "key" — presumably non-empty pending
        # map entries; confirm against the pendingMap output format
        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = main.ONOScli1.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = main.ONOScli1.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = main.ONOScli1.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "intent-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j['topic'] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for node in CLIs:
                    response = node.leaders( jsonFormat=False)
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = main.ONOScli1.partitions()
            try:
                if partitions :
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all paritions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = main.ONOScli1.pendingMap()
            try:
                if pendingMap :
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
821
822 def CASE4( self, main ):
823 """
824 Ping across added host intents
825 """
826 import json
827 import time
828 assert numControllers, "numControllers not defined"
829 assert main, "main not defined"
830 assert utilities.assert_equals, "utilities.assert_equals not defined"
831 assert CLIs, "CLIs not defined"
832 assert nodes, "nodes not defined"
833 main.case( "Verify connectivity by sendind traffic across Intents" )
834 main.caseExplaination = "Ping across added host intents to check " +\
835 "functionality and check the state of " +\
836 "the intent"
837 main.step( "Ping across added host intents" )
838 PingResult = main.TRUE
839 for i in range( 8, 18 ):
840 ping = main.Mininet1.pingHost( src="h" + str( i ),
841 target="h" + str( i + 10 ) )
842 PingResult = PingResult and ping
843 if ping == main.FALSE:
844 main.log.warn( "Ping failed between h" + str( i ) +
845 " and h" + str( i + 10 ) )
846 elif ping == main.TRUE:
847 main.log.info( "Ping test passed!" )
848 # Don't set PingResult or you'd override failures
849 if PingResult == main.FALSE:
850 main.log.error(
851 "Intents have not been installed correctly, pings failed." )
852 # TODO: pretty print
853 main.log.warn( "ONOS1 intents: " )
854 try:
855 tmpIntents = main.ONOScli1.intents()
856 main.log.warn( json.dumps( json.loads( tmpIntents ),
857 sort_keys=True,
858 indent=4,
859 separators=( ',', ': ' ) ) )
860 except ( ValueError, TypeError ):
861 main.log.warn( repr( tmpIntents ) )
862 utilities.assert_equals(
863 expect=main.TRUE,
864 actual=PingResult,
865 onpass="Intents have been installed correctly and pings work",
866 onfail="Intents have not been installed correctly, pings failed." )
867
868 main.step( "Check Intent state" )
869 installedCheck = False
870 loopCount = 0
871 while not installedCheck and loopCount < 40:
872 installedCheck = True
873 # Print the intent states
874 intents = main.ONOScli1.intents()
875 intentStates = []
876 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
877 count = 0
878 # Iter through intents of a node
879 try:
880 for intent in json.loads( intents ):
881 state = intent.get( 'state', None )
882 if "INSTALLED" not in state:
883 installedCheck = False
884 intentId = intent.get( 'id', None )
885 intentStates.append( ( intentId, state ) )
886 except ( ValueError, TypeError ):
887 main.log.exception( "Error parsing intents." )
888 # Print states
889 intentStates.sort()
890 for i, s in intentStates:
891 count += 1
892 main.log.info( "%-6s%-15s%-15s" %
893 ( str( count ), str( i ), str( s ) ) )
894 if not installedCheck:
895 time.sleep( 1 )
896 loopCount += 1
897 utilities.assert_equals( expect=True, actual=installedCheck,
898 onpass="Intents are all INSTALLED",
899 onfail="Intents are not all in " +
900 "INSTALLED state" )
901
902 main.step( "Check leadership of topics" )
903 leaders = main.ONOScli1.leaders()
904 topicCheck = main.TRUE
905 try:
906 if leaders:
907 parsedLeaders = json.loads( leaders )
908 main.log.warn( json.dumps( parsedLeaders,
909 sort_keys=True,
910 indent=4,
911 separators=( ',', ': ' ) ) )
912 # check for all intent partitions
913 # check for election
914 # TODO: Look at Devices as topics now that it uses this system
915 topics = []
916 for i in range( 14 ):
917 topics.append( "intent-partition-" + str( i ) )
918 # FIXME: this should only be after we start the app
919 # FIXME: topics.append( "org.onosproject.election" )
920 # Print leaders output
921 main.log.debug( topics )
922 ONOStopics = [ j['topic'] for j in parsedLeaders ]
923 for topic in topics:
924 if topic not in ONOStopics:
925 main.log.error( "Error: " + topic +
926 " not in leaders" )
927 topicCheck = main.FALSE
928 else:
929 main.log.error( "leaders() returned None" )
930 topicCheck = main.FALSE
931 except ( ValueError, TypeError ):
932 topicCheck = main.FALSE
933 main.log.exception( "Error parsing leaders" )
934 main.log.error( repr( leaders ) )
935 # TODO: Check for a leader of these topics
936 # Check all nodes
937 if topicCheck:
938 for node in CLIs:
939 response = node.leaders( jsonFormat=False)
940 main.log.warn( str( node.name ) + " leaders output: \n" +
941 str( response ) )
942
943 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
944 onpass="intent Partitions is in leaders",
945 onfail="Some topics were lost " )
946 # Print partitions
947 partitions = main.ONOScli1.partitions()
948 try:
949 if partitions :
950 parsedPartitions = json.loads( partitions )
951 main.log.warn( json.dumps( parsedPartitions,
952 sort_keys=True,
953 indent=4,
954 separators=( ',', ': ' ) ) )
955 # TODO check for a leader in all paritions
956 # TODO check for consistency among nodes
957 else:
958 main.log.error( "partitions() returned None" )
959 except ( ValueError, TypeError ):
960 main.log.exception( "Error parsing partitions" )
961 main.log.error( repr( partitions ) )
962 # Print Pending Map
963 pendingMap = main.ONOScli1.pendingMap()
964 try:
965 if pendingMap :
966 parsedPending = json.loads( pendingMap )
967 main.log.warn( json.dumps( parsedPending,
968 sort_keys=True,
969 indent=4,
970 separators=( ',', ': ' ) ) )
971 # TODO check something here?
972 else:
973 main.log.error( "pendingMap() returned None" )
974 except ( ValueError, TypeError ):
975 main.log.exception( "Error parsing pending map" )
976 main.log.error( repr( pendingMap ) )
977
978 if not installedCheck:
979 main.log.info( "Waiting 60 seconds to see if the state of " +
980 "intents change" )
981 time.sleep( 60 )
982 # Print the intent states
983 intents = main.ONOScli1.intents()
984 intentStates = []
985 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
986 count = 0
987 # Iter through intents of a node
988 try:
989 for intent in json.loads( intents ):
990 state = intent.get( 'state', None )
991 if "INSTALLED" not in state:
992 installedCheck = False
993 intentId = intent.get( 'id', None )
994 intentStates.append( ( intentId, state ) )
995 except ( ValueError, TypeError ):
996 main.log.exception( "Error parsing intents." )
997 intentStates.sort()
998 for i, s in intentStates:
999 count += 1
1000 main.log.info( "%-6s%-15s%-15s" %
1001 ( str( count ), str( i ), str( s ) ) )
1002 leaders = main.ONOScli1.leaders()
1003 try:
1004 missing = False
1005 if leaders:
1006 parsedLeaders = json.loads( leaders )
1007 main.log.warn( json.dumps( parsedLeaders,
1008 sort_keys=True,
1009 indent=4,
1010 separators=( ',', ': ' ) ) )
1011 # check for all intent partitions
1012 # check for election
1013 topics = []
1014 for i in range( 14 ):
1015 topics.append( "intent-partition-" + str( i ) )
1016 # FIXME: this should only be after we start the app
1017 topics.append( "org.onosproject.election" )
1018 main.log.debug( topics )
1019 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1020 for topic in topics:
1021 if topic not in ONOStopics:
1022 main.log.error( "Error: " + topic +
1023 " not in leaders" )
1024 missing = True
1025 else:
1026 main.log.error( "leaders() returned None" )
1027 except ( ValueError, TypeError ):
1028 main.log.exception( "Error parsing leaders" )
1029 main.log.error( repr( leaders ) )
1030 if missing:
1031 for node in CLIs:
1032 response = node.leaders( jsonFormat=False)
1033 main.log.warn( str( node.name ) + " leaders output: \n" +
1034 str( response ) )
1035
1036 partitions = main.ONOScli1.partitions()
1037 try:
1038 if partitions :
1039 parsedPartitions = json.loads( partitions )
1040 main.log.warn( json.dumps( parsedPartitions,
1041 sort_keys=True,
1042 indent=4,
1043 separators=( ',', ': ' ) ) )
1044 # TODO check for a leader in all paritions
1045 # TODO check for consistency among nodes
1046 else:
1047 main.log.error( "partitions() returned None" )
1048 except ( ValueError, TypeError ):
1049 main.log.exception( "Error parsing partitions" )
1050 main.log.error( repr( partitions ) )
1051 pendingMap = main.ONOScli1.pendingMap()
1052 try:
1053 if pendingMap :
1054 parsedPending = json.loads( pendingMap )
1055 main.log.warn( json.dumps( parsedPending,
1056 sort_keys=True,
1057 indent=4,
1058 separators=( ',', ': ' ) ) )
1059 # TODO check something here?
1060 else:
1061 main.log.error( "pendingMap() returned None" )
1062 except ( ValueError, TypeError ):
1063 main.log.exception( "Error parsing pending map" )
1064 main.log.error( repr( pendingMap ) )
1065 # Print flowrules
1066 main.log.debug( CLIs[0].flows( jsonFormat=False ) )
1067 main.step( "Wait a minute then ping again" )
1068 # the wait is above
1069 PingResult = main.TRUE
1070 for i in range( 8, 18 ):
1071 ping = main.Mininet1.pingHost( src="h" + str( i ),
1072 target="h" + str( i + 10 ) )
1073 PingResult = PingResult and ping
1074 if ping == main.FALSE:
1075 main.log.warn( "Ping failed between h" + str( i ) +
1076 " and h" + str( i + 10 ) )
1077 elif ping == main.TRUE:
1078 main.log.info( "Ping test passed!" )
1079 # Don't set PingResult or you'd override failures
1080 if PingResult == main.FALSE:
1081 main.log.error(
1082 "Intents have not been installed correctly, pings failed." )
1083 # TODO: pretty print
1084 main.log.warn( "ONOS1 intents: " )
1085 try:
1086 tmpIntents = main.ONOScli1.intents()
1087 main.log.warn( json.dumps( json.loads( tmpIntents ),
1088 sort_keys=True,
1089 indent=4,
1090 separators=( ',', ': ' ) ) )
1091 except ( ValueError, TypeError ):
1092 main.log.warn( repr( tmpIntents ) )
1093 utilities.assert_equals(
1094 expect=main.TRUE,
1095 actual=PingResult,
1096 onpass="Intents have been installed correctly and pings work",
1097 onfail="Intents have not been installed correctly, pings failed." )
1098
1099 def CASE5( self, main ):
1100 """
1101 Reading state of ONOS
1102 """
1103 import json
1104 import time
1105 assert numControllers, "numControllers not defined"
1106 assert main, "main not defined"
1107 assert utilities.assert_equals, "utilities.assert_equals not defined"
1108 assert CLIs, "CLIs not defined"
1109 assert nodes, "nodes not defined"
1110
1111 main.case( "Setting up and gathering data for current state" )
1112 # The general idea for this test case is to pull the state of
1113 # ( intents,flows, topology,... ) from each ONOS node
1114 # We can then compare them with each other and also with past states
1115
1116 main.step( "Check that each switch has a master" )
1117 global mastershipState
1118 mastershipState = '[]'
1119
1120 # Assert that each device has a master
1121 rolesNotNull = main.TRUE
1122 threads = []
1123 for i in range( numControllers ):
1124 t = main.Thread( target=CLIs[i].rolesNotNull,
1125 name="rolesNotNull-" + str( i ),
1126 args=[] )
1127 threads.append( t )
1128 t.start()
1129
1130 for t in threads:
1131 t.join()
1132 rolesNotNull = rolesNotNull and t.result
1133 utilities.assert_equals(
1134 expect=main.TRUE,
1135 actual=rolesNotNull,
1136 onpass="Each device has a master",
1137 onfail="Some devices don't have a master assigned" )
1138
1139 main.step( "Get the Mastership of each switch from each controller" )
1140 ONOSMastership = []
1141 mastershipCheck = main.FALSE
1142 consistentMastership = True
1143 rolesResults = True
1144 threads = []
1145 for i in range( numControllers ):
1146 t = main.Thread( target=CLIs[i].roles,
1147 name="roles-" + str( i ),
1148 args=[] )
1149 threads.append( t )
1150 t.start()
1151
1152 for t in threads:
1153 t.join()
1154 ONOSMastership.append( t.result )
1155
1156 for i in range( numControllers ):
1157 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1158 main.log.error( "Error in getting ONOS" + str( i + 1 ) +
1159 " roles" )
1160 main.log.warn(
1161 "ONOS" + str( i + 1 ) + " mastership response: " +
1162 repr( ONOSMastership[i] ) )
1163 rolesResults = False
1164 utilities.assert_equals(
1165 expect=True,
1166 actual=rolesResults,
1167 onpass="No error in reading roles output",
1168 onfail="Error in reading roles from ONOS" )
1169
1170 main.step( "Check for consistency in roles from each controller" )
1171 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1172 main.log.info(
1173 "Switch roles are consistent across all ONOS nodes" )
1174 else:
1175 consistentMastership = False
1176 utilities.assert_equals(
1177 expect=True,
1178 actual=consistentMastership,
1179 onpass="Switch roles are consistent across all ONOS nodes",
1180 onfail="ONOS nodes have different views of switch roles" )
1181
1182 if rolesResults and not consistentMastership:
1183 for i in range( numControllers ):
1184 try:
1185 main.log.warn(
1186 "ONOS" + str( i + 1 ) + " roles: ",
1187 json.dumps(
1188 json.loads( ONOSMastership[ i ] ),
1189 sort_keys=True,
1190 indent=4,
1191 separators=( ',', ': ' ) ) )
1192 except ( ValueError, TypeError ):
1193 main.log.warn( repr( ONOSMastership[ i ] ) )
1194 elif rolesResults and consistentMastership:
1195 mastershipCheck = main.TRUE
1196 mastershipState = ONOSMastership[ 0 ]
1197
1198 main.step( "Get the intents from each controller" )
1199 global intentState
1200 intentState = []
1201 ONOSIntents = []
1202 intentCheck = main.FALSE
1203 consistentIntents = True
1204 intentsResults = True
1205 threads = []
1206 for i in range( numControllers ):
1207 t = main.Thread( target=CLIs[i].intents,
1208 name="intents-" + str( i ),
1209 args=[],
1210 kwargs={ 'jsonFormat': True } )
1211 threads.append( t )
1212 t.start()
1213
1214 for t in threads:
1215 t.join()
1216 ONOSIntents.append( t.result )
1217
1218 for i in range( numControllers ):
1219 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1220 main.log.error( "Error in getting ONOS" + str( i + 1 ) +
1221 " intents" )
1222 main.log.warn( "ONOS" + str( i + 1 ) + " intents response: " +
1223 repr( ONOSIntents[ i ] ) )
1224 intentsResults = False
1225 utilities.assert_equals(
1226 expect=True,
1227 actual=intentsResults,
1228 onpass="No error in reading intents output",
1229 onfail="Error in reading intents from ONOS" )
1230
1231 main.step( "Check for consistency in Intents from each controller" )
1232 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1233 main.log.info( "Intents are consistent across all ONOS " +
1234 "nodes" )
1235 else:
1236 consistentIntents = False
1237 main.log.error( "Intents not consistent" )
1238 utilities.assert_equals(
1239 expect=True,
1240 actual=consistentIntents,
1241 onpass="Intents are consistent across all ONOS nodes",
1242 onfail="ONOS nodes have different views of intents" )
1243
1244 if intentsResults:
1245 # Try to make it easy to figure out what is happening
1246 #
1247 # Intent ONOS1 ONOS2 ...
1248 # 0x01 INSTALLED INSTALLING
1249 # ... ... ...
1250 # ... ... ...
1251 title = " Id"
1252 for n in range( numControllers ):
1253 title += " " * 10 + "ONOS" + str( n + 1 )
1254 main.log.warn( title )
1255 # get all intent keys in the cluster
1256 keys = []
1257 for nodeStr in ONOSIntents:
1258 node = json.loads( nodeStr )
1259 for intent in node:
1260 keys.append( intent.get( 'id' ) )
1261 keys = set( keys )
1262 for key in keys:
1263 row = "%-13s" % key
1264 for nodeStr in ONOSIntents:
1265 node = json.loads( nodeStr )
1266 for intent in node:
1267 if intent.get( 'id', "Error" ) == key:
1268 row += "%-15s" % intent.get( 'state' )
1269 main.log.warn( row )
1270 # End table view
1271
1272 if intentsResults and not consistentIntents:
1273 # print the json objects
1274 n = len(ONOSIntents)
1275 main.log.debug( "ONOS" + str( n ) + " intents: " )
1276 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1277 sort_keys=True,
1278 indent=4,
1279 separators=( ',', ': ' ) ) )
1280 for i in range( numControllers ):
1281 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1282 main.log.debug( "ONOS" + str( i + 1 ) + " intents: " )
1283 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1284 sort_keys=True,
1285 indent=4,
1286 separators=( ',', ': ' ) ) )
1287 else:
1288 main.log.debug( nodes[ i ].name + " intents match ONOS" +
1289 str( n ) + " intents" )
1290 elif intentsResults and consistentIntents:
1291 intentCheck = main.TRUE
1292 intentState = ONOSIntents[ 0 ]
1293
1294 main.step( "Get the flows from each controller" )
1295 global flowState
1296 flowState = []
1297 ONOSFlows = []
1298 ONOSFlowsJson = []
1299 flowCheck = main.FALSE
1300 consistentFlows = True
1301 flowsResults = True
1302 threads = []
1303 for i in range( numControllers ):
1304 t = main.Thread( target=CLIs[i].flows,
1305 name="flows-" + str( i ),
1306 args=[],
1307 kwargs={ 'jsonFormat': True } )
1308 threads.append( t )
1309 t.start()
1310
1311 # NOTE: Flows command can take some time to run
1312 time.sleep(30)
1313 for t in threads:
1314 t.join()
1315 result = t.result
1316 ONOSFlows.append( result )
1317
1318 for i in range( numControllers ):
1319 num = str( i + 1 )
1320 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1321 main.log.error( "Error in getting ONOS" + num + " flows" )
1322 main.log.warn( "ONOS" + num + " flows response: " +
1323 repr( ONOSFlows[ i ] ) )
1324 flowsResults = False
1325 ONOSFlowsJson.append( None )
1326 else:
1327 try:
1328 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1329 except ( ValueError, TypeError ):
1330 # FIXME: change this to log.error?
1331 main.log.exception( "Error in parsing ONOS" + num +
1332 " response as json." )
1333 main.log.error( repr( ONOSFlows[ i ] ) )
1334 ONOSFlowsJson.append( None )
1335 flowsResults = False
1336 utilities.assert_equals(
1337 expect=True,
1338 actual=flowsResults,
1339 onpass="No error in reading flows output",
1340 onfail="Error in reading flows from ONOS" )
1341
1342 main.step( "Check for consistency in Flows from each controller" )
1343 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1344 if all( tmp ):
1345 main.log.info( "Flow count is consistent across all ONOS nodes" )
1346 else:
1347 consistentFlows = False
1348 utilities.assert_equals(
1349 expect=True,
1350 actual=consistentFlows,
1351 onpass="The flow count is consistent across all ONOS nodes",
1352 onfail="ONOS nodes have different flow counts" )
1353
1354 if flowsResults and not consistentFlows:
1355 for i in range( numControllers ):
1356 try:
1357 main.log.warn(
1358 "ONOS" + str( i + 1 ) + " flows: " +
1359 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1360 indent=4, separators=( ',', ': ' ) ) )
1361 except ( ValueError, TypeError ):
1362 main.log.warn(
1363 "ONOS" + str( i + 1 ) + " flows: " +
1364 repr( ONOSFlows[ i ] ) )
1365 elif flowsResults and consistentFlows:
1366 flowCheck = main.TRUE
1367 flowState = ONOSFlows[ 0 ]
1368
1369 main.step( "Get the OF Table entries" )
1370 global flows
1371 flows = []
1372 for i in range( 1, 29 ):
1373 flows.append( main.Mininet2.getFlowTable( 1.3, "s" + str( i ) ) )
1374 if flowCheck == main.FALSE:
1375 for table in flows:
1376 main.log.warn( table )
1377 # TODO: Compare switch flow tables with ONOS flow tables
1378
1379 main.step( "Start continuous pings" )
1380 main.Mininet2.pingLong(
1381 src=main.params[ 'PING' ][ 'source1' ],
1382 target=main.params[ 'PING' ][ 'target1' ],
1383 pingTime=500 )
1384 main.Mininet2.pingLong(
1385 src=main.params[ 'PING' ][ 'source2' ],
1386 target=main.params[ 'PING' ][ 'target2' ],
1387 pingTime=500 )
1388 main.Mininet2.pingLong(
1389 src=main.params[ 'PING' ][ 'source3' ],
1390 target=main.params[ 'PING' ][ 'target3' ],
1391 pingTime=500 )
1392 main.Mininet2.pingLong(
1393 src=main.params[ 'PING' ][ 'source4' ],
1394 target=main.params[ 'PING' ][ 'target4' ],
1395 pingTime=500 )
1396 main.Mininet2.pingLong(
1397 src=main.params[ 'PING' ][ 'source5' ],
1398 target=main.params[ 'PING' ][ 'target5' ],
1399 pingTime=500 )
1400 main.Mininet2.pingLong(
1401 src=main.params[ 'PING' ][ 'source6' ],
1402 target=main.params[ 'PING' ][ 'target6' ],
1403 pingTime=500 )
1404 main.Mininet2.pingLong(
1405 src=main.params[ 'PING' ][ 'source7' ],
1406 target=main.params[ 'PING' ][ 'target7' ],
1407 pingTime=500 )
1408 main.Mininet2.pingLong(
1409 src=main.params[ 'PING' ][ 'source8' ],
1410 target=main.params[ 'PING' ][ 'target8' ],
1411 pingTime=500 )
1412 main.Mininet2.pingLong(
1413 src=main.params[ 'PING' ][ 'source9' ],
1414 target=main.params[ 'PING' ][ 'target9' ],
1415 pingTime=500 )
1416 main.Mininet2.pingLong(
1417 src=main.params[ 'PING' ][ 'source10' ],
1418 target=main.params[ 'PING' ][ 'target10' ],
1419 pingTime=500 )
1420
1421 main.step( "Collecting topology information from ONOS" )
1422 devices = []
1423 threads = []
1424 for i in range( numControllers ):
1425 t = main.Thread( target=CLIs[i].devices,
1426 name="devices-" + str( i ),
1427 args=[ ] )
1428 threads.append( t )
1429 t.start()
1430
1431 for t in threads:
1432 t.join()
1433 devices.append( t.result )
1434 hosts = []
1435 threads = []
1436 for i in range( numControllers ):
1437 t = main.Thread( target=CLIs[i].hosts,
1438 name="hosts-" + str( i ),
1439 args=[ ] )
1440 threads.append( t )
1441 t.start()
1442
1443 for t in threads:
1444 t.join()
1445 try:
1446 hosts.append( json.loads( t.result ) )
1447 except ( ValueError, TypeError ):
1448 # FIXME: better handling of this, print which node
1449 # Maybe use thread name?
1450 main.log.exception( "Error parsing json output of hosts" )
1451 # FIXME: should this be an empty json object instead?
1452 hosts.append( None )
1453
1454 ports = []
1455 threads = []
1456 for i in range( numControllers ):
1457 t = main.Thread( target=CLIs[i].ports,
1458 name="ports-" + str( i ),
1459 args=[ ] )
1460 threads.append( t )
1461 t.start()
1462
1463 for t in threads:
1464 t.join()
1465 ports.append( t.result )
1466 links = []
1467 threads = []
1468 for i in range( numControllers ):
1469 t = main.Thread( target=CLIs[i].links,
1470 name="links-" + str( i ),
1471 args=[ ] )
1472 threads.append( t )
1473 t.start()
1474
1475 for t in threads:
1476 t.join()
1477 links.append( t.result )
1478 clusters = []
1479 threads = []
1480 for i in range( numControllers ):
1481 t = main.Thread( target=CLIs[i].clusters,
1482 name="clusters-" + str( i ),
1483 args=[ ] )
1484 threads.append( t )
1485 t.start()
1486
1487 for t in threads:
1488 t.join()
1489 clusters.append( t.result )
1490 # Compare json objects for hosts and dataplane clusters
1491
1492 # hosts
1493 main.step( "Host view is consistent across ONOS nodes" )
1494 consistentHostsResult = main.TRUE
1495 for controller in range( len( hosts ) ):
1496 controllerStr = str( controller + 1 )
1497 if "Error" not in hosts[ controller ]:
1498 if hosts[ controller ] == hosts[ 0 ]:
1499 continue
1500 else: # hosts not consistent
1501 main.log.error( "hosts from ONOS" +
1502 controllerStr +
1503 " is inconsistent with ONOS1" )
1504 main.log.warn( repr( hosts[ controller ] ) )
1505 consistentHostsResult = main.FALSE
1506
1507 else:
1508 main.log.error( "Error in getting ONOS hosts from ONOS" +
1509 controllerStr )
1510 consistentHostsResult = main.FALSE
1511 main.log.warn( "ONOS" + controllerStr +
1512 " hosts response: " +
1513 repr( hosts[ controller ] ) )
1514 utilities.assert_equals(
1515 expect=main.TRUE,
1516 actual=consistentHostsResult,
1517 onpass="Hosts view is consistent across all ONOS nodes",
1518 onfail="ONOS nodes have different views of hosts" )
1519
1520 main.step( "Each host has an IP address" )
1521 ipResult = main.TRUE
1522 for controller in range( 0, len( hosts ) ):
1523 controllerStr = str( controller + 1 )
1524 for host in hosts[ controller ]:
1525 if not host.get( 'ipAddresses', [ ] ):
1526 main.log.error( "DEBUG:Error with host ips on controller" +
1527 controllerStr + ": " + str( host ) )
1528 ipResult = main.FALSE
1529 utilities.assert_equals(
1530 expect=main.TRUE,
1531 actual=ipResult,
1532 onpass="The ips of the hosts aren't empty",
1533 onfail="The ip of at least one host is missing" )
1534
1535 # Strongly connected clusters of devices
1536 main.step( "Cluster view is consistent across ONOS nodes" )
1537 consistentClustersResult = main.TRUE
1538 for controller in range( len( clusters ) ):
1539 controllerStr = str( controller + 1 )
1540 if "Error" not in clusters[ controller ]:
1541 if clusters[ controller ] == clusters[ 0 ]:
1542 continue
1543 else: # clusters not consistent
1544 main.log.error( "clusters from ONOS" + controllerStr +
1545 " is inconsistent with ONOS1" )
1546 consistentClustersResult = main.FALSE
1547
1548 else:
1549 main.log.error( "Error in getting dataplane clusters " +
1550 "from ONOS" + controllerStr )
1551 consistentClustersResult = main.FALSE
1552 main.log.warn( "ONOS" + controllerStr +
1553 " clusters response: " +
1554 repr( clusters[ controller ] ) )
1555 utilities.assert_equals(
1556 expect=main.TRUE,
1557 actual=consistentClustersResult,
1558 onpass="Clusters view is consistent across all ONOS nodes",
1559 onfail="ONOS nodes have different views of clusters" )
1560 # there should always only be one cluster
1561 main.step( "Cluster view correct across ONOS nodes" )
1562 try:
1563 numClusters = len( json.loads( clusters[ 0 ] ) )
1564 except ( ValueError, TypeError ):
1565 main.log.exception( "Error parsing clusters[0]: " +
1566 repr( clusters[ 0 ] ) )
1567 clusterResults = main.FALSE
1568 if numClusters == 1:
1569 clusterResults = main.TRUE
1570 utilities.assert_equals(
1571 expect=1,
1572 actual=numClusters,
1573 onpass="ONOS shows 1 SCC",
1574 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1575
1576 main.step( "Comparing ONOS topology to MN" )
1577 devicesResults = main.TRUE
1578 linksResults = main.TRUE
1579 hostsResults = main.TRUE
1580 mnSwitches = main.Mininet1.getSwitches()
1581 mnLinks = main.Mininet1.getLinks()
1582 mnHosts = main.Mininet1.getHosts()
1583 for controller in range( numControllers ):
1584 controllerStr = str( controller + 1 )
1585 if devices[ controller ] and ports[ controller ] and\
1586 "Error" not in devices[ controller ] and\
1587 "Error" not in ports[ controller ]:
1588
1589 currentDevicesResult = main.Mininet1.compareSwitches(
1590 mnSwitches,
1591 json.loads( devices[ controller ] ),
1592 json.loads( ports[ controller ] ) )
1593 else:
1594 currentDevicesResult = main.FALSE
1595 utilities.assert_equals( expect=main.TRUE,
1596 actual=currentDevicesResult,
1597 onpass="ONOS" + controllerStr +
1598 " Switches view is correct",
1599 onfail="ONOS" + controllerStr +
1600 " Switches view is incorrect" )
1601 if links[ controller ] and "Error" not in links[ controller ]:
1602 currentLinksResult = main.Mininet1.compareLinks(
1603 mnSwitches, mnLinks,
1604 json.loads( links[ controller ] ) )
1605 else:
1606 currentLinksResult = main.FALSE
1607 utilities.assert_equals( expect=main.TRUE,
1608 actual=currentLinksResult,
1609 onpass="ONOS" + controllerStr +
1610 " links view is correct",
1611 onfail="ONOS" + controllerStr +
1612 " links view is incorrect" )
1613
1614 if hosts[ controller ] or "Error" not in hosts[ controller ]:
1615 currentHostsResult = main.Mininet1.compareHosts(
1616 mnHosts,
1617 hosts[ controller ] )
1618 else:
1619 currentHostsResult = main.FALSE
1620 utilities.assert_equals( expect=main.TRUE,
1621 actual=currentHostsResult,
1622 onpass="ONOS" + controllerStr +
1623 " hosts exist in Mininet",
1624 onfail="ONOS" + controllerStr +
1625 " hosts don't match Mininet" )
1626
1627 devicesResults = devicesResults and currentDevicesResult
1628 linksResults = linksResults and currentLinksResult
1629 hostsResults = hostsResults and currentHostsResult
1630
1631 main.step( "Device information is correct" )
1632 utilities.assert_equals(
1633 expect=main.TRUE,
1634 actual=devicesResults,
1635 onpass="Device information is correct",
1636 onfail="Device information is incorrect" )
1637
1638 main.step( "Links are correct" )
1639 utilities.assert_equals(
1640 expect=main.TRUE,
1641 actual=linksResults,
1642 onpass="Link are correct",
1643 onfail="Links are incorrect" )
1644
1645 main.step( "Hosts are correct" )
1646 utilities.assert_equals(
1647 expect=main.TRUE,
1648 actual=hostsResults,
1649 onpass="Hosts are correct",
1650 onfail="Hosts are incorrect" )
1651
1652 def CASE6( self, main ):
1653 """
1654 The Failure case.
1655 """
1656 import time
1657 assert numControllers, "numControllers not defined"
1658 assert main, "main not defined"
1659 assert utilities.assert_equals, "utilities.assert_equals not defined"
1660 assert CLIs, "CLIs not defined"
1661 assert nodes, "nodes not defined"
1662 main.case( "Restart minority of ONOS nodes" )
1663 main.step( "Killing 3 ONOS nodes" )
1664 killTime = time.time()
1665 # TODO: Randomize these nodes or base this on partitions
1666 # TODO: use threads in this case
1667 killResults = main.ONOSbench.onosKill( nodes[0].ip_address )
1668 time.sleep( 10 )
1669 killResults = killResults and\
1670 main.ONOSbench.onosKill( nodes[1].ip_address )
1671 time.sleep( 10 )
1672 killResults = killResults and\
1673 main.ONOSbench.onosKill( nodes[2].ip_address )
1674 utilities.assert_equals( expect=main.TRUE, actual=killResults,
1675 onpass="ONOS Killed successfully",
1676 onfail="ONOS kill NOT successful" )
1677
1678 main.step( "Checking if ONOS is up yet" )
1679 count = 0
1680 onosIsupResult = main.FALSE
1681 while onosIsupResult == main.FALSE and count < 10:
1682 onos1Isup = main.ONOSbench.isup( nodes[0].ip_address )
1683 onos2Isup = main.ONOSbench.isup( nodes[1].ip_address )
1684 onos3Isup = main.ONOSbench.isup( nodes[2].ip_address )
1685 onosIsupResult = onos1Isup and onos2Isup and onos3Isup
1686 count = count + 1
1687 # TODO: if it becomes an issue, we can retry this step a few times
1688 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1689 onpass="ONOS restarted successfully",
1690 onfail="ONOS restart NOT successful" )
1691
1692 main.step( "Restarting ONOS CLIs" )
1693 cliResult1 = main.ONOScli1.startOnosCli( nodes[0].ip_address )
1694 cliResult2 = main.ONOScli2.startOnosCli( nodes[1].ip_address )
1695 cliResult3 = main.ONOScli3.startOnosCli( nodes[2].ip_address )
1696 cliResults = cliResult1 and cliResult2 and cliResult3
1697 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1698 onpass="ONOS cli restarted",
1699 onfail="ONOS cli did not restart" )
1700
1701 # Grab the time of restart so we chan check how long the gossip
1702 # protocol has had time to work
1703 main.restartTime = time.time() - killTime
1704 main.log.debug( "Restart time: " + str( main.restartTime ) )
1705 '''
1706 # FIXME: revisit test plan for election with madan
1707 # Rerun for election on restarted nodes
1708 run1 = CLIs[0].electionTestRun()
1709 run2 = CLIs[1].electionTestRun()
1710 run3 = CLIs[2].electionTestRun()
1711 runResults = run1 and run2 and run3
1712 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1713 onpass="Reran for election",
1714 onfail="Failed to rerun for election" )
1715 '''
1716 # TODO: MAke this configurable. Also, we are breaking the above timer
1717 time.sleep( 60 )
1718 main.log.debug( CLIs[0].nodes( jsonFormat=False ) )
1719 main.log.debug( CLIs[0].leaders( jsonFormat=False ) )
1720 main.log.debug( CLIs[0].partitions( jsonFormat=False ) )
1721
1722 def CASE7( self, main ):
1723 """
1724 Check state after ONOS failure
1725 """
1726 import json
1727 assert numControllers, "numControllers not defined"
1728 assert main, "main not defined"
1729 assert utilities.assert_equals, "utilities.assert_equals not defined"
1730 assert CLIs, "CLIs not defined"
1731 assert nodes, "nodes not defined"
1732 main.case( "Running ONOS Constant State Tests" )
1733
1734 main.step( "Check that each switch has a master" )
1735 # Assert that each device has a master
1736 rolesNotNull = main.TRUE
1737 threads = []
1738 for i in range( numControllers ):
1739 t = main.Thread( target=CLIs[i].rolesNotNull,
1740 name="rolesNotNull-" + str( i ),
1741 args=[ ] )
1742 threads.append( t )
1743 t.start()
1744
1745 for t in threads:
1746 t.join()
1747 rolesNotNull = rolesNotNull and t.result
1748 utilities.assert_equals(
1749 expect=main.TRUE,
1750 actual=rolesNotNull,
1751 onpass="Each device has a master",
1752 onfail="Some devices don't have a master assigned" )
1753
1754 main.step( "Read device roles from ONOS" )
1755 ONOSMastership = []
1756 consistentMastership = True
1757 rolesResults = True
1758 threads = []
1759 for i in range( numControllers ):
1760 t = main.Thread( target=CLIs[i].roles,
1761 name="roles-" + str( i ),
1762 args=[] )
1763 threads.append( t )
1764 t.start()
1765
1766 for t in threads:
1767 t.join()
1768 ONOSMastership.append( t.result )
1769
1770 for i in range( numControllers ):
1771 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1772 main.log.error( "Error in getting ONOS" + str( i + 1 ) +
1773 " roles" )
1774 main.log.warn(
1775 "ONOS" + str( i + 1 ) + " mastership response: " +
1776 repr( ONOSMastership[i] ) )
1777 rolesResults = False
1778 utilities.assert_equals(
1779 expect=True,
1780 actual=rolesResults,
1781 onpass="No error in reading roles output",
1782 onfail="Error in reading roles from ONOS" )
1783
1784 main.step( "Check for consistency in roles from each controller" )
1785 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1786 main.log.info(
1787 "Switch roles are consistent across all ONOS nodes" )
1788 else:
1789 consistentMastership = False
1790 utilities.assert_equals(
1791 expect=True,
1792 actual=consistentMastership,
1793 onpass="Switch roles are consistent across all ONOS nodes",
1794 onfail="ONOS nodes have different views of switch roles" )
1795
1796 if rolesResults and not consistentMastership:
1797 for i in range( numControllers ):
1798 main.log.warn(
1799 "ONOS" + str( i + 1 ) + " roles: ",
1800 json.dumps(
1801 json.loads( ONOSMastership[ i ] ),
1802 sort_keys=True,
1803 indent=4,
1804 separators=( ',', ': ' ) ) )
1805
1806 # NOTE: we expect mastership to change on controller failure
1807 '''
1808 description2 = "Compare switch roles from before failure"
1809 main.step( description2 )
1810 try:
1811 currentJson = json.loads( ONOSMastership[0] )
1812 oldJson = json.loads( mastershipState )
1813 except ( ValueError, TypeError ):
1814 main.log.exception( "Something is wrong with parsing " +
1815 "ONOSMastership[0] or mastershipState" )
1816 main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[0] ) )
1817 main.log.error( "mastershipState" + repr( mastershipState ) )
1818 main.cleanup()
1819 main.exit()
1820 mastershipCheck = main.TRUE
1821 for i in range( 1, 29 ):
1822 switchDPID = str(
1823 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
1824 current = [ switch[ 'master' ] for switch in currentJson
1825 if switchDPID in switch[ 'id' ] ]
1826 old = [ switch[ 'master' ] for switch in oldJson
1827 if switchDPID in switch[ 'id' ] ]
1828 if current == old:
1829 mastershipCheck = mastershipCheck and main.TRUE
1830 else:
1831 main.log.warn( "Mastership of switch %s changed" % switchDPID )
1832 mastershipCheck = main.FALSE
1833 utilities.assert_equals(
1834 expect=main.TRUE,
1835 actual=mastershipCheck,
1836 onpass="Mastership of Switches was not changed",
1837 onfail="Mastership of some switches changed" )
1838 '''
1839
1840 main.step( "Get the intents and compare across all nodes" )
1841 ONOSIntents = []
1842 intentCheck = main.FALSE
1843 consistentIntents = True
1844 intentsResults = True
1845 threads = []
1846 for i in range( numControllers ):
1847 t = main.Thread( target=CLIs[i].intents,
1848 name="intents-" + str( i ),
1849 args=[],
1850 kwargs={ 'jsonFormat': True } )
1851 threads.append( t )
1852 t.start()
1853
1854 for t in threads:
1855 t.join()
1856 ONOSIntents.append( t.result )
1857
1858 for i in range( numControllers ):
1859 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1860 main.log.error( "Error in getting ONOS" + str( i + 1 ) +
1861 " intents" )
1862 main.log.warn( "ONOS" + str( i + 1 ) + " intents response: " +
1863 repr( ONOSIntents[ i ] ) )
1864 intentsResults = False
1865 utilities.assert_equals(
1866 expect=True,
1867 actual=intentsResults,
1868 onpass="No error in reading intents output",
1869 onfail="Error in reading intents from ONOS" )
1870
1871 main.step( "Check for consistency in Intents from each controller" )
1872 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1873 main.log.info( "Intents are consistent across all ONOS " +
1874 "nodes" )
1875 else:
1876 consistentIntents = False
1877
1878 # Try to make it easy to figure out what is happening
1879 #
1880 # Intent ONOS1 ONOS2 ...
1881 # 0x01 INSTALLED INSTALLING
1882 # ... ... ...
1883 # ... ... ...
1884 title = " ID"
1885 for n in range( numControllers ):
1886 title += " " * 10 + "ONOS" + str( n + 1 )
1887 main.log.warn( title )
1888 # get all intent keys in the cluster
1889 keys = []
1890 for nodeStr in ONOSIntents:
1891 node = json.loads( nodeStr )
1892 for intent in node:
1893 keys.append( intent.get( 'id' ) )
1894 keys = set( keys )
1895 for key in keys:
1896 row = "%-13s" % key
1897 for nodeStr in ONOSIntents:
1898 node = json.loads( nodeStr )
1899 for intent in node:
1900 if intent.get( 'id' ) == key:
1901 row += "%-15s" % intent.get( 'state' )
1902 main.log.warn( row )
1903 # End table view
1904
1905 utilities.assert_equals(
1906 expect=True,
1907 actual=consistentIntents,
1908 onpass="Intents are consistent across all ONOS nodes",
1909 onfail="ONOS nodes have different views of intents" )
1910 intentStates = []
1911 for node in ONOSIntents: # Iter through ONOS nodes
1912 nodeStates = []
1913 # Iter through intents of a node
1914 try:
1915 for intent in json.loads( node ):
1916 nodeStates.append( intent[ 'state' ] )
1917 except ( ValueError, TypeError ):
1918 main.log.exception( "Error in parsing intents" )
1919 main.log.error( repr( node ) )
1920 intentStates.append( nodeStates )
1921 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
1922 main.log.info( dict( out ) )
1923
1924 if intentsResults and not consistentIntents:
1925 for i in range( numControllers ):
1926 main.log.warn( "ONOS" + str( i + 1 ) + " intents: " )
1927 main.log.warn( json.dumps(
1928 json.loads( ONOSIntents[ i ] ),
1929 sort_keys=True,
1930 indent=4,
1931 separators=( ',', ': ' ) ) )
1932 elif intentsResults and consistentIntents:
1933 intentCheck = main.TRUE
1934
1935 # NOTE: Store has no durability, so intents are lost across system
1936 # restarts
1937 main.step( "Compare current intents with intents before the failure" )
1938 # NOTE: this requires case 5 to pass for intentState to be set.
1939 # maybe we should stop the test if that fails?
1940 sameIntents = main.FALSE
1941 if intentState and intentState == ONOSIntents[ 0 ]:
1942 sameIntents = main.TRUE
1943 main.log.info( "Intents are consistent with before failure" )
1944 # TODO: possibly the states have changed? we may need to figure out
1945 # what the acceptable states are
1946 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
1947 sameIntents = main.TRUE
1948 try:
1949 before = json.loads( intentState )
1950 after = json.loads( ONOSIntents[ 0 ] )
1951 for intent in before:
1952 if intent not in after:
1953 sameIntents = main.FALSE
1954 main.log.debug( "Intent is not currently in ONOS " +
1955 "(at least in the same form):" )
1956 main.log.debug( json.dumps( intent ) )
1957 except ( ValueError, TypeError ):
1958 main.log.exception( "Exception printing intents" )
1959 main.log.debug( repr( ONOSIntents[0] ) )
1960 main.log.debug( repr( intentState ) )
1961 if sameIntents == main.FALSE:
1962 try:
1963 main.log.debug( "ONOS intents before: " )
1964 main.log.debug( json.dumps( json.loads( intentState ),
1965 sort_keys=True, indent=4,
1966 separators=( ',', ': ' ) ) )
1967 main.log.debug( "Current ONOS intents: " )
1968 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
1969 sort_keys=True, indent=4,
1970 separators=( ',', ': ' ) ) )
1971 except ( ValueError, TypeError ):
1972 main.log.exception( "Exception printing intents" )
1973 main.log.debug( repr( ONOSIntents[0] ) )
1974 main.log.debug( repr( intentState ) )
1975 utilities.assert_equals(
1976 expect=main.TRUE,
1977 actual=sameIntents,
1978 onpass="Intents are consistent with before failure",
1979 onfail="The Intents changed during failure" )
1980 intentCheck = intentCheck and sameIntents
1981
1982 main.step( "Get the OF Table entries and compare to before " +
1983 "component failure" )
1984 FlowTables = main.TRUE
1985 flows2 = []
1986 for i in range( 28 ):
1987 main.log.info( "Checking flow table on s" + str( i + 1 ) )
1988 tmpFlows = main.Mininet2.getFlowTable( 1.3, "s" + str( i + 1 ) )
1989 flows2.append( tmpFlows )
1990 tempResult = main.Mininet2.flowComp(
1991 flow1=flows[ i ],
1992 flow2=tmpFlows )
1993 FlowTables = FlowTables and tempResult
1994 if FlowTables == main.FALSE:
1995 main.log.info( "Differences in flow table for switch: s" +
1996 str( i + 1 ) )
1997 utilities.assert_equals(
1998 expect=main.TRUE,
1999 actual=FlowTables,
2000 onpass="No changes were found in the flow tables",
2001 onfail="Changes were found in the flow tables" )
2002
2003 main.Mininet2.pingLongKill()
2004 '''
2005 main.step( "Check the continuous pings to ensure that no packets " +
2006 "were dropped during component failure" )
2007 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2008 main.params[ 'TESTONIP' ] )
2009 LossInPings = main.FALSE
2010 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2011 for i in range( 8, 18 ):
2012 main.log.info(
2013 "Checking for a loss in pings along flow from s" +
2014 str( i ) )
2015 LossInPings = main.Mininet2.checkForLoss(
2016 "/tmp/ping.h" +
2017 str( i ) ) or LossInPings
2018 if LossInPings == main.TRUE:
2019 main.log.info( "Loss in ping detected" )
2020 elif LossInPings == main.ERROR:
2021 main.log.info( "There are multiple mininet process running" )
2022 elif LossInPings == main.FALSE:
2023 main.log.info( "No Loss in the pings" )
2024 main.log.info( "No loss of dataplane connectivity" )
2025 utilities.assert_equals(
2026 expect=main.FALSE,
2027 actual=LossInPings,
2028 onpass="No Loss of connectivity",
2029 onfail="Loss of dataplane connectivity detected" )
2030 '''
2031
2032 main.step( "Leadership Election is still functional" )
2033 # Test of LeadershipElection
2034 leaderList = []
2035 # FIXME: make sure this matches nodes that were restarted
2036 restarted = [ nodes[0].ip_address, nodes[1].ip_address,
2037 nodes[2].ip_address ]
2038
2039 leaderResult = main.TRUE
2040 for cli in CLIs:
2041 leaderN = cli.electionTestLeader()
2042 leaderList.append( leaderN )
2043 if leaderN == main.FALSE:
2044 # error in response
2045 main.log.error( "Something is wrong with " +
2046 "electionTestLeader function, check the" +
2047 " error logs" )
2048 leaderResult = main.FALSE
2049 elif leaderN is None:
2050 main.log.error( cli.name +
2051 " shows no leader for the election-app was" +
2052 " elected after the old one died" )
2053 leaderResult = main.FALSE
2054 elif leaderN in restarted:
2055 main.log.error( cli.name + " shows " + str( leaderN ) +
2056 " as leader for the election-app, but it " +
2057 "was restarted" )
2058 leaderResult = main.FALSE
2059 if len( set( leaderList ) ) != 1:
2060 leaderResult = main.FALSE
2061 main.log.error(
2062 "Inconsistent view of leader for the election test app" )
2063 # TODO: print the list
2064 utilities.assert_equals(
2065 expect=main.TRUE,
2066 actual=leaderResult,
2067 onpass="Leadership election passed",
2068 onfail="Something went wrong with Leadership election" )
2069
2070 def CASE8( self, main ):
2071 """
2072 Compare topo
2073 """
2074 import json
2075 import time
2076 assert numControllers, "numControllers not defined"
2077 assert main, "main not defined"
2078 assert utilities.assert_equals, "utilities.assert_equals not defined"
2079 assert CLIs, "CLIs not defined"
2080 assert nodes, "nodes not defined"
2081
2082 main.case( "Compare ONOS Topology view to Mininet topology" )
2083 main.caseExplaination = "Compare topology objects between Mininet" +\
2084 " and ONOS"
2085
2086 main.step( "Comparing ONOS topology to MN" )
2087 devicesResults = main.TRUE
2088 linksResults = main.TRUE
2089 hostsResults = main.TRUE
2090 hostAttachmentResults = True
2091 topoResult = main.FALSE
2092 elapsed = 0
2093 count = 0
2094 main.step( "Collecting topology information from ONOS" )
2095 startTime = time.time()
2096 # Give time for Gossip to work
2097 while topoResult == main.FALSE and elapsed < 60:
2098 count += 1
2099 cliStart = time.time()
2100 devices = []
2101 threads = []
2102 for i in range( numControllers ):
2103 t = main.Thread( target=CLIs[i].devices,
2104 name="devices-" + str( i ),
2105 args=[ ] )
2106 threads.append( t )
2107 t.start()
2108
2109 for t in threads:
2110 t.join()
2111 devices.append( t.result )
2112 hosts = []
2113 ipResult = main.TRUE
2114 threads = []
2115 for i in range( numControllers ):
2116 t = main.Thread( target=CLIs[i].hosts,
2117 name="hosts-" + str( i ),
2118 args=[ ] )
2119 threads.append( t )
2120 t.start()
2121
2122 for t in threads:
2123 t.join()
2124 try:
2125 hosts.append( json.loads( t.result ) )
2126 except ( ValueError, TypeError ):
2127 main.log.exception( "Error parsing hosts results" )
2128 main.log.error( repr( t.result ) )
2129 for controller in range( 0, len( hosts ) ):
2130 controllerStr = str( controller + 1 )
2131 for host in hosts[ controller ]:
2132 if host is None or host.get( 'ipAddresses', [] ) == []:
2133 main.log.error(
2134 "DEBUG:Error with host ipAddresses on controller" +
2135 controllerStr + ": " + str( host ) )
2136 ipResult = main.FALSE
2137 ports = []
2138 threads = []
2139 for i in range( numControllers ):
2140 t = main.Thread( target=CLIs[i].ports,
2141 name="ports-" + str( i ),
2142 args=[ ] )
2143 threads.append( t )
2144 t.start()
2145
2146 for t in threads:
2147 t.join()
2148 ports.append( t.result )
2149 links = []
2150 threads = []
2151 for i in range( numControllers ):
2152 t = main.Thread( target=CLIs[i].links,
2153 name="links-" + str( i ),
2154 args=[ ] )
2155 threads.append( t )
2156 t.start()
2157
2158 for t in threads:
2159 t.join()
2160 links.append( t.result )
2161 clusters = []
2162 threads = []
2163 for i in range( numControllers ):
2164 t = main.Thread( target=CLIs[i].clusters,
2165 name="clusters-" + str( i ),
2166 args=[ ] )
2167 threads.append( t )
2168 t.start()
2169
2170 for t in threads:
2171 t.join()
2172 clusters.append( t.result )
2173
2174 elapsed = time.time() - startTime
2175 cliTime = time.time() - cliStart
2176 print "Elapsed time: " + str( elapsed )
2177 print "CLI time: " + str( cliTime )
2178
2179 mnSwitches = main.Mininet1.getSwitches()
2180 mnLinks = main.Mininet1.getLinks()
2181 mnHosts = main.Mininet1.getHosts()
2182 for controller in range( numControllers ):
2183 controllerStr = str( controller + 1 )
2184 if devices[ controller ] and ports[ controller ] and\
2185 "Error" not in devices[ controller ] and\
2186 "Error" not in ports[ controller ]:
2187
2188 currentDevicesResult = main.Mininet1.compareSwitches(
2189 mnSwitches,
2190 json.loads( devices[ controller ] ),
2191 json.loads( ports[ controller ] ) )
2192 else:
2193 currentDevicesResult = main.FALSE
2194 utilities.assert_equals( expect=main.TRUE,
2195 actual=currentDevicesResult,
2196 onpass="ONOS" + controllerStr +
2197 " Switches view is correct",
2198 onfail="ONOS" + controllerStr +
2199 " Switches view is incorrect" )
2200
2201 if links[ controller ] and "Error" not in links[ controller ]:
2202 currentLinksResult = main.Mininet1.compareLinks(
2203 mnSwitches, mnLinks,
2204 json.loads( links[ controller ] ) )
2205 else:
2206 currentLinksResult = main.FALSE
2207 utilities.assert_equals( expect=main.TRUE,
2208 actual=currentLinksResult,
2209 onpass="ONOS" + controllerStr +
2210 " links view is correct",
2211 onfail="ONOS" + controllerStr +
2212 " links view is incorrect" )
2213
2214 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2215 currentHostsResult = main.Mininet1.compareHosts(
2216 mnHosts,
2217 hosts[ controller ] )
2218 else:
2219 currentHostsResult = main.FALSE
2220 utilities.assert_equals( expect=main.TRUE,
2221 actual=currentHostsResult,
2222 onpass="ONOS" + controllerStr +
2223 " hosts exist in Mininet",
2224 onfail="ONOS" + controllerStr +
2225 " hosts don't match Mininet" )
2226 # CHECKING HOST ATTACHMENT POINTS
2227 hostAttachment = True
2228 zeroHosts = False
2229 # FIXME: topo-HA/obelisk specific mappings:
2230 # key is mac and value is dpid
2231 mappings = {}
2232 for i in range( 1, 29 ): # hosts 1 through 28
2233 # set up correct variables:
2234 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2235 if i == 1:
2236 deviceId = "1000".zfill(16)
2237 elif i == 2:
2238 deviceId = "2000".zfill(16)
2239 elif i == 3:
2240 deviceId = "3000".zfill(16)
2241 elif i == 4:
2242 deviceId = "3004".zfill(16)
2243 elif i == 5:
2244 deviceId = "5000".zfill(16)
2245 elif i == 6:
2246 deviceId = "6000".zfill(16)
2247 elif i == 7:
2248 deviceId = "6007".zfill(16)
2249 elif i >= 8 and i <= 17:
2250 dpid = '3' + str( i ).zfill( 3 )
2251 deviceId = dpid.zfill(16)
2252 elif i >= 18 and i <= 27:
2253 dpid = '6' + str( i ).zfill( 3 )
2254 deviceId = dpid.zfill(16)
2255 elif i == 28:
2256 deviceId = "2800".zfill(16)
2257 mappings[ macId ] = deviceId
2258 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2259 if hosts[ controller ] == []:
2260 main.log.warn( "There are no hosts discovered" )
2261 zeroHosts = True
2262 else:
2263 for host in hosts[ controller ]:
2264 mac = None
2265 location = None
2266 device = None
2267 port = None
2268 try:
2269 mac = host.get( 'mac' )
2270 assert mac, "mac field could not be found for this host object"
2271
2272 location = host.get( 'location' )
2273 assert location, "location field could not be found for this host object"
2274
2275 # Trim the protocol identifier off deviceId
2276 device = str( location.get( 'elementId' ) ).split(':')[1]
2277 assert device, "elementId field could not be found for this host location object"
2278
2279 port = location.get( 'port' )
2280 assert port, "port field could not be found for this host location object"
2281
2282 # Now check if this matches where they should be
2283 if mac and device and port:
2284 if str( port ) != "1":
2285 main.log.error( "The attachment port is incorrect for " +
2286 "host " + str( mac ) +
2287 ". Expected: 1 Actual: " + str( port) )
2288 hostAttachment = False
2289 if device != mappings[ str( mac ) ]:
2290 main.log.error( "The attachment device is incorrect for " +
2291 "host " + str( mac ) +
2292 ". Expected: " + mappings[ str( mac ) ] +
2293 " Actual: " + device )
2294 hostAttachment = False
2295 else:
2296 hostAttachment = False
2297 except AssertionError:
2298 main.log.exception( "Json object not as expected" )
2299 main.log.error( repr( host ) )
2300 hostAttachment = False
2301 else:
2302 main.log.error( "No hosts json output or \"Error\"" +
2303 " in output. hosts = " +
2304 repr( hosts[ controller ] ) )
2305 if zeroHosts is False:
2306 hostAttachment = True
2307
2308 # END CHECKING HOST ATTACHMENT POINTS
2309 devicesResults = devicesResults and currentDevicesResult
2310 linksResults = linksResults and currentLinksResult
2311 hostsResults = hostsResults and currentHostsResult
2312 hostAttachmentResults = hostAttachmentResults and\
2313 hostAttachment
2314
2315 # Compare json objects for hosts and dataplane clusters
2316
2317 # hosts
2318 main.step( "Hosts view is consistent across all ONOS nodes" )
2319 consistentHostsResult = main.TRUE
2320 for controller in range( len( hosts ) ):
2321 controllerStr = str( controller + 1 )
2322 if "Error" not in hosts[ controller ]:
2323 if hosts[ controller ] == hosts[ 0 ]:
2324 continue
2325 else: # hosts not consistent
2326 main.log.error( "hosts from ONOS" + controllerStr +
2327 " is inconsistent with ONOS1" )
2328 main.log.warn( repr( hosts[ controller ] ) )
2329 consistentHostsResult = main.FALSE
2330
2331 else:
2332 main.log.error( "Error in getting ONOS hosts from ONOS" +
2333 controllerStr )
2334 consistentHostsResult = main.FALSE
2335 main.log.warn( "ONOS" + controllerStr +
2336 " hosts response: " +
2337 repr( hosts[ controller ] ) )
2338 utilities.assert_equals(
2339 expect=main.TRUE,
2340 actual=consistentHostsResult,
2341 onpass="Hosts view is consistent across all ONOS nodes",
2342 onfail="ONOS nodes have different views of hosts" )
2343
2344 main.step( "Hosts information is correct" )
2345 hostsResults = hostsResults and ipResult
2346 utilities.assert_equals(
2347 expect=main.TRUE,
2348 actual=hostsResults,
2349 onpass="Host information is correct",
2350 onfail="Host information is incorrect" )
2351
2352 main.step( "Host attachment points to the network" )
2353 utilities.assert_equals(
2354 expect=True,
2355 actual=hostAttachmentResults,
2356 onpass="Hosts are correctly attached to the network",
2357 onfail="ONOS did not correctly attach hosts to the network" )
2358
2359 # Strongly connected clusters of devices
2360 main.step( "Clusters view is consistent across all ONOS nodes" )
2361 consistentClustersResult = main.TRUE
2362 for controller in range( len( clusters ) ):
2363 controllerStr = str( controller + 1 )
2364 if "Error" not in clusters[ controller ]:
2365 if clusters[ controller ] == clusters[ 0 ]:
2366 continue
2367 else: # clusters not consistent
2368 main.log.error( "clusters from ONOS" +
2369 controllerStr +
2370 " is inconsistent with ONOS1" )
2371 consistentClustersResult = main.FALSE
2372
2373 else:
2374 main.log.error( "Error in getting dataplane clusters " +
2375 "from ONOS" + controllerStr )
2376 consistentClustersResult = main.FALSE
2377 main.log.warn( "ONOS" + controllerStr +
2378 " clusters response: " +
2379 repr( clusters[ controller ] ) )
2380 utilities.assert_equals(
2381 expect=main.TRUE,
2382 actual=consistentClustersResult,
2383 onpass="Clusters view is consistent across all ONOS nodes",
2384 onfail="ONOS nodes have different views of clusters" )
2385
2386 main.step( "There is only one SCC" )
2387 # there should always only be one cluster
2388 try:
2389 numClusters = len( json.loads( clusters[ 0 ] ) )
2390 except ( ValueError, TypeError ):
2391 main.log.exception( "Error parsing clusters[0]: " +
2392 repr( clusters[0] ) )
2393 clusterResults = main.FALSE
2394 if numClusters == 1:
2395 clusterResults = main.TRUE
2396 utilities.assert_equals(
2397 expect=1,
2398 actual=numClusters,
2399 onpass="ONOS shows 1 SCC",
2400 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2401
2402 topoResult = ( devicesResults and linksResults
2403 and hostsResults and consistentHostsResult
2404 and consistentClustersResult and clusterResults
2405 and ipResult and hostAttachmentResults )
2406
2407 topoResult = topoResult and int( count <= 2 )
2408 note = "note it takes about " + str( int( cliTime ) ) + \
2409 " seconds for the test to make all the cli calls to fetch " +\
2410 "the topology from each ONOS instance"
2411 main.log.info(
2412 "Very crass estimate for topology discovery/convergence( " +
2413 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2414 str( count ) + " tries" )
2415
2416 main.step( "Device information is correct" )
2417 utilities.assert_equals(
2418 expect=main.TRUE,
2419 actual=devicesResults,
2420 onpass="Device information is correct",
2421 onfail="Device information is incorrect" )
2422
2423 main.step( "Links are correct" )
2424 utilities.assert_equals(
2425 expect=main.TRUE,
2426 actual=linksResults,
2427 onpass="Link are correct",
2428 onfail="Links are incorrect" )
2429
2430 # FIXME: move this to an ONOS state case
2431 main.step( "Checking ONOS nodes" )
2432 nodesOutput = []
2433 nodeResults = main.TRUE
2434 threads = []
2435 for i in range( numControllers ):
2436 t = main.Thread( target=CLIs[i].nodes,
2437 name="nodes-" + str( i ),
2438 args=[ ] )
2439 threads.append( t )
2440 t.start()
2441
2442 for t in threads:
2443 t.join()
2444 nodesOutput.append( t.result )
2445 ips = [ node.ip_address for node in nodes ]
2446 for i in nodesOutput:
2447 try:
2448 current = json.loads( i )
2449 for node in current:
2450 currentResult = main.FALSE
2451 if node['ip'] in ips: # node in nodes() output is in cell
2452 if node['state'] == 'ACTIVE':
2453 currentResult = main.TRUE
2454 else:
2455 main.log.error( "Error in ONOS node availability" )
2456 main.log.error(
2457 json.dumps( current,
2458 sort_keys=True,
2459 indent=4,
2460 separators=( ',', ': ' ) ) )
2461 break
2462 nodeResults = nodeResults and currentResult
2463 except ( ValueError, TypeError ):
2464 main.log.error( "Error parsing nodes output" )
2465 main.log.warn( repr( i ) )
2466 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2467 onpass="Nodes check successful",
2468 onfail="Nodes check NOT successful" )
2469
2470 def CASE9( self, main ):
2471 """
2472 Link s3-s28 down
2473 """
2474 import time
2475 assert numControllers, "numControllers not defined"
2476 assert main, "main not defined"
2477 assert utilities.assert_equals, "utilities.assert_equals not defined"
2478 assert CLIs, "CLIs not defined"
2479 assert nodes, "nodes not defined"
2480 # NOTE: You should probably run a topology check after this
2481
2482 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2483
2484 description = "Turn off a link to ensure that Link Discovery " +\
2485 "is working properly"
2486 main.case( description )
2487
2488 main.step( "Kill Link between s3 and s28" )
2489 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2490 main.log.info( "Waiting " + str( linkSleep ) +
2491 " seconds for link down to be discovered" )
2492 time.sleep( linkSleep )
2493 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2494 onpass="Link down successful",
2495 onfail="Failed to bring link down" )
2496 # TODO do some sort of check here
2497
2498 def CASE10( self, main ):
2499 """
2500 Link s3-s28 up
2501 """
2502 import time
2503 assert numControllers, "numControllers not defined"
2504 assert main, "main not defined"
2505 assert utilities.assert_equals, "utilities.assert_equals not defined"
2506 assert CLIs, "CLIs not defined"
2507 assert nodes, "nodes not defined"
2508 # NOTE: You should probably run a topology check after this
2509
2510 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2511
2512 description = "Restore a link to ensure that Link Discovery is " + \
2513 "working properly"
2514 main.case( description )
2515
2516 main.step( "Bring link between s3 and s28 back up" )
2517 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2518 main.log.info( "Waiting " + str( linkSleep ) +
2519 " seconds for link up to be discovered" )
2520 time.sleep( linkSleep )
2521 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2522 onpass="Link up successful",
2523 onfail="Failed to bring link up" )
2524 # TODO do some sort of check here
2525
2526 def CASE11( self, main ):
2527 """
2528 Switch Down
2529 """
2530 # NOTE: You should probably run a topology check after this
2531 import time
2532 assert numControllers, "numControllers not defined"
2533 assert main, "main not defined"
2534 assert utilities.assert_equals, "utilities.assert_equals not defined"
2535 assert CLIs, "CLIs not defined"
2536 assert nodes, "nodes not defined"
2537
2538 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2539
2540 description = "Killing a switch to ensure it is discovered correctly"
2541 main.case( description )
2542 switch = main.params[ 'kill' ][ 'switch' ]
2543 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2544
2545 # TODO: Make this switch parameterizable
2546 main.step( "Kill " + switch )
2547 main.log.info( "Deleting " + switch )
2548 main.Mininet1.delSwitch( switch )
2549 main.log.info( "Waiting " + str( switchSleep ) +
2550 " seconds for switch down to be discovered" )
2551 time.sleep( switchSleep )
2552 device = main.ONOScli1.getDevice( dpid=switchDPID )
2553 # Peek at the deleted switch
2554 main.log.warn( str( device ) )
2555 result = main.FALSE
2556 if device and device[ 'available' ] is False:
2557 result = main.TRUE
2558 utilities.assert_equals( expect=main.TRUE, actual=result,
2559 onpass="Kill switch successful",
2560 onfail="Failed to kill switch?" )
2561
2562 def CASE12( self, main ):
2563 """
2564 Switch Up
2565 """
2566 # NOTE: You should probably run a topology check after this
2567 import time
2568 assert numControllers, "numControllers not defined"
2569 assert main, "main not defined"
2570 assert utilities.assert_equals, "utilities.assert_equals not defined"
2571 assert CLIs, "CLIs not defined"
2572 assert nodes, "nodes not defined"
2573 assert ONOS1Port, "ONOS1Port not defined"
2574 assert ONOS2Port, "ONOS2Port not defined"
2575 assert ONOS3Port, "ONOS3Port not defined"
2576 assert ONOS4Port, "ONOS4Port not defined"
2577 assert ONOS5Port, "ONOS5Port not defined"
2578 assert ONOS6Port, "ONOS6Port not defined"
2579 assert ONOS7Port, "ONOS7Port not defined"
2580
2581 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2582 switch = main.params[ 'kill' ][ 'switch' ]
2583 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2584 links = main.params[ 'kill' ][ 'links' ].split()
2585 description = "Adding a switch to ensure it is discovered correctly"
2586 main.case( description )
2587
2588 main.step( "Add back " + switch )
2589 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2590 for peer in links:
2591 main.Mininet1.addLink( switch, peer )
2592 ipList = []
2593 for i in range( numControllers ):
2594 ipList.append( nodes[ i ].ip_address )
2595 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2596 main.log.info( "Waiting " + str( switchSleep ) +
2597 " seconds for switch up to be discovered" )
2598 time.sleep( switchSleep )
2599 device = main.ONOScli1.getDevice( dpid=switchDPID )
2600 # Peek at the deleted switch
2601 main.log.warn( str( device ) )
2602 result = main.FALSE
2603 if device and device[ 'available' ]:
2604 result = main.TRUE
2605 utilities.assert_equals( expect=main.TRUE, actual=result,
2606 onpass="add switch successful",
2607 onfail="Failed to add switch?" )
2608
2609 def CASE13( self, main ):
2610 """
2611 Clean up
2612 """
2613 import os
2614 import time
2615 assert numControllers, "numControllers not defined"
2616 assert main, "main not defined"
2617 assert utilities.assert_equals, "utilities.assert_equals not defined"
2618 assert CLIs, "CLIs not defined"
2619 assert nodes, "nodes not defined"
2620
2621 # printing colors to terminal
2622 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2623 'blue': '\033[94m', 'green': '\033[92m',
2624 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2625 main.case( "Test Cleanup" )
2626 main.step( "Killing tcpdumps" )
2627 main.Mininet2.stopTcpdump()
2628
2629 testname = main.TEST
2630 if main.params[ 'BACKUP' ] == "True":
2631 main.step( "Copying MN pcap and ONOS log files to test station" )
2632 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2633 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2634 # NOTE: MN Pcap file is being saved to ~/packet_captures
2635 # scp this file as MN and TestON aren't necessarily the same vm
2636 # FIXME: scp
2637 # mn files
2638 # TODO: Load these from params
2639 # NOTE: must end in /
2640 logFolder = "/opt/onos/log/"
2641 logFiles = [ "karaf.log", "karaf.log.1" ]
2642 # NOTE: must end in /
2643 dstDir = "~/packet_captures/"
2644 for f in logFiles:
2645 for node in nodes:
2646 main.ONOSbench.handle.sendline( "scp sdn@" + node.ip_address +
2647 ":" + logFolder + f + " " +
2648 teststationUser + "@" +
2649 teststationIP + ":" +
2650 dstDir + str( testname ) +
2651 "-" + node.name + "-" + f )
2652 main.ONOSbench.handle.expect( "\$" )
2653
2654 # std*.log's
2655 # NOTE: must end in /
2656 logFolder = "/opt/onos/var/"
2657 logFiles = [ "stderr.log", "stdout.log" ]
2658 # NOTE: must end in /
2659 dstDir = "~/packet_captures/"
2660 for f in logFiles:
2661 for node in nodes:
2662 main.ONOSbench.handle.sendline( "scp sdn@" + node.ip_address +
2663 ":" + logFolder + f + " " +
2664 teststationUser + "@" +
2665 teststationIP + ":" +
2666 dstDir + str( testname ) +
2667 "-" + node.name + "-" + f )
2668 main.ONOSbench.handle.expect( "\$" )
2669 # sleep so scp can finish
2670 time.sleep( 10 )
2671 main.step( "Packing and rotating pcap archives" )
2672 os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
2673
2674 main.step( "Stopping Mininet" )
2675 mnResult = main.Mininet1.stopNet()
2676 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2677 onpass="Mininet stopped",
2678 onfail="MN cleanup NOT successful" )
2679
2680 main.step( "Checking ONOS Logs for errors" )
2681 for node in nodes:
2682 print colors[ 'purple' ] + "Checking logs for errors on " + \
2683 node.name + ":" + colors[ 'end' ]
2684 print main.ONOSbench.checkLogs( node.ip_address, restart=True )
2685
2686 try:
2687 timerLog = open( main.logdir + "/Timers.csv", 'w')
2688 # Overwrite with empty line and close
2689 labels = "Gossip Intents, Restart"
2690 data = str( gossipTime ) + ", " + str( main.restartTime )
2691 timerLog.write( labels + "\n" + data )
2692 timerLog.close()
2693 except NameError, e:
2694 main.log.exception(e)
2695
2696 def CASE14( self, main ):
2697 """
2698 start election app on all onos nodes
2699 """
2700 assert numControllers, "numControllers not defined"
2701 assert main, "main not defined"
2702 assert utilities.assert_equals, "utilities.assert_equals not defined"
2703 assert CLIs, "CLIs not defined"
2704 assert nodes, "nodes not defined"
2705
2706 main.case("Start Leadership Election app")
2707 main.step( "Install leadership election app" )
2708 appResult = main.ONOScli1.activateApp( "org.onosproject.election" )
2709 utilities.assert_equals(
2710 expect=main.TRUE,
2711 actual=appResult,
2712 onpass="Election app installed",
2713 onfail="Something went wrong with installing Leadership election" )
2714
2715 main.step( "Run for election on each node" )
2716 leaderResult = main.TRUE
2717 leaders = []
2718 for cli in CLIs:
2719 cli.electionTestRun()
2720 for cli in CLIs:
2721 leader = cli.electionTestLeader()
2722 if leader is None or leader == main.FALSE:
2723 main.log.error( cli.name + ": Leader for the election app " +
2724 "should be an ONOS node, instead got '" +
2725 str( leader ) + "'" )
2726 leaderResult = main.FALSE
2727 leaders.append( leader )
2728 utilities.assert_equals(
2729 expect=main.TRUE,
2730 actual=leaderResult,
2731 onpass="Successfully ran for leadership",
2732 onfail="Failed to run for leadership" )
2733
2734 main.step( "Check that each node shows the same leader" )
2735 sameLeader = main.TRUE
2736 if len( set( leaders ) ) != 1:
2737 sameLeader = main.FALSE
2738 main.log.error( "Results of electionTestLeader is order of CLIs:" +
2739 str( leaders ) )
2740 utilities.assert_equals(
2741 expect=main.TRUE,
2742 actual=sameLeader,
2743 onpass="Leadership is consistent for the election topic",
2744 onfail="Nodes have different leaders" )
2745
2746 def CASE15( self, main ):
2747 """
2748 Check that Leadership Election is still functional
2749 """
2750 import time
2751 assert numControllers, "numControllers not defined"
2752 assert main, "main not defined"
2753 assert utilities.assert_equals, "utilities.assert_equals not defined"
2754 assert CLIs, "CLIs not defined"
2755 assert nodes, "nodes not defined"
2756
2757 leaderResult = main.TRUE
2758 description = "Check that Leadership Election is still functional"
2759 main.case( description )
2760
2761 main.step( "Check that each node shows the same leader" )
2762 sameLeader = main.TRUE
2763 leaders = []
2764 for cli in CLIs:
2765 leader = cli.electionTestLeader()
2766 leaders.append( leader )
2767 if len( set( leaders ) ) != 1:
2768 sameLeader = main.FALSE
2769 main.log.error( "Results of electionTestLeader is order of CLIs:" +
2770 str( leaders ) )
2771 utilities.assert_equals(
2772 expect=main.TRUE,
2773 actual=sameLeader,
2774 onpass="Leadership is consistent for the election topic",
2775 onfail="Nodes have different leaders" )
2776
2777 main.step( "Find current leader and withdraw" )
2778 leader = main.ONOScli1.electionTestLeader()
2779 # do some sanity checking on leader before using it
2780 withdrawResult = main.FALSE
2781 if leader is None or leader == main.FALSE:
2782 main.log.error(
2783 "Leader for the election app should be an ONOS node," +
2784 "instead got '" + str( leader ) + "'" )
2785 leaderResult = main.FALSE
2786 oldLeader = None
2787 for i in range( len( CLIs ) ):
2788 if leader == nodes[ i ].ip_address:
2789 oldLeader = CLIs[ i ]
2790 break
2791 else: # FOR/ELSE statement
2792 main.log.error( "Leader election, could not find current leader" )
2793 if oldLeader:
2794 withdrawResult = oldLeader.electionTestWithdraw()
2795 utilities.assert_equals(
2796 expect=main.TRUE,
2797 actual=withdrawResult,
2798 onpass="Node was withdrawn from election",
2799 onfail="Node was not withdrawn from election" )
2800
2801 main.step( "Make sure new leader is elected" )
2802 # FIXME: use threads
2803 leaderList = []
2804 for cli in CLIs:
2805 leaderN = cli.electionTestLeader()
2806 leaderList.append( leaderN )
2807 if leaderN == leader:
2808 main.log.error( cli.name + " still sees " + str( leader ) +
2809 " as leader after they withdrew" )
2810 leaderResult = main.FALSE
2811 elif leaderN == main.FALSE:
2812 # error in response
2813 # TODO: add check for "Command not found:" in the driver, this
2814 # means the app isn't loaded
2815 main.log.error( "Something is wrong with " +
2816 "electionTestLeader function, " +
2817 "check the error logs" )
2818 leaderResult = main.FALSE
2819 elif leaderN is None:
2820 # node may not have recieved the event yet
2821 time.sleep(7)
2822 leaderN = cli.electionTestLeader()
2823 leaderList.pop()
2824 leaderList.append( leaderN )
2825 consistentLeader = main.FALSE
2826 if len( set( leaderList ) ) == 1:
2827 main.log.info( "Each Election-app sees '" +
2828 str( leaderList[ 0 ] ) +
2829 "' as the leader" )
2830 consistentLeader = main.TRUE
2831 else:
2832 main.log.error(
2833 "Inconsistent responses for leader of Election-app:" )
2834 for n in range( len( leaderList ) ):
2835 main.log.error( "ONOS" + str( n + 1 ) + " response: " +
2836 str( leaderList[ n ] ) )
2837 leaderResult = leaderResult and consistentLeader
2838 utilities.assert_equals(
2839 expect=main.TRUE,
2840 actual=leaderResult,
2841 onpass="Leadership election passed",
2842 onfail="Something went wrong with Leadership election" )
2843
2844 main.step( "Run for election on old leader( just so everyone " +
2845 "is in the hat )" )
2846 if oldLeader:
2847 runResult = oldLeader.electionTestRun()
2848 else:
2849 runResult = main.FALSE
2850 utilities.assert_equals(
2851 expect=main.TRUE,
2852 actual=runResult,
2853 onpass="App re-ran for election",
2854 onfail="App failed to run for election" )
2855
2856 main.step( "Leader did not change when old leader re-ran" )
2857 afterRun = main.ONOScli1.electionTestLeader()
2858 # verify leader didn't just change
2859 if afterRun == leaderList[ 0 ]:
2860 afterResult = main.TRUE
2861 else:
2862 afterResult = main.FALSE
2863
2864 utilities.assert_equals(
2865 expect=main.TRUE,
2866 actual=afterResult,
2867 onpass="Old leader successfully re-ran for election",
2868 onfail="Something went wrong with Leadership election after " +
2869 "the old leader re-ran for election" )
2870
2871 def CASE16( self, main ):
2872 """
2873 Install Distributed Primitives app
2874 """
2875 import time
2876 assert numControllers, "numControllers not defined"
2877 assert main, "main not defined"
2878 assert utilities.assert_equals, "utilities.assert_equals not defined"
2879 assert CLIs, "CLIs not defined"
2880 assert nodes, "nodes not defined"
2881
2882 # Variables for the distributed primitives tests
2883 global pCounterName
2884 global iCounterName
2885 global pCounterValue
2886 global iCounterValue
2887 global onosSet
2888 global onosSetName
2889 pCounterName = "TestON-Partitions"
2890 iCounterName = "TestON-inMemory"
2891 pCounterValue = 0
2892 iCounterValue = 0
2893 onosSet = set([])
2894 onosSetName = "TestON-set"
2895
2896 description = "Install Primitives app"
2897 main.case( description )
2898 main.step( "Install Primitives app" )
2899 appName = "org.onosproject.distributedprimitives"
2900 appResults = CLIs[0].activateApp( appName )
2901 utilities.assert_equals( expect=main.TRUE,
2902 actual=appResults,
2903 onpass="Primitives app activated",
2904 onfail="Primitives app not activated" )
2905 time.sleep( 5 ) # To allow all nodes to activate
2906
2907 def CASE17( self, main ):
2908 """
2909 Check for basic functionality with distributed primitives
2910 """
2911 import json
2912 # Make sure variables are defined/set
2913 assert numControllers, "numControllers not defined"
2914 assert main, "main not defined"
2915 assert utilities.assert_equals, "utilities.assert_equals not defined"
2916 assert CLIs, "CLIs not defined"
2917 assert nodes, "nodes not defined"
2918 assert pCounterName, "pCounterName not defined"
2919 assert iCounterName, "iCounterName not defined"
2920 assert onosSetName, "onosSetName not defined"
2921 # NOTE: assert fails if value is 0/None/Empty/False
2922 try:
2923 pCounterValue
2924 except NameError:
2925 main.log.error( "pCounterValue not defined, setting to 0" )
2926 pCounterValue = 0
2927 try:
2928 iCounterValue
2929 except NameError:
2930 main.log.error( "iCounterValue not defined, setting to 0" )
2931 iCounterValue = 0
2932 try:
2933 onosSet
2934 except NameError:
2935 main.log.error( "onosSet not defined, setting to empty Set" )
2936 onosSet = set([])
2937 # Variables for the distributed primitives tests. These are local only
2938 addValue = "a"
2939 addAllValue = "a b c d e f"
2940 retainValue = "c d e f"
2941
2942 description = "Check for basic functionality with distributed " +\
2943 "primitives"
2944 main.case( description )
2945 main.caseExplaination = "Test the methods of the distributed primitives (counters and sets) throught the cli"
2946 # DISTRIBUTED ATOMIC COUNTERS
2947 main.step( "Increment and get a default counter on each node" )
2948 pCounters = []
2949 threads = []
2950 addedPValues = []
2951 for i in range( numControllers ):
2952 t = main.Thread( target=CLIs[i].counterTestIncrement,
2953 name="counterIncrement-" + str( i ),
2954 args=[ pCounterName ] )
2955 pCounterValue += 1
2956 addedPValues.append( pCounterValue )
2957 threads.append( t )
2958 t.start()
2959
2960 for t in threads:
2961 t.join()
2962 pCounters.append( t.result )
2963 # Check that counter incremented numController times
2964 pCounterResults = True
2965 for i in addedPValues:
2966 tmpResult = i in pCounters
2967 pCounterResults = pCounterResults and tmpResult
2968 if not tmpResult:
2969 main.log.error( str( i ) + " is not in partitioned "
2970 "counter incremented results" )
2971 utilities.assert_equals( expect=True,
2972 actual=pCounterResults,
2973 onpass="Default counter incremented",
2974 onfail="Error incrementing default" +
2975 " counter" )
2976
2977 main.step( "Increment and get an in memory counter on each node" )
2978 iCounters = []
2979 addedIValues = []
2980 threads = []
2981 for i in range( numControllers ):
2982 t = main.Thread( target=CLIs[i].counterTestIncrement,
2983 name="icounterIncrement-" + str( i ),
2984 args=[ iCounterName ],
2985 kwargs={ "inMemory": True } )
2986 iCounterValue += 1
2987 addedIValues.append( iCounterValue )
2988 threads.append( t )
2989 t.start()
2990
2991 for t in threads:
2992 t.join()
2993 iCounters.append( t.result )
2994 # Check that counter incremented numController times
2995 iCounterResults = True
2996 for i in addedIValues:
2997 tmpResult = i in iCounters
2998 iCounterResults = iCounterResults and tmpResult
2999 if not tmpResult:
3000 main.log.error( str( i ) + " is not in the in-memory "
3001 "counter incremented results" )
3002 utilities.assert_equals( expect=True,
3003 actual=iCounterResults,
3004 onpass="In memory counter incremented",
3005 onfail="Error incrementing in memory" +
3006 " counter" )
3007
3008 main.step( "Check counters are consistant across nodes" )
3009 onosCounters = []
3010 threads = []
3011 for i in range( numControllers ):
3012 t = main.Thread( target=CLIs[i].counters,
3013 name="counters-" + str( i ) )
3014 threads.append( t )
3015 t.start()
3016 for t in threads:
3017 t.join()
3018 onosCounters.append( t.result )
3019 tmp = [ i == onosCounters[ 0 ] for i in onosCounters ]
3020 if all( tmp ):
3021 main.log.info( "Counters are consistent across all nodes" )
3022 consistentCounterResults = main.TRUE
3023 else:
3024 main.log.error( "Counters are not consistent across all nodes" )
3025 consistentCounterResults = main.FALSE
3026 utilities.assert_equals( expect=main.TRUE,
3027 actual=consistentCounterResults,
3028 onpass="ONOS counters are consistent " +
3029 "across nodes",
3030 onfail="ONOS Counters are inconsistent " +
3031 "across nodes" )
3032
3033 main.step( "Counters we added have the correct values" )
3034 correctResults = main.TRUE
3035 for i in range( numControllers ):
3036 try:
3037 current = json.loads( onosCounters[i] )
3038 except ( ValueError, TypeError ):
3039 main.log.error( "Could not parse counters response from ONOS" +
3040 str( i + 1 ) )
3041 main.log.warn( repr( onosCounters[ i ] ) )
3042 pValue = None
3043 iValue = None
3044 try:
3045 for database in current:
3046 partitioned = database.get( 'partitionedDatabaseCounters' )
3047 if partitioned:
3048 for value in partitioned:
3049 if value.get( 'name' ) == pCounterName:
3050 pValue = value.get( 'value' )
3051 break
3052 inMemory = database.get( 'inMemoryDatabaseCounters' )
3053 if inMemory:
3054 for value in inMemory:
3055 if value.get( 'name' ) == iCounterName:
3056 iValue = value.get( 'value' )
3057 break
3058 except AttributeError, e:
3059 main.log.error( "ONOS" + str( i + 1 ) + " counters result " +
3060 "is not as expected" )
3061 correctResults = main.FALSE
3062 if pValue == pCounterValue:
3063 main.log.info( "Partitioned counter value is correct" )
3064 else:
3065 main.log.error( "Partitioned counter value is incorrect," +
3066 " expected value: " + str( pCounterValue )
3067 + " current value: " + str( pValue ) )
3068 correctResults = main.FALSE
3069 if iValue == iCounterValue:
3070 main.log.info( "In memory counter value is correct" )
3071 else:
3072 main.log.error( "In memory counter value is incorrect, " +
3073 "expected value: " + str( iCounterValue ) +
3074 " current value: " + str( iValue ) )
3075 correctResults = main.FALSE
3076 utilities.assert_equals( expect=main.TRUE,
3077 actual=correctResults,
3078 onpass="Added counters are correct",
3079 onfail="Added counters are incorrect" )
3080 # DISTRIBUTED SETS
3081 main.step( "Distributed Set get" )
3082 size = len( onosSet )
3083 getResponses = []
3084 threads = []
3085 for i in range( numControllers ):
3086 t = main.Thread( target=CLIs[i].setTestGet,
3087 name="setTestGet-" + str( i ),
3088 args=[ onosSetName ] )
3089 threads.append( t )
3090 t.start()
3091 for t in threads:
3092 t.join()
3093 getResponses.append( t.result )
3094
3095 getResults = main.TRUE
3096 for i in range( numControllers ):
3097 if isinstance( getResponses[ i ], list):
3098 current = set( getResponses[ i ] )
3099 if len( current ) == len( getResponses[ i ] ):
3100 # no repeats
3101 if onosSet != current:
3102 main.log.error( "ONOS" + str( i + 1 ) +
3103 " has incorrect view" +
3104 " of set " + onosSetName + ":\n" +
3105 str( getResponses[ i ] ) )
3106 main.log.debug( "Expected: " + str( onosSet ) )
3107 main.log.debug( "Actual: " + str( current ) )
3108 getResults = main.FALSE
3109 else:
3110 # error, set is not a set
3111 main.log.error( "ONOS" + str( i + 1 ) +
3112 " has repeat elements in" +
3113 " set " + onosSetName + ":\n" +
3114 str( getResponses[ i ] ) )
3115 getResults = main.FALSE
3116 elif getResponses[ i ] == main.ERROR:
3117 getResults = main.FALSE
3118 utilities.assert_equals( expect=main.TRUE,
3119 actual=getResults,
3120 onpass="Set elements are correct",
3121 onfail="Set elements are incorrect" )
3122
3123 main.step( "Distributed Set size" )
3124 sizeResponses = []
3125 threads = []
3126 for i in range( numControllers ):
3127 t = main.Thread( target=CLIs[i].setTestSize,
3128 name="setTestSize-" + str( i ),
3129 args=[ onosSetName ] )
3130 threads.append( t )
3131 t.start()
3132 for t in threads:
3133 t.join()
3134 sizeResponses.append( t.result )
3135
3136 sizeResults = main.TRUE
3137 for i in range( numControllers ):
3138 if size != sizeResponses[ i ]:
3139 sizeResults = main.FALSE
3140 main.log.error( "ONOS" + str( i + 1 ) +
3141 " expected a size of " + str( size ) +
3142 " for set " + onosSetName +
3143 " but got " + str( sizeResponses[ i ] ) )
3144 utilities.assert_equals( expect=main.TRUE,
3145 actual=sizeResults,
3146 onpass="Set sizes are correct",
3147 onfail="Set sizes are incorrect" )
3148
3149 main.step( "Distributed Set add()" )
3150 onosSet.add( addValue )
3151 addResponses = []
3152 threads = []
3153 for i in range( numControllers ):
3154 t = main.Thread( target=CLIs[i].setTestAdd,
3155 name="setTestAdd-" + str( i ),
3156 args=[ onosSetName, addValue ] )
3157 threads.append( t )
3158 t.start()
3159 for t in threads:
3160 t.join()
3161 addResponses.append( t.result )
3162
3163 # main.TRUE = successfully changed the set
3164 # main.FALSE = action resulted in no change in set
3165 # main.ERROR - Some error in executing the function
3166 addResults = main.TRUE
3167 for i in range( numControllers ):
3168 if addResponses[ i ] == main.TRUE:
3169 # All is well
3170 pass
3171 elif addResponses[ i ] == main.FALSE:
3172 # Already in set, probably fine
3173 pass
3174 elif addResponses[ i ] == main.ERROR:
3175 # Error in execution
3176 addResults = main.FALSE
3177 else:
3178 # unexpected result
3179 addResults = main.FALSE
3180 if addResults != main.TRUE:
3181 main.log.error( "Error executing set add" )
3182
3183 # Check if set is still correct
3184 size = len( onosSet )
3185 getResponses = []
3186 threads = []
3187 for i in range( numControllers ):
3188 t = main.Thread( target=CLIs[i].setTestGet,
3189 name="setTestGet-" + str( i ),
3190 args=[ onosSetName ] )
3191 threads.append( t )
3192 t.start()
3193 for t in threads:
3194 t.join()
3195 getResponses.append( t.result )
3196 getResults = main.TRUE
3197 for i in range( numControllers ):
3198 if isinstance( getResponses[ i ], list):
3199 current = set( getResponses[ i ] )
3200 if len( current ) == len( getResponses[ i ] ):
3201 # no repeats
3202 if onosSet != current:
3203 main.log.error( "ONOS" + str( i + 1 ) +
3204 " has incorrect view" +
3205 " of set " + onosSetName + ":\n" +
3206 str( getResponses[ i ] ) )
3207 main.log.debug( "Expected: " + str( onosSet ) )
3208 main.log.debug( "Actual: " + str( current ) )
3209 getResults = main.FALSE
3210 else:
3211 # error, set is not a set
3212 main.log.error( "ONOS" + str( i + 1 ) +
3213 " has repeat elements in" +
3214 " set " + onosSetName + ":\n" +
3215 str( getResponses[ i ] ) )
3216 getResults = main.FALSE
3217 elif getResponses[ i ] == main.ERROR:
3218 getResults = main.FALSE
3219 sizeResponses = []
3220 threads = []
3221 for i in range( numControllers ):
3222 t = main.Thread( target=CLIs[i].setTestSize,
3223 name="setTestSize-" + str( i ),
3224 args=[ onosSetName ] )
3225 threads.append( t )
3226 t.start()
3227 for t in threads:
3228 t.join()
3229 sizeResponses.append( t.result )
3230 sizeResults = main.TRUE
3231 for i in range( numControllers ):
3232 if size != sizeResponses[ i ]:
3233 sizeResults = main.FALSE
3234 main.log.error( "ONOS" + str( i + 1 ) +
3235 " expected a size of " + str( size ) +
3236 " for set " + onosSetName +
3237 " but got " + str( sizeResponses[ i ] ) )
3238 addResults = addResults and getResults and sizeResults
3239 utilities.assert_equals( expect=main.TRUE,
3240 actual=addResults,
3241 onpass="Set add correct",
3242 onfail="Set add was incorrect" )
3243
3244 main.step( "Distributed Set addAll()" )
3245 onosSet.update( addAllValue.split() )
3246 addResponses = []
3247 threads = []
3248 for i in range( numControllers ):
3249 t = main.Thread( target=CLIs[i].setTestAdd,
3250 name="setTestAddAll-" + str( i ),
3251 args=[ onosSetName, addAllValue ] )
3252 threads.append( t )
3253 t.start()
3254 for t in threads:
3255 t.join()
3256 addResponses.append( t.result )
3257
3258 # main.TRUE = successfully changed the set
3259 # main.FALSE = action resulted in no change in set
3260 # main.ERROR - Some error in executing the function
3261 addAllResults = main.TRUE
3262 for i in range( numControllers ):
3263 if addResponses[ i ] == main.TRUE:
3264 # All is well
3265 pass
3266 elif addResponses[ i ] == main.FALSE:
3267 # Already in set, probably fine
3268 pass
3269 elif addResponses[ i ] == main.ERROR:
3270 # Error in execution
3271 addAllResults = main.FALSE
3272 else:
3273 # unexpected result
3274 addAllResults = main.FALSE
3275 if addAllResults != main.TRUE:
3276 main.log.error( "Error executing set addAll" )
3277
3278 # Check if set is still correct
3279 size = len( onosSet )
3280 getResponses = []
3281 threads = []
3282 for i in range( numControllers ):
3283 t = main.Thread( target=CLIs[i].setTestGet,
3284 name="setTestGet-" + str( i ),
3285 args=[ onosSetName ] )
3286 threads.append( t )
3287 t.start()
3288 for t in threads:
3289 t.join()
3290 getResponses.append( t.result )
3291 getResults = main.TRUE
3292 for i in range( numControllers ):
3293 if isinstance( getResponses[ i ], list):
3294 current = set( getResponses[ i ] )
3295 if len( current ) == len( getResponses[ i ] ):
3296 # no repeats
3297 if onosSet != current:
3298 main.log.error( "ONOS" + str( i + 1 ) +
3299 " has incorrect view" +
3300 " of set " + onosSetName + ":\n" +
3301 str( getResponses[ i ] ) )
3302 main.log.debug( "Expected: " + str( onosSet ) )
3303 main.log.debug( "Actual: " + str( current ) )
3304 getResults = main.FALSE
3305 else:
3306 # error, set is not a set
3307 main.log.error( "ONOS" + str( i + 1 ) +
3308 " has repeat elements in" +
3309 " set " + onosSetName + ":\n" +
3310 str( getResponses[ i ] ) )
3311 getResults = main.FALSE
3312 elif getResponses[ i ] == main.ERROR:
3313 getResults = main.FALSE
3314 sizeResponses = []
3315 threads = []
3316 for i in range( numControllers ):
3317 t = main.Thread( target=CLIs[i].setTestSize,
3318 name="setTestSize-" + str( i ),
3319 args=[ onosSetName ] )
3320 threads.append( t )
3321 t.start()
3322 for t in threads:
3323 t.join()
3324 sizeResponses.append( t.result )
3325 sizeResults = main.TRUE
3326 for i in range( numControllers ):
3327 if size != sizeResponses[ i ]:
3328 sizeResults = main.FALSE
3329 main.log.error( "ONOS" + str( i + 1 ) +
3330 " expected a size of " + str( size ) +
3331 " for set " + onosSetName +
3332 " but got " + str( sizeResponses[ i ] ) )
3333 addAllResults = addAllResults and getResults and sizeResults
3334 utilities.assert_equals( expect=main.TRUE,
3335 actual=addAllResults,
3336 onpass="Set addAll correct",
3337 onfail="Set addAll was incorrect" )
3338
3339 main.step( "Distributed Set contains()" )
3340 containsResponses = []
3341 threads = []
3342 for i in range( numControllers ):
3343 t = main.Thread( target=CLIs[i].setTestGet,
3344 name="setContains-" + str( i ),
3345 args=[ onosSetName ],
3346 kwargs={ "values": addValue } )
3347 threads.append( t )
3348 t.start()
3349 for t in threads:
3350 t.join()
3351 # NOTE: This is the tuple
3352 containsResponses.append( t.result )
3353
3354 containsResults = main.TRUE
3355 for i in range( numControllers ):
3356 if containsResponses[ i ] == main.ERROR:
3357 containsResults = main.FALSE
3358 else:
3359 containsResults = containsResults and\
3360 containsResponses[ i ][ 1 ]
3361 utilities.assert_equals( expect=main.TRUE,
3362 actual=containsResults,
3363 onpass="Set contains is functional",
3364 onfail="Set contains failed" )
3365
3366 main.step( "Distributed Set containsAll()" )
3367 containsAllResponses = []
3368 threads = []
3369 for i in range( numControllers ):
3370 t = main.Thread( target=CLIs[i].setTestGet,
3371 name="setContainsAll-" + str( i ),
3372 args=[ onosSetName ],
3373 kwargs={ "values": addAllValue } )
3374 threads.append( t )
3375 t.start()
3376 for t in threads:
3377 t.join()
3378 # NOTE: This is the tuple
3379 containsAllResponses.append( t.result )
3380
3381 containsAllResults = main.TRUE
3382 for i in range( numControllers ):
3383 if containsResponses[ i ] == main.ERROR:
3384 containsResults = main.FALSE
3385 else:
3386 containsResults = containsResults and\
3387 containsResponses[ i ][ 1 ]
3388 utilities.assert_equals( expect=main.TRUE,
3389 actual=containsAllResults,
3390 onpass="Set containsAll is functional",
3391 onfail="Set containsAll failed" )
3392
3393 main.step( "Distributed Set remove()" )
3394 onosSet.remove( addValue )
3395 removeResponses = []
3396 threads = []
3397 for i in range( numControllers ):
3398 t = main.Thread( target=CLIs[i].setTestRemove,
3399 name="setTestRemove-" + str( i ),
3400 args=[ onosSetName, addValue ] )
3401 threads.append( t )
3402 t.start()
3403 for t in threads:
3404 t.join()
3405 removeResponses.append( t.result )
3406
3407 # main.TRUE = successfully changed the set
3408 # main.FALSE = action resulted in no change in set
3409 # main.ERROR - Some error in executing the function
3410 removeResults = main.TRUE
3411 for i in range( numControllers ):
3412 if removeResponses[ i ] == main.TRUE:
3413 # All is well
3414 pass
3415 elif removeResponses[ i ] == main.FALSE:
3416 # not in set, probably fine
3417 pass
3418 elif removeResponses[ i ] == main.ERROR:
3419 # Error in execution
3420 removeResults = main.FALSE
3421 else:
3422 # unexpected result
3423 removeResults = main.FALSE
3424 if removeResults != main.TRUE:
3425 main.log.error( "Error executing set remove" )
3426
3427 # Check if set is still correct
3428 size = len( onosSet )
3429 getResponses = []
3430 threads = []
3431 for i in range( numControllers ):
3432 t = main.Thread( target=CLIs[i].setTestGet,
3433 name="setTestGet-" + str( i ),
3434 args=[ onosSetName ] )
3435 threads.append( t )
3436 t.start()
3437 for t in threads:
3438 t.join()
3439 getResponses.append( t.result )
3440 getResults = main.TRUE
3441 for i in range( numControllers ):
3442 if isinstance( getResponses[ i ], list):
3443 current = set( getResponses[ i ] )
3444 if len( current ) == len( getResponses[ i ] ):
3445 # no repeats
3446 if onosSet != current:
3447 main.log.error( "ONOS" + str( i + 1 ) +
3448 " has incorrect view" +
3449 " of set " + onosSetName + ":\n" +
3450 str( getResponses[ i ] ) )
3451 main.log.debug( "Expected: " + str( onosSet ) )
3452 main.log.debug( "Actual: " + str( current ) )
3453 getResults = main.FALSE
3454 else:
3455 # error, set is not a set
3456 main.log.error( "ONOS" + str( i + 1 ) +
3457 " has repeat elements in" +
3458 " set " + onosSetName + ":\n" +
3459 str( getResponses[ i ] ) )
3460 getResults = main.FALSE
3461 elif getResponses[ i ] == main.ERROR:
3462 getResults = main.FALSE
3463 sizeResponses = []
3464 threads = []
3465 for i in range( numControllers ):
3466 t = main.Thread( target=CLIs[i].setTestSize,
3467 name="setTestSize-" + str( i ),
3468 args=[ onosSetName ] )
3469 threads.append( t )
3470 t.start()
3471 for t in threads:
3472 t.join()
3473 sizeResponses.append( t.result )
3474 sizeResults = main.TRUE
3475 for i in range( numControllers ):
3476 if size != sizeResponses[ i ]:
3477 sizeResults = main.FALSE
3478 main.log.error( "ONOS" + str( i + 1 ) +
3479 " expected a size of " + str( size ) +
3480 " for set " + onosSetName +
3481 " but got " + str( sizeResponses[ i ] ) )
3482 removeResults = removeResults and getResults and sizeResults
3483 utilities.assert_equals( expect=main.TRUE,
3484 actual=removeResults,
3485 onpass="Set remove correct",
3486 onfail="Set remove was incorrect" )
3487
3488 main.step( "Distributed Set removeAll()" )
3489 onosSet.difference_update( addAllValue.split() )
3490 removeAllResponses = []
3491 threads = []
3492 try:
3493 for i in range( numControllers ):
3494 t = main.Thread( target=CLIs[i].setTestRemove,
3495 name="setTestRemoveAll-" + str( i ),
3496 args=[ onosSetName, addAllValue ] )
3497 threads.append( t )
3498 t.start()
3499 for t in threads:
3500 t.join()
3501 removeAllResponses.append( t.result )
3502 except Exception, e:
3503 main.log.exception(e)
3504
3505 # main.TRUE = successfully changed the set
3506 # main.FALSE = action resulted in no change in set
3507 # main.ERROR - Some error in executing the function
3508 removeAllResults = main.TRUE
3509 for i in range( numControllers ):
3510 if removeAllResponses[ i ] == main.TRUE:
3511 # All is well
3512 pass
3513 elif removeAllResponses[ i ] == main.FALSE:
3514 # not in set, probably fine
3515 pass
3516 elif removeAllResponses[ i ] == main.ERROR:
3517 # Error in execution
3518 removeAllResults = main.FALSE
3519 else:
3520 # unexpected result
3521 removeAllResults = main.FALSE
3522 if removeAllResults != main.TRUE:
3523 main.log.error( "Error executing set removeAll" )
3524
3525 # Check if set is still correct
3526 size = len( onosSet )
3527 getResponses = []
3528 threads = []
3529 for i in range( numControllers ):
3530 t = main.Thread( target=CLIs[i].setTestGet,
3531 name="setTestGet-" + str( i ),
3532 args=[ onosSetName ] )
3533 threads.append( t )
3534 t.start()
3535 for t in threads:
3536 t.join()
3537 getResponses.append( t.result )
3538 getResults = main.TRUE
3539 for i in range( numControllers ):
3540 if isinstance( getResponses[ i ], list):
3541 current = set( getResponses[ i ] )
3542 if len( current ) == len( getResponses[ i ] ):
3543 # no repeats
3544 if onosSet != current:
3545 main.log.error( "ONOS" + str( i + 1 ) +
3546 " has incorrect view" +
3547 " of set " + onosSetName + ":\n" +
3548 str( getResponses[ i ] ) )
3549 main.log.debug( "Expected: " + str( onosSet ) )
3550 main.log.debug( "Actual: " + str( current ) )
3551 getResults = main.FALSE
3552 else:
3553 # error, set is not a set
3554 main.log.error( "ONOS" + str( i + 1 ) +
3555 " has repeat elements in" +
3556 " set " + onosSetName + ":\n" +
3557 str( getResponses[ i ] ) )
3558 getResults = main.FALSE
3559 elif getResponses[ i ] == main.ERROR:
3560 getResults = main.FALSE
3561 sizeResponses = []
3562 threads = []
3563 for i in range( numControllers ):
3564 t = main.Thread( target=CLIs[i].setTestSize,
3565 name="setTestSize-" + str( i ),
3566 args=[ onosSetName ] )
3567 threads.append( t )
3568 t.start()
3569 for t in threads:
3570 t.join()
3571 sizeResponses.append( t.result )
3572 sizeResults = main.TRUE
3573 for i in range( numControllers ):
3574 if size != sizeResponses[ i ]:
3575 sizeResults = main.FALSE
3576 main.log.error( "ONOS" + str( i + 1 ) +
3577 " expected a size of " + str( size ) +
3578 " for set " + onosSetName +
3579 " but got " + str( sizeResponses[ i ] ) )
3580 removeAllResults = removeAllResults and getResults and sizeResults
3581 utilities.assert_equals( expect=main.TRUE,
3582 actual=removeAllResults,
3583 onpass="Set removeAll correct",
3584 onfail="Set removeAll was incorrect" )
3585
3586 main.step( "Distributed Set addAll()" )
3587 onosSet.update( addAllValue.split() )
3588 addResponses = []
3589 threads = []
3590 for i in range( numControllers ):
3591 t = main.Thread( target=CLIs[i].setTestAdd,
3592 name="setTestAddAll-" + str( i ),
3593 args=[ onosSetName, addAllValue ] )
3594 threads.append( t )
3595 t.start()
3596 for t in threads:
3597 t.join()
3598 addResponses.append( t.result )
3599
3600 # main.TRUE = successfully changed the set
3601 # main.FALSE = action resulted in no change in set
3602 # main.ERROR - Some error in executing the function
3603 addAllResults = main.TRUE
3604 for i in range( numControllers ):
3605 if addResponses[ i ] == main.TRUE:
3606 # All is well
3607 pass
3608 elif addResponses[ i ] == main.FALSE:
3609 # Already in set, probably fine
3610 pass
3611 elif addResponses[ i ] == main.ERROR:
3612 # Error in execution
3613 addAllResults = main.FALSE
3614 else:
3615 # unexpected result
3616 addAllResults = main.FALSE
3617 if addAllResults != main.TRUE:
3618 main.log.error( "Error executing set addAll" )
3619
3620 # Check if set is still correct
3621 size = len( onosSet )
3622 getResponses = []
3623 threads = []
3624 for i in range( numControllers ):
3625 t = main.Thread( target=CLIs[i].setTestGet,
3626 name="setTestGet-" + str( i ),
3627 args=[ onosSetName ] )
3628 threads.append( t )
3629 t.start()
3630 for t in threads:
3631 t.join()
3632 getResponses.append( t.result )
3633 getResults = main.TRUE
3634 for i in range( numControllers ):
3635 if isinstance( getResponses[ i ], list):
3636 current = set( getResponses[ i ] )
3637 if len( current ) == len( getResponses[ i ] ):
3638 # no repeats
3639 if onosSet != current:
3640 main.log.error( "ONOS" + str( i + 1 ) +
3641 " has incorrect view" +
3642 " of set " + onosSetName + ":\n" +
3643 str( getResponses[ i ] ) )
3644 main.log.debug( "Expected: " + str( onosSet ) )
3645 main.log.debug( "Actual: " + str( current ) )
3646 getResults = main.FALSE
3647 else:
3648 # error, set is not a set
3649 main.log.error( "ONOS" + str( i + 1 ) +
3650 " has repeat elements in" +
3651 " set " + onosSetName + ":\n" +
3652 str( getResponses[ i ] ) )
3653 getResults = main.FALSE
3654 elif getResponses[ i ] == main.ERROR:
3655 getResults = main.FALSE
3656 sizeResponses = []
3657 threads = []
3658 for i in range( numControllers ):
3659 t = main.Thread( target=CLIs[i].setTestSize,
3660 name="setTestSize-" + str( i ),
3661 args=[ onosSetName ] )
3662 threads.append( t )
3663 t.start()
3664 for t in threads:
3665 t.join()
3666 sizeResponses.append( t.result )
3667 sizeResults = main.TRUE
3668 for i in range( numControllers ):
3669 if size != sizeResponses[ i ]:
3670 sizeResults = main.FALSE
3671 main.log.error( "ONOS" + str( i + 1 ) +
3672 " expected a size of " + str( size ) +
3673 " for set " + onosSetName +
3674 " but got " + str( sizeResponses[ i ] ) )
3675 addAllResults = addAllResults and getResults and sizeResults
3676 utilities.assert_equals( expect=main.TRUE,
3677 actual=addAllResults,
3678 onpass="Set addAll correct",
3679 onfail="Set addAll was incorrect" )
3680
3681 main.step( "Distributed Set clear()" )
3682 onosSet.clear()
3683 clearResponses = []
3684 threads = []
3685 for i in range( numControllers ):
3686 t = main.Thread( target=CLIs[i].setTestRemove,
3687 name="setTestClear-" + str( i ),
3688 args=[ onosSetName, " "], # Values doesn't matter
3689 kwargs={ "clear": True } )
3690 threads.append( t )
3691 t.start()
3692 for t in threads:
3693 t.join()
3694 clearResponses.append( t.result )
3695
3696 # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        # Aggregate the per-node clear() responses into one pass/fail flag.
        clearResults = main.TRUE
        for i in range( numControllers ):
            if clearResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif clearResponses[ i ] == main.FALSE:
                # Nothing set, probably fine
                pass
            elif clearResponses[ i ] == main.ERROR:
                # Error in execution
                clearResults = main.FALSE
            else:
                # unexpected result
                clearResults = main.FALSE
        if clearResults != main.TRUE:
            main.log.error( "Error executing set clear" )

        # Check if set is still correct
        # Read the set back from every controller in parallel and compare
        # each node's view against the locally tracked reference ( onosSet ).
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in range( numControllers ):
            t = main.Thread( target=CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( numControllers ):
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + str( i + 1 ) +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + str( i + 1 ) +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
            # NOTE(review): a response that is neither a list nor main.ERROR
            # ( e.g. None ) falls through without failing getResults -
            # confirm whether that is intentional.
        # Also verify that size() on every node matches the expected
        # cardinality of the reference set.
        sizeResponses = []
        threads = []
        for i in range( numControllers ):
            t = main.Thread( target=CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( numControllers ):
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + str( i + 1 ) +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if clear() executed cleanly AND every node agrees
        # on both set contents and set size.
        clearResults = clearResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=clearResults,
                                 onpass="Set clear correct",
                                 onfail="Set clear was incorrect" )

        main.step( "Distributed Set addAll()" )
        # Update the local reference model first, then apply the same
        # multi-element add to the distributed set on every node.
        onosSet.update( addAllValue.split() )
        addResponses = []
        threads = []
        for i in range( numControllers ):
            # addAll is exercised through the add CLI call; addAllValue is
            # passed whole ( presumably a space-separated list of elements,
            # matching the .split() above - confirm against setTestAdd ).
            t = main.Thread( target=CLIs[i].setTestAdd,
                             name="setTestAddAll-" + str( i ),
                             args=[ onosSetName, addAllValue ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            addResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        addAllResults = main.TRUE
        for i in range( numControllers ):
            if addResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif addResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif addResponses[ i ] == main.ERROR:
                # Error in execution
                addAllResults = main.FALSE
            else:
                # unexpected result
                addAllResults = main.FALSE
        if addAllResults != main.TRUE:
            main.log.error( "Error executing set addAll" )

        # Check if set is still correct
        # Read the set back from every controller in parallel and compare
        # each node's view against the locally tracked reference ( onosSet ).
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in range( numControllers ):
            t = main.Thread( target=CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( numControllers ):
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + str( i + 1 ) +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + str( i + 1 ) +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
            # NOTE(review): a response that is neither a list nor main.ERROR
            # ( e.g. None ) falls through without failing getResults -
            # confirm whether that is intentional.
        # Also verify that size() on every node matches the expected
        # cardinality of the reference set.
        sizeResponses = []
        threads = []
        for i in range( numControllers ):
            t = main.Thread( target=CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( numControllers ):
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + str( i + 1 ) +
                                " expected a size of " + str( size ) +
                                " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if addAll executed cleanly AND every node agrees
        # on both set contents and set size.
        addAllResults = addAllResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=addAllResults,
                                 onpass="Set addAll correct",
                                 onfail="Set addAll was incorrect" )

        main.step( "Distributed Set retain()" )
        # Update the local reference model first ( retain == set
        # intersection ), then apply retain on every node. Retain is driven
        # through the remove CLI call with the "retain" flag set.
        onosSet.intersection_update( retainValue.split() )
        retainResponses = []
        threads = []
        for i in range( numControllers ):
            t = main.Thread( target=CLIs[i].setTestRemove,
                             name="setTestRetain-" + str( i ),
                             args=[ onosSetName, retainValue ],
                             kwargs={ "retain": True } )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            retainResponses.append( t.result )

        # main.TRUE = successfully changed the set
        # main.FALSE = action resulted in no change in set
        # main.ERROR - Some error in executing the function
        retainResults = main.TRUE
        for i in range( numControllers ):
            if retainResponses[ i ] == main.TRUE:
                # All is well
                pass
            elif retainResponses[ i ] == main.FALSE:
                # Already in set, probably fine
                pass
            elif retainResponses[ i ] == main.ERROR:
                # Error in execution
                retainResults = main.FALSE
            else:
                # unexpected result
                retainResults = main.FALSE
        if retainResults != main.TRUE:
            main.log.error( "Error executing set retain" )

        # Check if set is still correct
        # Read the set back from every controller in parallel and compare
        # each node's view against the locally tracked reference ( onosSet ).
        size = len( onosSet )
        getResponses = []
        threads = []
        for i in range( numControllers ):
            t = main.Thread( target=CLIs[i].setTestGet,
                             name="setTestGet-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            getResponses.append( t.result )
        getResults = main.TRUE
        for i in range( numControllers ):
            if isinstance( getResponses[ i ], list):
                current = set( getResponses[ i ] )
                if len( current ) == len( getResponses[ i ] ):
                    # no repeats
                    if onosSet != current:
                        main.log.error( "ONOS" + str( i + 1 ) +
                                        " has incorrect view" +
                                        " of set " + onosSetName + ":\n" +
                                        str( getResponses[ i ] ) )
                        main.log.debug( "Expected: " + str( onosSet ) )
                        main.log.debug( "Actual: " + str( current ) )
                        getResults = main.FALSE
                else:
                    # error, set is not a set
                    main.log.error( "ONOS" + str( i + 1 ) +
                                    " has repeat elements in" +
                                    " set " + onosSetName + ":\n" +
                                    str( getResponses[ i ] ) )
                    getResults = main.FALSE
            elif getResponses[ i ] == main.ERROR:
                getResults = main.FALSE
            # NOTE(review): a response that is neither a list nor main.ERROR
            # ( e.g. None ) falls through without failing getResults -
            # confirm whether that is intentional.
        # Also verify that size() on every node matches the expected
        # cardinality of the reference set.
        sizeResponses = []
        threads = []
        for i in range( numControllers ):
            t = main.Thread( target=CLIs[i].setTestSize,
                             name="setTestSize-" + str( i ),
                             args=[ onosSetName ] )
            threads.append( t )
            t.start()
        for t in threads:
            t.join()
            sizeResponses.append( t.result )
        sizeResults = main.TRUE
        for i in range( numControllers ):
            if size != sizeResponses[ i ]:
                sizeResults = main.FALSE
                main.log.error( "ONOS" + str( i + 1 ) +
                                " expected a size of " +
                                str( size ) + " for set " + onosSetName +
                                " but got " + str( sizeResponses[ i ] ) )
        # Step passes only if retain executed cleanly AND every node agrees
        # on both set contents and set size.
        retainResults = retainResults and getResults and sizeResults
        utilities.assert_equals( expect=main.TRUE,
                                 actual=retainResults,
                                 onpass="Set retain correct",
                                 onfail="Set retain was incorrect" )
3967