"""
Description: This test is to determine if the HA test setup is
    working correctly. There are no induced failures, so this test
    should have a 100% pass rate.

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: The Failure case. Since this is the Sanity test, we do nothing.
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: Start election app on all ONOS nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
26
27
28class HAsanity:
29
30 def __init__( self ):
31 self.default = ''
32
33 def CASE1( self, main ):
34 """
35 CASE1 is to compile ONOS and push it to the test machines
36
37 Startup sequence:
38 cell <name>
39 onos-verify-cell
40 NOTE: temporary - onos-remove-raft-logs
41 onos-uninstall
42 start mininet
43 git pull
44 mvn clean install
45 onos-package
46 onos-install -f
47 onos-wait-for-start
48 start cli sessions
49 start tcpdump
50 """
51 main.log.info( "ONOS HA Sanity test - initialization" )
52 main.case( "Setting up test environment" )
        main.caseExplaination = "Set up the test environment, including " +\
                                "installing ONOS, starting Mininet and " +\
                                "ONOS CLI sessions."
56 # TODO: save all the timers and output them for plotting
57
58 # load some variables from the params file
59 PULLCODE = False
60 if main.params[ 'Git' ] == 'True':
61 PULLCODE = True
62 gitBranch = main.params[ 'branch' ]
63 cellName = main.params[ 'ENV' ][ 'cellName' ]
64
65 # set global variables
66 global numControllers
67 numControllers = int( main.params[ 'num_controllers' ] )
68 if main.ONOSbench.maxNodes:
69 if main.ONOSbench.maxNodes < numControllers:
70 numControllers = int( main.ONOSbench.maxNodes )
71 # TODO: refactor how to get onos port, maybe put into component tag?
72 global ONOS1Port
73 global ONOS2Port
74 global ONOS3Port
75 global ONOS4Port
76 global ONOS5Port
77 global ONOS6Port
78 global ONOS7Port
79
80 # FIXME: just get controller port from params?
81 # TODO: do we really need all these?
82 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
83 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
84 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
85 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
86 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
87 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
88 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
89
90 global CLIs
91 CLIs = []
92 global nodes
93 nodes = []
94 ipList = []
95 for i in range( 1, numControllers + 1 ):
96 CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
97 nodes.append( getattr( main, 'ONOS' + str( i ) ) )
98 ipList.append( nodes[ -1 ].ip_address )
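        # CLIs, nodes, and ipList now hold the per-node CLI drivers, node
        # components, and management IPs used by the rest of the test cases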
99
100 main.step( "Create cell file" )
101 cellAppString = main.params[ 'ENV' ][ 'appString' ]
102 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
103 main.Mininet1.ip_address,
104 cellAppString, ipList )
105 main.step( "Applying cell variable to environment" )
106 cellResult = main.ONOSbench.setCell( cellName )
107 verifyResult = main.ONOSbench.verifyCell()
108
109 # FIXME:this is short term fix
110 main.log.info( "Removing raft logs" )
111 main.ONOSbench.onosRemoveRaftLogs()
112
113 main.log.info( "Uninstalling ONOS" )
114 for node in nodes:
115 main.ONOSbench.onosUninstall( node.ip_address )
116
117 # Make sure ONOS is DEAD
118 main.log.info( "Killing any ONOS processes" )
119 killResults = main.TRUE
120 for node in nodes:
121 killed = main.ONOSbench.onosKill( node.ip_address )
122 killResults = killResults and killed
123
124 cleanInstallResult = main.TRUE
125 gitPullResult = main.TRUE
126
127 main.step( "Starting Mininet" )
128 # scp topo file to mininet
129 # TODO: move to params?
130 topoName = "obelisk.py"
131 filePath = main.ONOSbench.home + "/tools/test/topos/"
132 main.ONOSbench.copyMininetFile( topoName, filePath,
133 main.Mininet1.user_name,
134 main.Mininet1.ip_address )
135 mnResult = main.Mininet1.startNet( )
136 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
137 onpass="Mininet Started",
138 onfail="Error starting Mininet" )
139
140 main.step( "Git checkout and pull " + gitBranch )
141 if PULLCODE:
142 main.ONOSbench.gitCheckout( gitBranch )
143 gitPullResult = main.ONOSbench.gitPull()
144 # values of 1 or 3 are good
145 utilities.assert_lesser( expect=0, actual=gitPullResult,
146 onpass="Git pull successful",
147 onfail="Git pull failed" )
148 main.ONOSbench.getVersion( report=True )
149
150 main.step( "Using mvn clean install" )
151 cleanInstallResult = main.TRUE
152 if PULLCODE and gitPullResult == main.TRUE:
153 cleanInstallResult = main.ONOSbench.cleanInstall()
154 else:
155 main.log.warn( "Did not pull new code so skipping mvn " +
156 "clean install" )
157 utilities.assert_equals( expect=main.TRUE,
158 actual=cleanInstallResult,
159 onpass="MCI successful",
160 onfail="MCI failed" )
161 # GRAPHS
162 # NOTE: important params here:
163 # job = name of Jenkins job
164 # Plot Name = Plot-HA, only can be used if multiple plots
165 # index = The number of the graph under plot name
166 job = "HAsanity"
167 plotName = "Plot-HA"
168 graphs = '<ac:structured-macro ac:name="html">\n'
169 graphs += '<ac:plain-text-body><![CDATA[\n'
170 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
171 '/plot/' + plotName + '/getPlot?index=0' +\
172 '&width=500&height=300"' +\
173 'noborder="0" width="500" height="300" scrolling="yes" ' +\
174 'seamless="seamless"></iframe>\n'
175 graphs += ']]></ac:plain-text-body>\n'
176 graphs += '</ac:structured-macro>\n'
177 main.log.wiki(graphs)
178
179 main.step( "Creating ONOS package" )
180 packageResult = main.ONOSbench.onosPackage()
181 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
182 onpass="ONOS package successful",
183 onfail="ONOS package failed" )
184
185 main.step( "Installing ONOS package" )
186 onosInstallResult = main.TRUE
187 for node in nodes:
188 tmpResult = main.ONOSbench.onosInstall( options="-f",
189 node=node.ip_address )
190 onosInstallResult = onosInstallResult and tmpResult
191 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
192 onpass="ONOS install successful",
193 onfail="ONOS install failed" )
194
195 main.step( "Checking if ONOS is up yet" )
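        # Try up to two rounds: any node not up after the first check is
        # stopped and restarted before being checked again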
196 for i in range( 2 ):
197 onosIsupResult = main.TRUE
198 for node in nodes:
199 started = main.ONOSbench.isup( node.ip_address )
200 if not started:
201 main.log.error( node.name + " didn't start!" )
202 main.ONOSbench.onosStop( node.ip_address )
203 main.ONOSbench.onosStart( node.ip_address )
204 onosIsupResult = onosIsupResult and started
205 if onosIsupResult == main.TRUE:
206 break
207 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
208 onpass="ONOS startup successful",
209 onfail="ONOS startup failed" )
210
        main.step( "Starting ONOS CLI sessions" )
212 cliResults = main.TRUE
213 threads = []
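        # Open a CLI session to each controller in parallel; the overall
        # result is the AND of the per-thread results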
214 for i in range( numControllers ):
215 t = main.Thread( target=CLIs[i].startOnosCli,
216 name="startOnosCli-" + str( i ),
217 args=[nodes[i].ip_address] )
218 threads.append( t )
219 t.start()
220
221 for t in threads:
222 t.join()
223 cliResults = cliResults and t.result
224 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
225 onpass="ONOS cli startup successful",
226 onfail="ONOS cli startup failed" )
227
228 if main.params[ 'tcpdump' ].lower() == "true":
229 main.step( "Start Packet Capture MN" )
230 main.Mininet2.startTcpdump(
231 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
232 + "-MN.pcap",
233 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
234 port=main.params[ 'MNtcpdump' ][ 'port' ] )
235
236 main.step( "App Ids check" )
237 appCheck = main.TRUE
238 threads = []
239 for i in range( numControllers ):
240 t = main.Thread( target=CLIs[i].appToIDCheck,
241 name="appToIDCheck-" + str( i ),
242 args=[] )
243 threads.append( t )
244 t.start()
245
246 for t in threads:
247 t.join()
248 appCheck = appCheck and t.result
249 if appCheck != main.TRUE:
250 main.log.warn( CLIs[0].apps() )
251 main.log.warn( CLIs[0].appIDs() )
252 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
253 onpass="App Ids seem to be correct",
254 onfail="Something is wrong with app Ids" )
255
256 if cliResults == main.FALSE:
257 main.log.error( "Failed to start ONOS, stopping test" )
258 main.cleanup()
259 main.exit()
260
261 def CASE2( self, main ):
262 """
263 Assign devices to controllers
264 """
265 import re
266 import time
267 assert numControllers, "numControllers not defined"
268 assert main, "main not defined"
269 assert utilities.assert_equals, "utilities.assert_equals not defined"
270 assert CLIs, "CLIs not defined"
271 assert nodes, "nodes not defined"
272 assert ONOS1Port, "ONOS1Port not defined"
273 assert ONOS2Port, "ONOS2Port not defined"
274 assert ONOS3Port, "ONOS3Port not defined"
275 assert ONOS4Port, "ONOS4Port not defined"
276 assert ONOS5Port, "ONOS5Port not defined"
277 assert ONOS6Port, "ONOS6Port not defined"
278 assert ONOS7Port, "ONOS7Port not defined"
279
280 main.case( "Assigning devices to controllers" )
281 main.caseExplaination = "Assign switches to ONOS using 'ovs-vsctl' " +\
282 "and check that an ONOS node becomes the " +\
283 "master of the device."
284 main.step( "Assign switches to controllers" )
285
286 ipList = []
287 for i in range( numControllers ):
288 ipList.append( nodes[ i ].ip_address )
289 swList = []
290 for i in range( 1, 29 ):
291 swList.append( "s" + str( i ) )
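        # Point all 28 switches at the full list of controller IPs; ONOS
        # then picks a master for each device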
292 main.Mininet1.assignSwController( sw=swList, ip=ipList )
293
294 mastershipCheck = main.TRUE
295 for i in range( 1, 29 ):
296 response = main.Mininet1.getSwController( "s" + str( i ) )
297 try:
298 main.log.info( str( response ) )
299 except Exception:
300 main.log.info( repr( response ) )
301 for node in nodes:
302 if re.search( "tcp:" + node.ip_address, response ):
303 mastershipCheck = mastershipCheck and main.TRUE
304 else:
305 main.log.error( "Error, node " + node.ip_address + " is " +
306 "not in the list of controllers s" +
307 str( i ) + " is connecting to." )
308 mastershipCheck = main.FALSE
309 utilities.assert_equals(
310 expect=main.TRUE,
311 actual=mastershipCheck,
312 onpass="Switch mastership assigned correctly",
313 onfail="Switches not assigned correctly to controllers" )
314
315 def CASE21( self, main ):
316 """
317 Assign mastership to controllers
318 """
319 import re
320 import time
321 assert numControllers, "numControllers not defined"
322 assert main, "main not defined"
323 assert utilities.assert_equals, "utilities.assert_equals not defined"
324 assert CLIs, "CLIs not defined"
325 assert nodes, "nodes not defined"
326 assert ONOS1Port, "ONOS1Port not defined"
327 assert ONOS2Port, "ONOS2Port not defined"
328 assert ONOS3Port, "ONOS3Port not defined"
329 assert ONOS4Port, "ONOS4Port not defined"
330 assert ONOS5Port, "ONOS5Port not defined"
331 assert ONOS6Port, "ONOS6Port not defined"
332 assert ONOS7Port, "ONOS7Port not defined"
333
334 main.case( "Assigning Controller roles for switches" )
335 main.caseExplaination = "Check that ONOS is connected to each " +\
336 "device. Then manually assign" +\
337 " mastership to specific ONOS nodes using" +\
338 " 'device-role'"
339 main.step( "Assign mastership of switches to specific controllers" )
340 # Manually assign mastership to the controller we want
341 roleCall = main.TRUE
342
343 ipList = [ ]
344 deviceList = []
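        # For each switch, look up its device id in ONOS and pick the target
        # controller; the IP/device pairs are saved for the mastership check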
345 try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7-node cluster, but will work with a cluster
            # of any size
349 for i in range( 1, 29 ): # switches 1 through 28
350 # set up correct variables:
351 if i == 1:
352 c = 0
353 ip = nodes[ c ].ip_address # ONOS1
354 deviceId = main.ONOScli1.getDevice( "1000" ).get( 'id' )
355 elif i == 2:
356 c = 1 % numControllers
357 ip = nodes[ c ].ip_address # ONOS2
358 deviceId = main.ONOScli1.getDevice( "2000" ).get( 'id' )
359 elif i == 3:
360 c = 1 % numControllers
361 ip = nodes[ c ].ip_address # ONOS2
362 deviceId = main.ONOScli1.getDevice( "3000" ).get( 'id' )
363 elif i == 4:
364 c = 3 % numControllers
365 ip = nodes[ c ].ip_address # ONOS4
366 deviceId = main.ONOScli1.getDevice( "3004" ).get( 'id' )
367 elif i == 5:
368 c = 2 % numControllers
369 ip = nodes[ c ].ip_address # ONOS3
370 deviceId = main.ONOScli1.getDevice( "5000" ).get( 'id' )
371 elif i == 6:
372 c = 2 % numControllers
373 ip = nodes[ c ].ip_address # ONOS3
374 deviceId = main.ONOScli1.getDevice( "6000" ).get( 'id' )
375 elif i == 7:
376 c = 5 % numControllers
377 ip = nodes[ c ].ip_address # ONOS6
378 deviceId = main.ONOScli1.getDevice( "6007" ).get( 'id' )
379 elif i >= 8 and i <= 17:
380 c = 4 % numControllers
381 ip = nodes[ c ].ip_address # ONOS5
382 dpid = '3' + str( i ).zfill( 3 )
383 deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
384 elif i >= 18 and i <= 27:
385 c = 6 % numControllers
386 ip = nodes[ c ].ip_address # ONOS7
387 dpid = '6' + str( i ).zfill( 3 )
388 deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
389 elif i == 28:
390 c = 0
391 ip = nodes[ c ].ip_address # ONOS1
392 deviceId = main.ONOScli1.getDevice( "2800" ).get( 'id' )
393 else:
394 main.log.error( "You didn't write an else statement for " +
395 "switch s" + str( i ) )
396 roleCall = main.FALSE
397 # Assign switch
398 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
399 # TODO: make this controller dynamic
400 roleCall = roleCall and main.ONOScli1.deviceRole( deviceId,
401 ip )
402 ipList.append( ip )
403 deviceList.append( deviceId )
404 except ( AttributeError, AssertionError ):
405 main.log.exception( "Something is wrong with ONOS device view" )
406 main.log.info( main.ONOScli1.devices() )
407 utilities.assert_equals(
408 expect=main.TRUE,
409 actual=roleCall,
410 onpass="Re-assigned switch mastership to designated controller",
411 onfail="Something wrong with deviceRole calls" )
412
413 main.step( "Check mastership was correctly assigned" )
414 roleCheck = main.TRUE
415 # NOTE: This is due to the fact that device mastership change is not
416 # atomic and is actually a multi step process
417 time.sleep( 5 )
418 for i in range( len( ipList ) ):
419 ip = ipList[i]
420 deviceId = deviceList[i]
421 # Check assignment
422 master = main.ONOScli1.getRole( deviceId ).get( 'master' )
423 if ip in master:
424 roleCheck = roleCheck and main.TRUE
425 else:
426 roleCheck = roleCheck and main.FALSE
427 main.log.error( "Error, controller " + ip + " is not" +
428 " master " + "of device " +
429 str( deviceId ) + ". Master is " +
430 repr( master ) + "." )
431 utilities.assert_equals(
432 expect=main.TRUE,
433 actual=roleCheck,
434 onpass="Switches were successfully reassigned to designated " +
435 "controller",
436 onfail="Switches were not successfully reassigned" )
437
438 def CASE3( self, main ):
439 """
440 Assign intents
441 """
442 import time
443 import json
444 assert numControllers, "numControllers not defined"
445 assert main, "main not defined"
446 assert utilities.assert_equals, "utilities.assert_equals not defined"
447 assert CLIs, "CLIs not defined"
448 assert nodes, "nodes not defined"
449 main.case( "Adding host Intents" )
450 main.caseExplaination = "Discover hosts by using pingall then " +\
451 "assign predetermined host-to-host intents." +\
452 " After installation, check that the intent" +\
453 " is distributed to all nodes and the state" +\
454 " is INSTALLED"
455
456 # install onos-app-fwd
457 main.step( "Install reactive forwarding app" )
458 installResults = CLIs[0].activateApp( "org.onosproject.fwd" )
459 utilities.assert_equals( expect=main.TRUE, actual=installResults,
460 onpass="Install fwd successful",
461 onfail="Install fwd failed" )
462
463 main.step( "Check app ids" )
464 appCheck = main.TRUE
465 threads = []
466 for i in range( numControllers ):
467 t = main.Thread( target=CLIs[i].appToIDCheck,
468 name="appToIDCheck-" + str( i ),
469 args=[] )
470 threads.append( t )
471 t.start()
472
473 for t in threads:
474 t.join()
475 appCheck = appCheck and t.result
476 if appCheck != main.TRUE:
477 main.log.warn( CLIs[0].apps() )
478 main.log.warn( CLIs[0].appIDs() )
479 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
480 onpass="App Ids seem to be correct",
481 onfail="Something is wrong with app Ids" )
482
        main.step( "Discovering hosts (via pingall for now)" )
484 # FIXME: Once we have a host discovery mechanism, use that instead
485 # REACTIVE FWD test
486 pingResult = main.FALSE
487 for i in range(2): # Retry if pingall fails first time
488 time1 = time.time()
489 pingResult = main.Mininet1.pingall()
490 if i == 0:
491 utilities.assert_equals(
492 expect=main.TRUE,
493 actual=pingResult,
494 onpass="Reactive Pingall test passed",
495 onfail="Reactive Pingall failed, " +
496 "one or more ping pairs failed" )
497 time2 = time.time()
498 main.log.info( "Time for pingall: %2f seconds" %
499 ( time2 - time1 ) )
500 # timeout for fwd flows
501 time.sleep( 11 )
502 # uninstall onos-app-fwd
503 main.step( "Uninstall reactive forwarding app" )
504 uninstallResult = CLIs[0].deactivateApp( "org.onosproject.fwd" )
505 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
506 onpass="Uninstall fwd successful",
507 onfail="Uninstall fwd failed" )
508 '''
509 main.Mininet1.handle.sendline( "py [ h.cmd( \"arping -c 1 10.1.1.1 \" ) for h in net.hosts ] ")
510 import time
511 time.sleep(60)
512 '''
513
514 main.step( "Check app ids" )
515 threads = []
516 appCheck2 = main.TRUE
517 for i in range( numControllers ):
518 t = main.Thread( target=CLIs[i].appToIDCheck,
519 name="appToIDCheck-" + str( i ),
520 args=[] )
521 threads.append( t )
522 t.start()
523
524 for t in threads:
525 t.join()
526 appCheck2 = appCheck2 and t.result
527 if appCheck2 != main.TRUE:
528 main.log.warn( CLIs[0].apps() )
529 main.log.warn( CLIs[0].appIDs() )
530 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
531 onpass="App Ids seem to be correct",
532 onfail="Something is wrong with app Ids" )
533
534 main.step( "Add host intents via cli" )
535 intentIds = []
536 # TODO: move the host numbers to params
537 # Maybe look at all the paths we ping?
538 intentAddResult = True
539 hostResult = main.TRUE
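        # Add a host-to-host intent between h<i> and h<i+10> for i in 8..17,
        # spreading the addHostIntent calls across the ONOS nodes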
540 for i in range( 8, 18 ):
541 main.log.info( "Adding host intent between h" + str( i ) +
542 " and h" + str( i + 10 ) )
543 host1 = "00:00:00:00:00:" + \
544 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
545 host2 = "00:00:00:00:00:" + \
546 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
547 # NOTE: getHost can return None
548 host1Dict = main.ONOScli1.getHost( host1 )
549 host2Dict = main.ONOScli1.getHost( host2 )
550 host1Id = None
551 host2Id = None
552 if host1Dict and host2Dict:
553 host1Id = host1Dict.get( 'id', None )
554 host2Id = host2Dict.get( 'id', None )
555 if host1Id and host2Id:
556 nodeNum = ( i % numControllers )
557 tmpId = CLIs[ nodeNum ].addHostIntent( host1Id, host2Id )
558 if tmpId:
559 main.log.info( "Added intent with id: " + tmpId )
560 intentIds.append( tmpId )
561 else:
562 main.log.error( "addHostIntent returned: " +
563 repr( tmpId ) )
564 else:
565 main.log.error( "Error, getHost() failed for h" + str( i ) +
566 " and/or h" + str( i + 10 ) )
567 hosts = CLIs[ 0 ].hosts()
568 main.log.warn( "Hosts output: " )
569 try:
570 main.log.warn( json.dumps( json.loads( hosts ),
571 sort_keys=True,
572 indent=4,
573 separators=( ',', ': ' ) ) )
574 except ( ValueError, TypeError ):
575 main.log.warn( repr( hosts ) )
576 hostResult = main.FALSE
577 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
578 onpass="Found a host id for each host",
579 onfail="Error looking up host ids" )
580
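        # Start a timer; gossipTime below measures how long the submitted
        # intents take to appear on every ONOS node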
581 intentStart = time.time()
582 onosIds = main.ONOScli1.getAllIntentsId()
583 main.log.info( "Submitted intents: " + str( intentIds ) )
584 main.log.info( "Intents in ONOS: " + str( onosIds ) )
585 for intent in intentIds:
586 if intent in onosIds:
587 pass # intent submitted is in onos
588 else:
589 intentAddResult = False
590 if intentAddResult:
591 intentStop = time.time()
592 else:
593 intentStop = None
594 # Print the intent states
595 intents = main.ONOScli1.intents()
596 intentStates = []
597 installedCheck = True
598 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
599 count = 0
600 try:
601 for intent in json.loads( intents ):
602 state = intent.get( 'state', None )
603 if "INSTALLED" not in state:
604 installedCheck = False
605 intentId = intent.get( 'id', None )
606 intentStates.append( ( intentId, state ) )
607 except ( ValueError, TypeError ):
608 main.log.exception( "Error parsing intents" )
609 # add submitted intents not in the store
610 tmplist = [ i for i, s in intentStates ]
611 missingIntents = False
612 for i in intentIds:
613 if i not in tmplist:
614 intentStates.append( ( i, " - " ) )
615 missingIntents = True
616 intentStates.sort()
617 for i, s in intentStates:
618 count += 1
619 main.log.info( "%-6s%-15s%-15s" %
620 ( str( count ), str( i ), str( s ) ) )
621 leaders = main.ONOScli1.leaders()
622 try:
623 missing = False
624 if leaders:
625 parsedLeaders = json.loads( leaders )
626 main.log.warn( json.dumps( parsedLeaders,
627 sort_keys=True,
628 indent=4,
629 separators=( ',', ': ' ) ) )
630 # check for all intent partitions
631 topics = []
632 for i in range( 14 ):
633 topics.append( "intent-partition-" + str( i ) )
634 main.log.debug( topics )
635 ONOStopics = [ j['topic'] for j in parsedLeaders ]
636 for topic in topics:
637 if topic not in ONOStopics:
638 main.log.error( "Error: " + topic +
639 " not in leaders" )
640 missing = True
641 else:
642 main.log.error( "leaders() returned None" )
643 except ( ValueError, TypeError ):
644 main.log.exception( "Error parsing leaders" )
645 main.log.error( repr( leaders ) )
646 # Check all nodes
647 if missing:
648 for node in CLIs:
649 response = node.leaders( jsonFormat=False)
650 main.log.warn( str( node.name ) + " leaders output: \n" +
651 str( response ) )
652
653 partitions = main.ONOScli1.partitions()
654 try:
655 if partitions :
656 parsedPartitions = json.loads( partitions )
657 main.log.warn( json.dumps( parsedPartitions,
658 sort_keys=True,
659 indent=4,
660 separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
662 # TODO check for consistency among nodes
663 else:
664 main.log.error( "partitions() returned None" )
665 except ( ValueError, TypeError ):
666 main.log.exception( "Error parsing partitions" )
667 main.log.error( repr( partitions ) )
668 pendingMap = main.ONOScli1.pendingMap()
669 try:
670 if pendingMap :
671 parsedPending = json.loads( pendingMap )
672 main.log.warn( json.dumps( parsedPending,
673 sort_keys=True,
674 indent=4,
675 separators=( ',', ': ' ) ) )
676 # TODO check something here?
677 else:
678 main.log.error( "pendingMap() returned None" )
679 except ( ValueError, TypeError ):
680 main.log.exception( "Error parsing pending map" )
681 main.log.error( repr( pendingMap ) )
682
683 intentAddResult = bool( intentAddResult and not missingIntents and
684 installedCheck )
685 if not intentAddResult:
686 main.log.error( "Error in pushing host intents to ONOS" )
687
688 main.step( "Intent Anti-Entropy dispersion" )
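        # Poll each node up to 100 times, once per second, until every
        # submitted intent is present and INSTALLED on every node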
689 for i in range(100):
690 correct = True
691 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
692 for cli in CLIs:
693 onosIds = []
694 ids = cli.getAllIntentsId()
695 onosIds.append( ids )
696 main.log.debug( "Intents in " + cli.name + ": " +
697 str( sorted( onosIds ) ) )
698 if sorted( ids ) != sorted( intentIds ):
699 main.log.warn( "Set of intent IDs doesn't match" )
700 correct = False
701 break
702 else:
703 intents = json.loads( cli.intents() )
704 for intent in intents:
705 if intent[ 'state' ] != "INSTALLED":
706 main.log.warn( "Intent " + intent[ 'id' ] +
707 " is " + intent[ 'state' ] )
708 correct = False
709 break
710 if correct:
711 break
712 else:
713 time.sleep(1)
714 if not intentStop:
715 intentStop = time.time()
716 global gossipTime
717 gossipTime = intentStop - intentStart
718 main.log.info( "It took about " + str( gossipTime ) +
719 " seconds for all intents to appear in each node" )
720 # FIXME: make this time configurable/calculate based off of number of
721 # nodes and gossip rounds
722 utilities.assert_greater_equals(
723 expect=40, actual=gossipTime,
724 onpass="ECM anti-entropy for intents worked within " +
725 "expected time",
726 onfail="Intent ECM anti-entropy took too long" )
727 if gossipTime <= 40:
728 intentAddResult = True
729
730 if not intentAddResult or "key" in pendingMap:
731 import time
732 installedCheck = True
733 main.log.info( "Sleeping 60 seconds to see if intents are found" )
734 time.sleep( 60 )
735 onosIds = main.ONOScli1.getAllIntentsId()
736 main.log.info( "Submitted intents: " + str( intentIds ) )
737 main.log.info( "Intents in ONOS: " + str( onosIds ) )
738 # Print the intent states
739 intents = main.ONOScli1.intents()
740 intentStates = []
741 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
742 count = 0
743 try:
744 for intent in json.loads( intents ):
745 # Iter through intents of a node
746 state = intent.get( 'state', None )
747 if "INSTALLED" not in state:
748 installedCheck = False
749 intentId = intent.get( 'id', None )
750 intentStates.append( ( intentId, state ) )
751 except ( ValueError, TypeError ):
752 main.log.exception( "Error parsing intents" )
753 # add submitted intents not in the store
754 tmplist = [ i for i, s in intentStates ]
755 for i in intentIds:
756 if i not in tmplist:
757 intentStates.append( ( i, " - " ) )
758 intentStates.sort()
759 for i, s in intentStates:
760 count += 1
761 main.log.info( "%-6s%-15s%-15s" %
762 ( str( count ), str( i ), str( s ) ) )
763 leaders = main.ONOScli1.leaders()
764 try:
765 missing = False
766 if leaders:
767 parsedLeaders = json.loads( leaders )
768 main.log.warn( json.dumps( parsedLeaders,
769 sort_keys=True,
770 indent=4,
771 separators=( ',', ': ' ) ) )
772 # check for all intent partitions
773 # check for election
774 topics = []
775 for i in range( 14 ):
776 topics.append( "intent-partition-" + str( i ) )
777 # FIXME: this should only be after we start the app
778 topics.append( "org.onosproject.election" )
779 main.log.debug( topics )
780 ONOStopics = [ j['topic'] for j in parsedLeaders ]
781 for topic in topics:
782 if topic not in ONOStopics:
783 main.log.error( "Error: " + topic +
784 " not in leaders" )
785 missing = True
786 else:
787 main.log.error( "leaders() returned None" )
788 except ( ValueError, TypeError ):
789 main.log.exception( "Error parsing leaders" )
790 main.log.error( repr( leaders ) )
791 # Check all nodes
792 if missing:
793 for node in CLIs:
794 response = node.leaders( jsonFormat=False)
795 main.log.warn( str( node.name ) + " leaders output: \n" +
796 str( response ) )
797
798 partitions = main.ONOScli1.partitions()
799 try:
800 if partitions :
801 parsedPartitions = json.loads( partitions )
802 main.log.warn( json.dumps( parsedPartitions,
803 sort_keys=True,
804 indent=4,
805 separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
807 # TODO check for consistency among nodes
808 else:
809 main.log.error( "partitions() returned None" )
810 except ( ValueError, TypeError ):
811 main.log.exception( "Error parsing partitions" )
812 main.log.error( repr( partitions ) )
813 pendingMap = main.ONOScli1.pendingMap()
814 try:
815 if pendingMap :
816 parsedPending = json.loads( pendingMap )
817 main.log.warn( json.dumps( parsedPending,
818 sort_keys=True,
819 indent=4,
820 separators=( ',', ': ' ) ) )
821 # TODO check something here?
822 else:
823 main.log.error( "pendingMap() returned None" )
824 except ( ValueError, TypeError ):
825 main.log.exception( "Error parsing pending map" )
826 main.log.error( repr( pendingMap ) )
827
828 def CASE4( self, main ):
829 """
830 Ping across added host intents
831 """
832 import json
833 import time
834 assert numControllers, "numControllers not defined"
835 assert main, "main not defined"
836 assert utilities.assert_equals, "utilities.assert_equals not defined"
837 assert CLIs, "CLIs not defined"
838 assert nodes, "nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
840 main.caseExplaination = "Ping across added host intents to check " +\
841 "functionality and check the state of " +\
842 "the intent"
843 main.step( "Ping across added host intents" )
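        # One ping per installed host intent: h<i> should reach h<i+10>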
844 PingResult = main.TRUE
845 for i in range( 8, 18 ):
846 ping = main.Mininet1.pingHost( src="h" + str( i ),
847 target="h" + str( i + 10 ) )
848 PingResult = PingResult and ping
849 if ping == main.FALSE:
850 main.log.warn( "Ping failed between h" + str( i ) +
851 " and h" + str( i + 10 ) )
852 elif ping == main.TRUE:
853 main.log.info( "Ping test passed!" )
854 # Don't set PingResult or you'd override failures
855 if PingResult == main.FALSE:
856 main.log.error(
857 "Intents have not been installed correctly, pings failed." )
858 # TODO: pretty print
859 main.log.warn( "ONOS1 intents: " )
860 try:
861 tmpIntents = main.ONOScli1.intents()
862 main.log.warn( json.dumps( json.loads( tmpIntents ),
863 sort_keys=True,
864 indent=4,
865 separators=( ',', ': ' ) ) )
866 except ( ValueError, TypeError ):
867 main.log.warn( repr( tmpIntents ) )
868 utilities.assert_equals(
869 expect=main.TRUE,
870 actual=PingResult,
871 onpass="Intents have been installed correctly and pings work",
872 onfail="Intents have not been installed correctly, pings failed." )
873
874 main.step( "Check Intent state" )
875 installedCheck = False
876 loopCount = 0
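        # Poll the intent states for up to 40 seconds until all intents
        # report INSTALLED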
877 while not installedCheck and loopCount < 40:
878 installedCheck = True
879 # Print the intent states
880 intents = main.ONOScli1.intents()
881 intentStates = []
882 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
883 count = 0
884 # Iter through intents of a node
885 try:
886 for intent in json.loads( intents ):
887 state = intent.get( 'state', None )
888 if "INSTALLED" not in state:
889 installedCheck = False
890 intentId = intent.get( 'id', None )
891 intentStates.append( ( intentId, state ) )
892 except ( ValueError, TypeError ):
893 main.log.exception( "Error parsing intents." )
894 # Print states
895 intentStates.sort()
896 for i, s in intentStates:
897 count += 1
898 main.log.info( "%-6s%-15s%-15s" %
899 ( str( count ), str( i ), str( s ) ) )
900 if not installedCheck:
901 time.sleep( 1 )
902 loopCount += 1
903 utilities.assert_equals( expect=True, actual=installedCheck,
904 onpass="Intents are all INSTALLED",
905 onfail="Intents are not all in " +
906 "INSTALLED state" )
907
908 main.step( "Check leadership of topics" )
909 leaders = main.ONOScli1.leaders()
910 topicCheck = main.TRUE
911 try:
912 if leaders:
913 parsedLeaders = json.loads( leaders )
914 main.log.warn( json.dumps( parsedLeaders,
915 sort_keys=True,
916 indent=4,
917 separators=( ',', ': ' ) ) )
918 # check for all intent partitions
919 # check for election
920 # TODO: Look at Devices as topics now that it uses this system
921 topics = []
922 for i in range( 14 ):
923 topics.append( "intent-partition-" + str( i ) )
924 # FIXME: this should only be after we start the app
925 # FIXME: topics.append( "org.onosproject.election" )
926 # Print leaders output
927 main.log.debug( topics )
928 ONOStopics = [ j['topic'] for j in parsedLeaders ]
929 for topic in topics:
930 if topic not in ONOStopics:
931 main.log.error( "Error: " + topic +
932 " not in leaders" )
933 topicCheck = main.FALSE
934 else:
935 main.log.error( "leaders() returned None" )
936 topicCheck = main.FALSE
937 except ( ValueError, TypeError ):
938 topicCheck = main.FALSE
939 main.log.exception( "Error parsing leaders" )
940 main.log.error( repr( leaders ) )
941 # TODO: Check for a leader of these topics
942 # Check all nodes
943 if topicCheck:
944 for node in CLIs:
945 response = node.leaders( jsonFormat=False)
946 main.log.warn( str( node.name ) + " leaders output: \n" +
947 str( response ) )
948
949 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
950 onpass="intent Partitions is in leaders",
951 onfail="Some topics were lost " )
952 # Print partitions
953 partitions = main.ONOScli1.partitions()
954 try:
955 if partitions :
956 parsedPartitions = json.loads( partitions )
957 main.log.warn( json.dumps( parsedPartitions,
958 sort_keys=True,
959 indent=4,
960 separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
962 # TODO check for consistency among nodes
963 else:
964 main.log.error( "partitions() returned None" )
965 except ( ValueError, TypeError ):
966 main.log.exception( "Error parsing partitions" )
967 main.log.error( repr( partitions ) )
968 # Print Pending Map
969 pendingMap = main.ONOScli1.pendingMap()
970 try:
971 if pendingMap :
972 parsedPending = json.loads( pendingMap )
973 main.log.warn( json.dumps( parsedPending,
974 sort_keys=True,
975 indent=4,
976 separators=( ',', ': ' ) ) )
977 # TODO check something here?
978 else:
979 main.log.error( "pendingMap() returned None" )
980 except ( ValueError, TypeError ):
981 main.log.exception( "Error parsing pending map" )
982 main.log.error( repr( pendingMap ) )
983
984 if not installedCheck:
985 main.log.info( "Waiting 60 seconds to see if the state of " +
986 "intents change" )
987 time.sleep( 60 )
988 # Print the intent states
989 intents = main.ONOScli1.intents()
990 intentStates = []
991 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
992 count = 0
993 # Iter through intents of a node
994 try:
995 for intent in json.loads( intents ):
996 state = intent.get( 'state', None )
997 if "INSTALLED" not in state:
998 installedCheck = False
999 intentId = intent.get( 'id', None )
1000 intentStates.append( ( intentId, state ) )
1001 except ( ValueError, TypeError ):
1002 main.log.exception( "Error parsing intents." )
1003 intentStates.sort()
1004 for i, s in intentStates:
1005 count += 1
1006 main.log.info( "%-6s%-15s%-15s" %
1007 ( str( count ), str( i ), str( s ) ) )
1008 leaders = main.ONOScli1.leaders()
1009 try:
1010 missing = False
1011 if leaders:
1012 parsedLeaders = json.loads( leaders )
1013 main.log.warn( json.dumps( parsedLeaders,
1014 sort_keys=True,
1015 indent=4,
1016 separators=( ',', ': ' ) ) )
1017 # check for all intent partitions
1018 # check for election
1019 topics = []
1020 for i in range( 14 ):
1021 topics.append( "intent-partition-" + str( i ) )
1022 # FIXME: this should only be after we start the app
1023 topics.append( "org.onosproject.election" )
1024 main.log.debug( topics )
1025 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1026 for topic in topics:
1027 if topic not in ONOStopics:
1028 main.log.error( "Error: " + topic +
1029 " not in leaders" )
1030 missing = True
1031 else:
1032 main.log.error( "leaders() returned None" )
1033 except ( ValueError, TypeError ):
1034 main.log.exception( "Error parsing leaders" )
1035 main.log.error( repr( leaders ) )
1036 if missing:
1037 for node in CLIs:
1038 response = node.leaders( jsonFormat=False)
1039 main.log.warn( str( node.name ) + " leaders output: \n" +
1040 str( response ) )
1041
1042 partitions = main.ONOScli1.partitions()
1043 try:
1044 if partitions :
1045 parsedPartitions = json.loads( partitions )
1046 main.log.warn( json.dumps( parsedPartitions,
1047 sort_keys=True,
1048 indent=4,
1049 separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
1051 # TODO check for consistency among nodes
1052 else:
1053 main.log.error( "partitions() returned None" )
1054 except ( ValueError, TypeError ):
1055 main.log.exception( "Error parsing partitions" )
1056 main.log.error( repr( partitions ) )
1057 pendingMap = main.ONOScli1.pendingMap()
1058 try:
1059 if pendingMap :
1060 parsedPending = json.loads( pendingMap )
1061 main.log.warn( json.dumps( parsedPending,
1062 sort_keys=True,
1063 indent=4,
1064 separators=( ',', ': ' ) ) )
1065 # TODO check something here?
1066 else:
1067 main.log.error( "pendingMap() returned None" )
1068 except ( ValueError, TypeError ):
1069 main.log.exception( "Error parsing pending map" )
1070 main.log.error( repr( pendingMap ) )
1071 # Print flowrules
1072 main.log.debug( CLIs[0].flows( jsonFormat=False ) )
1073 main.step( "Wait a minute then ping again" )
1074 # the wait is above
1075 PingResult = main.TRUE
1076 for i in range( 8, 18 ):
1077 ping = main.Mininet1.pingHost( src="h" + str( i ),
1078 target="h" + str( i + 10 ) )
1079 PingResult = PingResult and ping
1080 if ping == main.FALSE:
1081 main.log.warn( "Ping failed between h" + str( i ) +
1082 " and h" + str( i + 10 ) )
1083 elif ping == main.TRUE:
1084 main.log.info( "Ping test passed!" )
1085 # Don't set PingResult or you'd override failures
1086 if PingResult == main.FALSE:
1087 main.log.error(
1088 "Intents have not been installed correctly, pings failed." )
1089 # TODO: pretty print
1090 main.log.warn( "ONOS1 intents: " )
1091 try:
1092 tmpIntents = main.ONOScli1.intents()
1093 main.log.warn( json.dumps( json.loads( tmpIntents ),
1094 sort_keys=True,
1095 indent=4,
1096 separators=( ',', ': ' ) ) )
1097 except ( ValueError, TypeError ):
1098 main.log.warn( repr( tmpIntents ) )
1099 utilities.assert_equals(
1100 expect=main.TRUE,
1101 actual=PingResult,
1102 onpass="Intents have been installed correctly and pings work",
1103 onfail="Intents have not been installed correctly, pings failed." )
1104
1105 def CASE5( self, main ):
1106 """
1107 Reading state of ONOS
1108 """
1109 import json
1110 import time
1111 assert numControllers, "numControllers not defined"
1112 assert main, "main not defined"
1113 assert utilities.assert_equals, "utilities.assert_equals not defined"
1114 assert CLIs, "CLIs not defined"
1115 assert nodes, "nodes not defined"
1116
1117 main.case( "Setting up and gathering data for current state" )
1118 # The general idea for this test case is to pull the state of
1119 # ( intents,flows, topology,... ) from each ONOS node
1120 # We can then compare them with each other and also with past states
1121
1122 main.step( "Check that each switch has a master" )
1123 global mastershipState
1124 mastershipState = '[]'
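        # mastershipState is stored globally and compared against the roles
        # read back in CASE7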
1125
1126 # Assert that each device has a master
1127 rolesNotNull = main.TRUE
1128 threads = []
1129 for i in range( numControllers ):
1130 t = main.Thread( target=CLIs[i].rolesNotNull,
1131 name="rolesNotNull-" + str( i ),
1132 args=[] )
1133 threads.append( t )
1134 t.start()
1135
1136 for t in threads:
1137 t.join()
1138 rolesNotNull = rolesNotNull and t.result
1139 utilities.assert_equals(
1140 expect=main.TRUE,
1141 actual=rolesNotNull,
1142 onpass="Each device has a master",
1143 onfail="Some devices don't have a master assigned" )
1144
1145 main.step( "Get the Mastership of each switch from each controller" )
1146 ONOSMastership = []
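        # Collect the roles output from every node in parallel, then check
        # the responses for errors and for consistency across nodes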
1147 mastershipCheck = main.FALSE
1148 consistentMastership = True
1149 rolesResults = True
1150 threads = []
1151 for i in range( numControllers ):
1152 t = main.Thread( target=CLIs[i].roles,
1153 name="roles-" + str( i ),
1154 args=[] )
1155 threads.append( t )
1156 t.start()
1157
1158 for t in threads:
1159 t.join()
1160 ONOSMastership.append( t.result )
1161
1162 for i in range( numControllers ):
1163 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1164 main.log.error( "Error in getting ONOS" + str( i + 1 ) +
1165 " roles" )
1166 main.log.warn(
1167 "ONOS" + str( i + 1 ) + " mastership response: " +
1168 repr( ONOSMastership[i] ) )
1169 rolesResults = False
1170 utilities.assert_equals(
1171 expect=True,
1172 actual=rolesResults,
1173 onpass="No error in reading roles output",
1174 onfail="Error in reading roles from ONOS" )
1175
1176 main.step( "Check for consistency in roles from each controller" )
1177 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1178 main.log.info(
1179 "Switch roles are consistent across all ONOS nodes" )
1180 else:
1181 consistentMastership = False
1182 utilities.assert_equals(
1183 expect=True,
1184 actual=consistentMastership,
1185 onpass="Switch roles are consistent across all ONOS nodes",
1186 onfail="ONOS nodes have different views of switch roles" )
1187
1188 if rolesResults and not consistentMastership:
1189 for i in range( numControllers ):
1190 try:
1191 main.log.warn(
1192 "ONOS" + str( i + 1 ) + " roles: ",
1193 json.dumps(
1194 json.loads( ONOSMastership[ i ] ),
1195 sort_keys=True,
1196 indent=4,
1197 separators=( ',', ': ' ) ) )
1198 except ( ValueError, TypeError ):
1199 main.log.warn( repr( ONOSMastership[ i ] ) )
1200 elif rolesResults and consistentMastership:
1201 mastershipCheck = main.TRUE
1202 mastershipState = ONOSMastership[ 0 ]
1203
1204 main.step( "Get the intents from each controller" )
1205 global intentState
1206 intentState = []
1207 ONOSIntents = []
1208 intentCheck = main.FALSE
1209 consistentIntents = True
1210 intentsResults = True
1211 threads = []
1212 for i in range( numControllers ):
1213 t = main.Thread( target=CLIs[i].intents,
1214 name="intents-" + str( i ),
1215 args=[],
1216 kwargs={ 'jsonFormat': True } )
1217 threads.append( t )
1218 t.start()
1219
1220 for t in threads:
1221 t.join()
1222 ONOSIntents.append( t.result )
1223
1224 for i in range( numControllers ):
1225 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1226 main.log.error( "Error in getting ONOS" + str( i + 1 ) +
1227 " intents" )
1228 main.log.warn( "ONOS" + str( i + 1 ) + " intents response: " +
1229 repr( ONOSIntents[ i ] ) )
1230 intentsResults = False
1231 utilities.assert_equals(
1232 expect=True,
1233 actual=intentsResults,
1234 onpass="No error in reading intents output",
1235 onfail="Error in reading intents from ONOS" )
1236
1237 main.step( "Check for consistency in Intents from each controller" )
1238 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1239 main.log.info( "Intents are consistent across all ONOS " +
1240 "nodes" )
1241 else:
1242 consistentIntents = False
1243 main.log.error( "Intents not consistent" )
1244 utilities.assert_equals(
1245 expect=True,
1246 actual=consistentIntents,
1247 onpass="Intents are consistent across all ONOS nodes",
1248 onfail="ONOS nodes have different views of intents" )
1249
1250 if intentsResults:
1251 # Try to make it easy to figure out what is happening
1252 #
1253 # Intent ONOS1 ONOS2 ...
1254 # 0x01 INSTALLED INSTALLING
1255 # ... ... ...
1256 # ... ... ...
1257 title = " Id"
1258 for n in range( numControllers ):
1259 title += " " * 10 + "ONOS" + str( n + 1 )
1260 main.log.warn( title )
1261 keys = []
1262 try:
1263 # Get the set of all intent keys
1264 for nodeStr in ONOSIntents:
1265 node = json.loads( nodeStr )
1266 for intent in node:
1267 keys.append( intent.get( 'id' ) )
1268 keys = set( keys )
1269 # For each intent key, print the state on each node
1270 for key in keys:
1271 row = "%-13s" % key
1272 for nodeStr in ONOSIntents:
1273 node = json.loads( nodeStr )
1274 for intent in node:
1275 if intent.get( 'id', "Error" ) == key:
1276 row += "%-15s" % intent.get( 'state' )
1277 main.log.warn( row )
1278 # End of intent state table
1279 except ValueError as e:
1280 main.log.exception( e )
1281 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1282
1283 if intentsResults and not consistentIntents:
1284 # print the json objects
1285 n = len(ONOSIntents)
1286 main.log.debug( "ONOS" + str( n ) + " intents: " )
1287 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1288 sort_keys=True,
1289 indent=4,
1290 separators=( ',', ': ' ) ) )
1291 for i in range( numControllers ):
1292 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1293 main.log.debug( "ONOS" + str( i + 1 ) + " intents: " )
1294 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1295 sort_keys=True,
1296 indent=4,
1297 separators=( ',', ': ' ) ) )
1298 else:
1299 main.log.debug( nodes[ i ].name + " intents match ONOS" +
1300 str( n ) + " intents" )
1301 elif intentsResults and consistentIntents:
1302 intentCheck = main.TRUE
1303 intentState = ONOSIntents[ 0 ]
1304
1305 main.step( "Get the flows from each controller" )
1306 global flowState
1307 flowState = []
1308 ONOSFlows = []
1309 ONOSFlowsJson = []
1310 flowCheck = main.FALSE
1311 consistentFlows = True
1312 flowsResults = True
1313 threads = []
1314 for i in range( numControllers ):
1315 t = main.Thread( target=CLIs[i].flows,
1316 name="flows-" + str( i ),
1317 args=[],
1318 kwargs={ 'jsonFormat': True } )
1319 threads.append( t )
1320 t.start()
1321
1322 # NOTE: Flows command can take some time to run
1323 time.sleep(30)
1324 for t in threads:
1325 t.join()
1326 result = t.result
1327 ONOSFlows.append( result )
1328
1329 for i in range( numControllers ):
1330 num = str( i + 1 )
1331 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1332 main.log.error( "Error in getting ONOS" + num + " flows" )
1333 main.log.warn( "ONOS" + num + " flows response: " +
1334 repr( ONOSFlows[ i ] ) )
1335 flowsResults = False
1336 ONOSFlowsJson.append( None )
1337 else:
1338 try:
1339 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1340 except ( ValueError, TypeError ):
1341 # FIXME: change this to log.error?
1342 main.log.exception( "Error in parsing ONOS" + num +
1343 " response as json." )
1344 main.log.error( repr( ONOSFlows[ i ] ) )
1345 ONOSFlowsJson.append( None )
1346 flowsResults = False
1347 utilities.assert_equals(
1348 expect=True,
1349 actual=flowsResults,
1350 onpass="No error in reading flows output",
1351 onfail="Error in reading flows from ONOS" )
1352
1353 main.step( "Check for consistency in Flows from each controller" )
1354 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1355 if all( tmp ):
1356 main.log.info( "Flow count is consistent across all ONOS nodes" )
1357 else:
1358 consistentFlows = False
1359 utilities.assert_equals(
1360 expect=True,
1361 actual=consistentFlows,
1362 onpass="The flow count is consistent across all ONOS nodes",
1363 onfail="ONOS nodes have different flow counts" )
1364
1365 if flowsResults and not consistentFlows:
1366 for i in range( numControllers ):
1367 try:
1368 main.log.warn(
1369 "ONOS" + str( i + 1 ) + " flows: " +
1370 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1371 indent=4, separators=( ',', ': ' ) ) )
1372 except ( ValueError, TypeError ):
1373 main.log.warn(
1374 "ONOS" + str( i + 1 ) + " flows: " +
1375 repr( ONOSFlows[ i ] ) )
1376 elif flowsResults and consistentFlows:
1377 flowCheck = main.TRUE
1378 flowState = ONOSFlows[ 0 ]
1379
1380 main.step( "Get the OF Table entries" )
1381 global flows
1382 flows = []
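        # Dump the OpenFlow 1.3 flow table of every switch; the tables are
        # only logged if the ONOS flow check above failed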
1383 for i in range( 1, 29 ):
1384 flows.append( main.Mininet2.getFlowTable( 1.3, "s" + str( i ) ) )
1385 if flowCheck == main.FALSE:
1386 for table in flows:
1387 main.log.warn( table )
1388 # TODO: Compare switch flow tables with ONOS flow tables
1389
1390 main.step( "Start continuous pings" )
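        # Start ten long-running background pings using the source/target
        # pairs defined in the params file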
1391 main.Mininet2.pingLong(
1392 src=main.params[ 'PING' ][ 'source1' ],
1393 target=main.params[ 'PING' ][ 'target1' ],
1394 pingTime=500 )
1395 main.Mininet2.pingLong(
1396 src=main.params[ 'PING' ][ 'source2' ],
1397 target=main.params[ 'PING' ][ 'target2' ],
1398 pingTime=500 )
1399 main.Mininet2.pingLong(
1400 src=main.params[ 'PING' ][ 'source3' ],
1401 target=main.params[ 'PING' ][ 'target3' ],
1402 pingTime=500 )
1403 main.Mininet2.pingLong(
1404 src=main.params[ 'PING' ][ 'source4' ],
1405 target=main.params[ 'PING' ][ 'target4' ],
1406 pingTime=500 )
1407 main.Mininet2.pingLong(
1408 src=main.params[ 'PING' ][ 'source5' ],
1409 target=main.params[ 'PING' ][ 'target5' ],
1410 pingTime=500 )
1411 main.Mininet2.pingLong(
1412 src=main.params[ 'PING' ][ 'source6' ],
1413 target=main.params[ 'PING' ][ 'target6' ],
1414 pingTime=500 )
1415 main.Mininet2.pingLong(
1416 src=main.params[ 'PING' ][ 'source7' ],
1417 target=main.params[ 'PING' ][ 'target7' ],
1418 pingTime=500 )
1419 main.Mininet2.pingLong(
1420 src=main.params[ 'PING' ][ 'source8' ],
1421 target=main.params[ 'PING' ][ 'target8' ],
1422 pingTime=500 )
1423 main.Mininet2.pingLong(
1424 src=main.params[ 'PING' ][ 'source9' ],
1425 target=main.params[ 'PING' ][ 'target9' ],
1426 pingTime=500 )
1427 main.Mininet2.pingLong(
1428 src=main.params[ 'PING' ][ 'source10' ],
1429 target=main.params[ 'PING' ][ 'target10' ],
1430 pingTime=500 )
1431
1432 main.step( "Collecting topology information from ONOS" )
1433 devices = []
1434 threads = []
1435 for i in range( numControllers ):
1436 t = main.Thread( target=CLIs[i].devices,
1437 name="devices-" + str( i ),
1438 args=[ ] )
1439 threads.append( t )
1440 t.start()
1441
1442 for t in threads:
1443 t.join()
1444 devices.append( t.result )
1445 hosts = []
1446 threads = []
1447 for i in range( numControllers ):
1448 t = main.Thread( target=CLIs[i].hosts,
1449 name="hosts-" + str( i ),
1450 args=[ ] )
1451 threads.append( t )
1452 t.start()
1453
1454 for t in threads:
1455 t.join()
1456 try:
1457 hosts.append( json.loads( t.result ) )
1458 except ( ValueError, TypeError ):
1459 # FIXME: better handling of this, print which node
1460 # Maybe use thread name?
1461 main.log.exception( "Error parsing json output of hosts" )
1462 # FIXME: should this be an empty json object instead?
1463 hosts.append( None )
1464
1465 ports = []
1466 threads = []
1467 for i in range( numControllers ):
1468 t = main.Thread( target=CLIs[i].ports,
1469 name="ports-" + str( i ),
1470 args=[ ] )
1471 threads.append( t )
1472 t.start()
1473
1474 for t in threads:
1475 t.join()
1476 ports.append( t.result )
1477 links = []
1478 threads = []
1479 for i in range( numControllers ):
1480 t = main.Thread( target=CLIs[i].links,
1481 name="links-" + str( i ),
1482 args=[ ] )
1483 threads.append( t )
1484 t.start()
1485
1486 for t in threads:
1487 t.join()
1488 links.append( t.result )
1489 clusters = []
1490 threads = []
1491 for i in range( numControllers ):
1492 t = main.Thread( target=CLIs[i].clusters,
1493 name="clusters-" + str( i ),
1494 args=[ ] )
1495 threads.append( t )
1496 t.start()
1497
1498 for t in threads:
1499 t.join()
1500 clusters.append( t.result )
1501 # Compare json objects for hosts and dataplane clusters
1502
1503 # hosts
1504 main.step( "Host view is consistent across ONOS nodes" )
1505 consistentHostsResult = main.TRUE
1506 for controller in range( len( hosts ) ):
1507 controllerStr = str( controller + 1 )
1508 if "Error" not in hosts[ controller ]:
1509 if hosts[ controller ] == hosts[ 0 ]:
1510 continue
1511 else: # hosts not consistent
1512 main.log.error( "hosts from ONOS" +
1513 controllerStr +
1514 " is inconsistent with ONOS1" )
1515 main.log.warn( repr( hosts[ controller ] ) )
1516 consistentHostsResult = main.FALSE
1517
1518 else:
1519 main.log.error( "Error in getting ONOS hosts from ONOS" +
1520 controllerStr )
1521 consistentHostsResult = main.FALSE
1522 main.log.warn( "ONOS" + controllerStr +
1523 " hosts response: " +
1524 repr( hosts[ controller ] ) )
1525 utilities.assert_equals(
1526 expect=main.TRUE,
1527 actual=consistentHostsResult,
1528 onpass="Hosts view is consistent across all ONOS nodes",
1529 onfail="ONOS nodes have different views of hosts" )
1530
1531 main.step( "Each host has an IP address" )
1532 ipResult = main.TRUE
1533 for controller in range( 0, len( hosts ) ):
1534 controllerStr = str( controller + 1 )
1535 for host in hosts[ controller ]:
1536 if not host.get( 'ipAddresses', [ ] ):
1537 main.log.error( "DEBUG:Error with host ips on controller" +
1538 controllerStr + ": " + str( host ) )
1539 ipResult = main.FALSE
1540 utilities.assert_equals(
1541 expect=main.TRUE,
1542 actual=ipResult,
1543 onpass="The ips of the hosts aren't empty",
1544 onfail="The ip of at least one host is missing" )
1545
1546 # Strongly connected clusters of devices
1547 main.step( "Cluster view is consistent across ONOS nodes" )
1548 consistentClustersResult = main.TRUE
1549 for controller in range( len( clusters ) ):
1550 controllerStr = str( controller + 1 )
1551 if "Error" not in clusters[ controller ]:
1552 if clusters[ controller ] == clusters[ 0 ]:
1553 continue
1554 else: # clusters not consistent
1555 main.log.error( "clusters from ONOS" + controllerStr +
1556 " is inconsistent with ONOS1" )
1557 consistentClustersResult = main.FALSE
1558
1559 else:
1560 main.log.error( "Error in getting dataplane clusters " +
1561 "from ONOS" + controllerStr )
1562 consistentClustersResult = main.FALSE
1563 main.log.warn( "ONOS" + controllerStr +
1564 " clusters response: " +
1565 repr( clusters[ controller ] ) )
1566 utilities.assert_equals(
1567 expect=main.TRUE,
1568 actual=consistentClustersResult,
1569 onpass="Clusters view is consistent across all ONOS nodes",
1570 onfail="ONOS nodes have different views of clusters" )
1571 # there should always only be one cluster
1572 main.step( "Cluster view correct across ONOS nodes" )
1573 try:
1574 numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            # Avoid a NameError in the check below if parsing failed
            numClusters = "ERROR"
        clusterResults = main.FALSE
1579 if numClusters == 1:
1580 clusterResults = main.TRUE
1581 utilities.assert_equals(
1582 expect=1,
1583 actual=numClusters,
1584 onpass="ONOS shows 1 SCC",
1585 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1586
1587 main.step( "Comparing ONOS topology to MN" )
1588 devicesResults = main.TRUE
1589 linksResults = main.TRUE
1590 hostsResults = main.TRUE
1591 mnSwitches = main.Mininet1.getSwitches()
1592 mnLinks = main.Mininet1.getLinks()
1593 mnHosts = main.Mininet1.getHosts()
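        # Compare each controller's device, link, and host views against the
        # topology read directly from Mininet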
1594 for controller in range( numControllers ):
1595 controllerStr = str( controller + 1 )
1596 if devices[ controller ] and ports[ controller ] and\
1597 "Error" not in devices[ controller ] and\
1598 "Error" not in ports[ controller ]:
1599
1600 currentDevicesResult = main.Mininet1.compareSwitches(
1601 mnSwitches,
1602 json.loads( devices[ controller ] ),
1603 json.loads( ports[ controller ] ) )
1604 else:
1605 currentDevicesResult = main.FALSE
1606 utilities.assert_equals( expect=main.TRUE,
1607 actual=currentDevicesResult,
1608 onpass="ONOS" + controllerStr +
1609 " Switches view is correct",
1610 onfail="ONOS" + controllerStr +
1611 " Switches view is incorrect" )
1612 if links[ controller ] and "Error" not in links[ controller ]:
1613 currentLinksResult = main.Mininet1.compareLinks(
1614 mnSwitches, mnLinks,
1615 json.loads( links[ controller ] ) )
1616 else:
1617 currentLinksResult = main.FALSE
1618 utilities.assert_equals( expect=main.TRUE,
1619 actual=currentLinksResult,
1620 onpass="ONOS" + controllerStr +
1621 " links view is correct",
1622 onfail="ONOS" + controllerStr +
1623 " links view is incorrect" )
1624
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
1626 currentHostsResult = main.Mininet1.compareHosts(
1627 mnHosts,
1628 hosts[ controller ] )
1629 else:
1630 currentHostsResult = main.FALSE
1631 utilities.assert_equals( expect=main.TRUE,
1632 actual=currentHostsResult,
1633 onpass="ONOS" + controllerStr +
1634 " hosts exist in Mininet",
1635 onfail="ONOS" + controllerStr +
1636 " hosts don't match Mininet" )
1637
1638 devicesResults = devicesResults and currentDevicesResult
1639 linksResults = linksResults and currentLinksResult
1640 hostsResults = hostsResults and currentHostsResult
1641
1642 main.step( "Device information is correct" )
1643 utilities.assert_equals(
1644 expect=main.TRUE,
1645 actual=devicesResults,
1646 onpass="Device information is correct",
1647 onfail="Device information is incorrect" )
1648
1649 main.step( "Links are correct" )
1650 utilities.assert_equals(
1651 expect=main.TRUE,
1652 actual=linksResults,
            onpass="Links are correct",
1654 onfail="Links are incorrect" )
1655
1656 main.step( "Hosts are correct" )
1657 utilities.assert_equals(
1658 expect=main.TRUE,
1659 actual=hostsResults,
1660 onpass="Hosts are correct",
1661 onfail="Hosts are incorrect" )
1662
1663 def CASE6( self, main ):
1664 """
1665 The Failure case. Since this is the Sanity test, we do nothing.
1666 """
1667 import time
1668 assert numControllers, "numControllers not defined"
1669 assert main, "main not defined"
1670 assert utilities.assert_equals, "utilities.assert_equals not defined"
1671 assert CLIs, "CLIs not defined"
1672 assert nodes, "nodes not defined"
1673 main.case( "Wait 60 seconds instead of inducing a failure" )
1674 time.sleep( 60 )
1675 utilities.assert_equals(
1676 expect=main.TRUE,
1677 actual=main.TRUE,
1678 onpass="Sleeping 60 seconds",
1679 onfail="Something is terribly wrong with my math" )
1680
1681 def CASE7( self, main ):
1682 """
1683 Check state after ONOS failure
1684 """
1685 import json
1686 assert numControllers, "numControllers not defined"
1687 assert main, "main not defined"
1688 assert utilities.assert_equals, "utilities.assert_equals not defined"
1689 assert CLIs, "CLIs not defined"
1690 assert nodes, "nodes not defined"
1691 main.case( "Running ONOS Constant State Tests" )
1692
1693 main.step( "Check that each switch has a master" )
1694 # Assert that each device has a master
1695 rolesNotNull = main.TRUE
1696 threads = []
1697 for i in range( numControllers ):
1698 t = main.Thread( target=CLIs[i].rolesNotNull,
1699 name="rolesNotNull-" + str( i ),
1700 args=[ ] )
1701 threads.append( t )
1702 t.start()
1703
1704 for t in threads:
1705 t.join()
1706 rolesNotNull = rolesNotNull and t.result
1707 utilities.assert_equals(
1708 expect=main.TRUE,
1709 actual=rolesNotNull,
1710 onpass="Each device has a master",
1711 onfail="Some devices don't have a master assigned" )
1712
1713 main.step( "Read device roles from ONOS" )
1714 ONOSMastership = []
1715 mastershipCheck = main.FALSE
1716 consistentMastership = True
1717 rolesResults = True
1718 threads = []
1719 for i in range( numControllers ):
1720 t = main.Thread( target=CLIs[i].roles,
1721 name="roles-" + str( i ),
1722 args=[] )
1723 threads.append( t )
1724 t.start()
1725
1726 for t in threads:
1727 t.join()
1728 ONOSMastership.append( t.result )
1729
1730 for i in range( numControllers ):
1731 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1732 main.log.error( "Error in getting ONOS" + str( i + 1 ) +
1733 " roles" )
1734 main.log.warn(
1735 "ONOS" + str( i + 1 ) + " mastership response: " +
1736 repr( ONOSMastership[i] ) )
1737 rolesResults = False
1738 utilities.assert_equals(
1739 expect=True,
1740 actual=rolesResults,
1741 onpass="No error in reading roles output",
1742 onfail="Error in reading roles from ONOS" )
1743
1744 main.step( "Check for consistency in roles from each controller" )
1745 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1746 main.log.info(
1747 "Switch roles are consistent across all ONOS nodes" )
1748 else:
1749 consistentMastership = False
1750 utilities.assert_equals(
1751 expect=True,
1752 actual=consistentMastership,
1753 onpass="Switch roles are consistent across all ONOS nodes",
1754 onfail="ONOS nodes have different views of switch roles" )
1755
1756 if rolesResults and not consistentMastership:
1757 for i in range( numControllers ):
1758 main.log.warn(
1759 "ONOS" + str( i + 1 ) + " roles: ",
1760 json.dumps(
1761 json.loads( ONOSMastership[ i ] ),
1762 sort_keys=True,
1763 indent=4,
1764 separators=( ',', ': ' ) ) )
1765        elif rolesResults and consistentMastership:
1766 mastershipCheck = main.TRUE
1767
1768 description2 = "Compare switch roles from before failure"
1769 main.step( description2 )
1770 try:
1771 currentJson = json.loads( ONOSMastership[0] )
1772 oldJson = json.loads( mastershipState )
1773 except ( ValueError, TypeError ):
1774 main.log.exception( "Something is wrong with parsing " +
1775 "ONOSMastership[0] or mastershipState" )
1776 main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[0] ) )
1777            main.log.error( "mastershipState: " + repr( mastershipState ) )
1778 main.cleanup()
1779 main.exit()
1780 mastershipCheck = main.TRUE
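        # Compare the master of each switch ( s1-s28 ) to the master recorded
        # before the failure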
1781 for i in range( 1, 29 ):
1782 switchDPID = str(
1783 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
1784 current = [ switch[ 'master' ] for switch in currentJson
1785 if switchDPID in switch[ 'id' ] ]
1786 old = [ switch[ 'master' ] for switch in oldJson
1787 if switchDPID in switch[ 'id' ] ]
1788 if current == old:
1789 mastershipCheck = mastershipCheck and main.TRUE
1790 else:
1791 main.log.warn( "Mastership of switch %s changed" % switchDPID )
1792 mastershipCheck = main.FALSE
1793 utilities.assert_equals(
1794 expect=main.TRUE,
1795 actual=mastershipCheck,
1796 onpass="Mastership of Switches was not changed",
1797 onfail="Mastership of some switches changed" )
1798 mastershipCheck = mastershipCheck and consistentMastership
1799
1800 main.step( "Get the intents and compare across all nodes" )
1801 ONOSIntents = []
1802 intentCheck = main.FALSE
1803 consistentIntents = True
1804 intentsResults = True
1805 threads = []
1806 for i in range( numControllers ):
1807 t = main.Thread( target=CLIs[i].intents,
1808 name="intents-" + str( i ),
1809 args=[],
1810 kwargs={ 'jsonFormat': True } )
1811 threads.append( t )
1812 t.start()
1813
1814 for t in threads:
1815 t.join()
1816 ONOSIntents.append( t.result )
1817
1818 for i in range( numControllers ):
1819 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1820 main.log.error( "Error in getting ONOS" + str( i + 1 ) +
1821 " intents" )
1822 main.log.warn( "ONOS" + str( i + 1 ) + " intents response: " +
1823 repr( ONOSIntents[ i ] ) )
1824 intentsResults = False
1825 utilities.assert_equals(
1826 expect=True,
1827 actual=intentsResults,
1828 onpass="No error in reading intents output",
1829 onfail="Error in reading intents from ONOS" )
1830
1831 main.step( "Check for consistency in Intents from each controller" )
1832 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1833 main.log.info( "Intents are consistent across all ONOS " +
1834 "nodes" )
1835 else:
1836 consistentIntents = False
1837
1838 # Try to make it easy to figure out what is happening
1839 #
1840 # Intent ONOS1 ONOS2 ...
1841 # 0x01 INSTALLED INSTALLING
1842 # ... ... ...
1843 # ... ... ...
1844 title = " ID"
1845 for n in range( numControllers ):
1846 title += " " * 10 + "ONOS" + str( n + 1 )
1847 main.log.warn( title )
1848 # get all intent keys in the cluster
1849 keys = []
1850 for nodeStr in ONOSIntents:
1851 node = json.loads( nodeStr )
1852 for intent in node:
1853 keys.append( intent.get( 'id' ) )
1854 keys = set( keys )
1855 for key in keys:
1856 row = "%-13s" % key
1857 for nodeStr in ONOSIntents:
1858 node = json.loads( nodeStr )
1859 for intent in node:
1860 if intent.get( 'id' ) == key:
1861 row += "%-15s" % intent.get( 'state' )
1862 main.log.warn( row )
1863 # End table view
1864
1865 utilities.assert_equals(
1866 expect=True,
1867 actual=consistentIntents,
1868 onpass="Intents are consistent across all ONOS nodes",
1869 onfail="ONOS nodes have different views of intents" )
1870 intentStates = []
1871 for node in ONOSIntents: # Iter through ONOS nodes
1872 nodeStates = []
1873 # Iter through intents of a node
1874 try:
1875 for intent in json.loads( node ):
1876 nodeStates.append( intent[ 'state' ] )
1877 except ( ValueError, TypeError ):
1878 main.log.exception( "Error in parsing intents" )
1879 main.log.error( repr( node ) )
1880 intentStates.append( nodeStates )
1881 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
1882 main.log.info( dict( out ) )
1883
1884 if intentsResults and not consistentIntents:
1885 for i in range( numControllers ):
1886 main.log.warn( "ONOS" + str( i + 1 ) + " intents: " )
1887 main.log.warn( json.dumps(
1888 json.loads( ONOSIntents[ i ] ),
1889 sort_keys=True,
1890 indent=4,
1891 separators=( ',', ': ' ) ) )
1892 elif intentsResults and consistentIntents:
1893 intentCheck = main.TRUE
1894
1895 # NOTE: Store has no durability, so intents are lost across system
1896 # restarts
1897 main.step( "Compare current intents with intents before the failure" )
1898 # NOTE: this requires case 5 to pass for intentState to be set.
1899 # maybe we should stop the test if that fails?
1900 sameIntents = main.FALSE
1901 if intentState and intentState == ONOSIntents[ 0 ]:
1902 sameIntents = main.TRUE
1903 main.log.info( "Intents are consistent with before failure" )
1904 # TODO: possibly the states have changed? we may need to figure out
1905 # what the acceptable states are
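        # The raw JSON strings differ; as a fallback, if they are the same
        # length, parse both and check that every pre-failure intent is still
        # present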
1906 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
1907 sameIntents = main.TRUE
1908 try:
1909 before = json.loads( intentState )
1910 after = json.loads( ONOSIntents[ 0 ] )
1911 for intent in before:
1912 if intent not in after:
1913 sameIntents = main.FALSE
1914 main.log.debug( "Intent is not currently in ONOS " +
1915 "(at least in the same form):" )
1916 main.log.debug( json.dumps( intent ) )
1917 except ( ValueError, TypeError ):
1918 main.log.exception( "Exception printing intents" )
1919 main.log.debug( repr( ONOSIntents[0] ) )
1920 main.log.debug( repr( intentState ) )
1921 if sameIntents == main.FALSE:
1922 try:
1923 main.log.debug( "ONOS intents before: " )
1924 main.log.debug( json.dumps( json.loads( intentState ),
1925 sort_keys=True, indent=4,
1926 separators=( ',', ': ' ) ) )
1927 main.log.debug( "Current ONOS intents: " )
1928 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
1929 sort_keys=True, indent=4,
1930 separators=( ',', ': ' ) ) )
1931 except ( ValueError, TypeError ):
1932 main.log.exception( "Exception printing intents" )
1933 main.log.debug( repr( ONOSIntents[0] ) )
1934 main.log.debug( repr( intentState ) )
1935 utilities.assert_equals(
1936 expect=main.TRUE,
1937 actual=sameIntents,
1938 onpass="Intents are consistent with before failure",
1939 onfail="The Intents changed during failure" )
1940 intentCheck = intentCheck and sameIntents
1941
1942 main.step( "Get the OF Table entries and compare to before " +
1943 "component failure" )
1944 FlowTables = main.TRUE
1945 flows2 = []
1946 for i in range( 28 ):
1947 main.log.info( "Checking flow table on s" + str( i + 1 ) )
1948 tmpFlows = main.Mininet2.getFlowTable( 1.3, "s" + str( i + 1 ) )
1949 flows2.append( tmpFlows )
1950 tempResult = main.Mininet2.flowComp(
1951 flow1=flows[ i ],
1952 flow2=tmpFlows )
1953 FlowTables = FlowTables and tempResult
1954 if FlowTables == main.FALSE:
1955 main.log.info( "Differences in flow table for switch: s" +
1956 str( i + 1 ) )
1957 utilities.assert_equals(
1958 expect=main.TRUE,
1959 actual=FlowTables,
1960 onpass="No changes were found in the flow tables",
1961 onfail="Changes were found in the flow tables" )
1962
1963 main.Mininet2.pingLongKill()
1964 '''
1965 main.step( "Check the continuous pings to ensure that no packets " +
1966 "were dropped during component failure" )
1967 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
1968 main.params[ 'TESTONIP' ] )
1969 LossInPings = main.FALSE
1970 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
1971 for i in range( 8, 18 ):
1972 main.log.info(
1973 "Checking for a loss in pings along flow from s" +
1974 str( i ) )
1975 LossInPings = main.Mininet2.checkForLoss(
1976 "/tmp/ping.h" +
1977 str( i ) ) or LossInPings
1978 if LossInPings == main.TRUE:
1979 main.log.info( "Loss in ping detected" )
1980 elif LossInPings == main.ERROR:
1981 main.log.info( "There are multiple mininet process running" )
1982 elif LossInPings == main.FALSE:
1983 main.log.info( "No Loss in the pings" )
1984 main.log.info( "No loss of dataplane connectivity" )
1985 utilities.assert_equals(
1986 expect=main.FALSE,
1987 actual=LossInPings,
1988 onpass="No Loss of connectivity",
1989 onfail="Loss of dataplane connectivity detected" )
1990 '''
1991
1992 main.step( "Leadership Election is still functional" )
1993 # Test of LeadershipElection
1994 # NOTE: this only works for the sanity test. In case of failures,
1995 # leader will likely change
1996 leader = nodes[ 0 ].ip_address
1997 leaderResult = main.TRUE
1998 for cli in CLIs:
1999 leaderN = cli.electionTestLeader()
2000 # verify leader is ONOS1
2001 if leaderN == leader:
2002 # all is well
2003 # NOTE: In failure scenario, this could be a new node, maybe
2004 # check != ONOS1
2005 pass
2006 elif leaderN == main.FALSE:
2007 # error in response
2008 main.log.error( "Something is wrong with " +
2009 "electionTestLeader function, check the" +
2010 " error logs" )
2011 leaderResult = main.FALSE
2012 elif leader != leaderN:
2013 leaderResult = main.FALSE
2014 main.log.error( cli.name + " sees " + str( leaderN ) +
2015 " as the leader of the election app. " +
2016 "Leader should be " + str( leader ) )
2017 utilities.assert_equals(
2018 expect=main.TRUE,
2019 actual=leaderResult,
2020 onpass="Leadership election passed",
2021 onfail="Something went wrong with Leadership election" )
2022
2023 def CASE8( self, main ):
2024 """
2025 Compare topo
2026 """
2027 import json
2028 import time
2029 assert numControllers, "numControllers not defined"
2030 assert main, "main not defined"
2031 assert utilities.assert_equals, "utilities.assert_equals not defined"
2032 assert CLIs, "CLIs not defined"
2033 assert nodes, "nodes not defined"
2034
2035 main.case( "Compare ONOS Topology view to Mininet topology" )
2036 main.caseExplaination = "Compare topology objects between Mininet" +\
2037 " and ONOS"
2038
2039 main.step( "Comparing ONOS topology to MN" )
2040 devicesResults = main.TRUE
2041 linksResults = main.TRUE
2042 hostsResults = main.TRUE
2043 hostAttachmentResults = True
2044 topoResult = main.FALSE
2045 elapsed = 0
2046 count = 0
2047 main.step( "Collecting topology information from ONOS" )
2048 startTime = time.time()
2049 # Give time for Gossip to work
2050 while topoResult == main.FALSE and elapsed < 60:
2051 count += 1
2052 cliStart = time.time()
2053 devices = []
2054 threads = []
2055 for i in range( numControllers ):
2056 t = main.Thread( target=CLIs[i].devices,
2057 name="devices-" + str( i ),
2058 args=[ ] )
2059 threads.append( t )
2060 t.start()
2061
2062 for t in threads:
2063 t.join()
2064 devices.append( t.result )
2065 hosts = []
2066 ipResult = main.TRUE
2067 threads = []
2068 for i in range( numControllers ):
2069 t = main.Thread( target=CLIs[i].hosts,
2070 name="hosts-" + str( i ),
2071 args=[ ] )
2072 threads.append( t )
2073 t.start()
2074
2075 for t in threads:
2076 t.join()
2077 try:
2078 hosts.append( json.loads( t.result ) )
2079 except ( ValueError, TypeError ):
2080 main.log.exception( "Error parsing hosts results" )
2081 main.log.error( repr( t.result ) )
2082 for controller in range( 0, len( hosts ) ):
2083 controllerStr = str( controller + 1 )
2084 for host in hosts[ controller ]:
2085 if host is None or host.get( 'ipAddresses', [] ) == []:
2086 main.log.error(
2087 "DEBUG:Error with host ipAddresses on controller" +
2088 controllerStr + ": " + str( host ) )
2089 ipResult = main.FALSE
2090 ports = []
2091 threads = []
2092 for i in range( numControllers ):
2093 t = main.Thread( target=CLIs[i].ports,
2094 name="ports-" + str( i ),
2095 args=[ ] )
2096 threads.append( t )
2097 t.start()
2098
2099 for t in threads:
2100 t.join()
2101 ports.append( t.result )
2102 links = []
2103 threads = []
2104 for i in range( numControllers ):
2105 t = main.Thread( target=CLIs[i].links,
2106 name="links-" + str( i ),
2107 args=[ ] )
2108 threads.append( t )
2109 t.start()
2110
2111 for t in threads:
2112 t.join()
2113 links.append( t.result )
2114 clusters = []
2115 threads = []
2116 for i in range( numControllers ):
2117 t = main.Thread( target=CLIs[i].clusters,
2118 name="clusters-" + str( i ),
2119 args=[ ] )
2120 threads.append( t )
2121 t.start()
2122
2123 for t in threads:
2124 t.join()
2125 clusters.append( t.result )
2126
2127 elapsed = time.time() - startTime
2128 cliTime = time.time() - cliStart
2129 print "Elapsed time: " + str( elapsed )
2130 print "CLI time: " + str( cliTime )
2131
2132 mnSwitches = main.Mininet1.getSwitches()
2133 mnLinks = main.Mininet1.getLinks()
2134 mnHosts = main.Mininet1.getHosts()
2135 for controller in range( numControllers ):
2136 controllerStr = str( controller + 1 )
2137 if devices[ controller ] and ports[ controller ] and\
2138 "Error" not in devices[ controller ] and\
2139 "Error" not in ports[ controller ]:
2140
2141 currentDevicesResult = main.Mininet1.compareSwitches(
2142 mnSwitches,
2143 json.loads( devices[ controller ] ),
2144 json.loads( ports[ controller ] ) )
2145 else:
2146 currentDevicesResult = main.FALSE
2147 utilities.assert_equals( expect=main.TRUE,
2148 actual=currentDevicesResult,
2149 onpass="ONOS" + controllerStr +
2150 " Switches view is correct",
2151 onfail="ONOS" + controllerStr +
2152 " Switches view is incorrect" )
2153
2154 if links[ controller ] and "Error" not in links[ controller ]:
2155 currentLinksResult = main.Mininet1.compareLinks(
2156 mnSwitches, mnLinks,
2157 json.loads( links[ controller ] ) )
2158 else:
2159 currentLinksResult = main.FALSE
2160 utilities.assert_equals( expect=main.TRUE,
2161 actual=currentLinksResult,
2162 onpass="ONOS" + controllerStr +
2163 " links view is correct",
2164 onfail="ONOS" + controllerStr +
2165 " links view is incorrect" )
2166
2167                if hosts[ controller ] and "Error" not in hosts[ controller ]:
2168 currentHostsResult = main.Mininet1.compareHosts(
2169 mnHosts,
2170 hosts[ controller ] )
2171 else:
2172 currentHostsResult = main.FALSE
2173 utilities.assert_equals( expect=main.TRUE,
2174 actual=currentHostsResult,
2175 onpass="ONOS" + controllerStr +
2176 " hosts exist in Mininet",
2177 onfail="ONOS" + controllerStr +
2178 " hosts don't match Mininet" )
2179 # CHECKING HOST ATTACHMENT POINTS
2180 hostAttachment = True
2181 zeroHosts = False
2182 # FIXME: topo-HA/obelisk specific mappings:
2183 # key is mac and value is dpid
2184 mappings = {}
2185 for i in range( 1, 29 ): # hosts 1 through 28
2186 # set up correct variables:
2187 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
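                # e.g. i=1 gives macId "00:00:00:00:00:01"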
2188 if i == 1:
2189 deviceId = "1000".zfill(16)
2190 elif i == 2:
2191 deviceId = "2000".zfill(16)
2192 elif i == 3:
2193 deviceId = "3000".zfill(16)
2194 elif i == 4:
2195 deviceId = "3004".zfill(16)
2196 elif i == 5:
2197 deviceId = "5000".zfill(16)
2198 elif i == 6:
2199 deviceId = "6000".zfill(16)
2200 elif i == 7:
2201 deviceId = "6007".zfill(16)
2202 elif i >= 8 and i <= 17:
2203 dpid = '3' + str( i ).zfill( 3 )
2204 deviceId = dpid.zfill(16)
2205 elif i >= 18 and i <= 27:
2206 dpid = '6' + str( i ).zfill( 3 )
2207 deviceId = dpid.zfill(16)
2208 elif i == 28:
2209 deviceId = "2800".zfill(16)
2210 mappings[ macId ] = deviceId
2211 if hosts[ controller ] or "Error" not in hosts[ controller ]:
2212 if hosts[ controller ] == []:
2213 main.log.warn( "There are no hosts discovered" )
2214 zeroHosts = True
2215 else:
2216 for host in hosts[ controller ]:
2217 mac = None
2218 location = None
2219 device = None
2220 port = None
2221 try:
2222 mac = host.get( 'mac' )
2223 assert mac, "mac field could not be found for this host object"
2224
2225 location = host.get( 'location' )
2226 assert location, "location field could not be found for this host object"
2227
2228 # Trim the protocol identifier off deviceId
2229 device = str( location.get( 'elementId' ) ).split(':')[1]
2230 assert device, "elementId field could not be found for this host location object"
2231
2232 port = location.get( 'port' )
2233 assert port, "port field could not be found for this host location object"
2234
2235 # Now check if this matches where they should be
2236 if mac and device and port:
2237 if str( port ) != "1":
2238 main.log.error( "The attachment port is incorrect for " +
2239 "host " + str( mac ) +
2240 ". Expected: 1 Actual: " + str( port) )
2241 hostAttachment = False
2242 if device != mappings[ str( mac ) ]:
2243 main.log.error( "The attachment device is incorrect for " +
2244 "host " + str( mac ) +
2245 ". Expected: " + mappings[ str( mac ) ] +
2246 " Actual: " + device )
2247 hostAttachment = False
2248 else:
2249 hostAttachment = False
2250 except AssertionError:
2251 main.log.exception( "Json object not as expected" )
2252 main.log.error( repr( host ) )
2253 hostAttachment = False
2254 else:
2255 main.log.error( "No hosts json output or \"Error\"" +
2256 " in output. hosts = " +
2257 repr( hosts[ controller ] ) )
2258 if zeroHosts is False:
2259 hostAttachment = True
2260
2261 # END CHECKING HOST ATTACHMENT POINTS
2262 devicesResults = devicesResults and currentDevicesResult
2263 linksResults = linksResults and currentLinksResult
2264 hostsResults = hostsResults and currentHostsResult
2265 hostAttachmentResults = hostAttachmentResults and\
2266 hostAttachment
2267 topoResult = ( devicesResults and linksResults
2268 and hostsResults and ipResult and
2269 hostAttachmentResults )
2270
2271 # Compare json objects for hosts and dataplane clusters
2272
2273 # hosts
2274 main.step( "Hosts view is consistent across all ONOS nodes" )
2275 consistentHostsResult = main.TRUE
2276 for controller in range( len( hosts ) ):
2277 controllerStr = str( controller + 1 )
2278 if "Error" not in hosts[ controller ]:
2279 if hosts[ controller ] == hosts[ 0 ]:
2280 continue
2281 else: # hosts not consistent
2282 main.log.error( "hosts from ONOS" + controllerStr +
2283 " is inconsistent with ONOS1" )
2284 main.log.warn( repr( hosts[ controller ] ) )
2285 consistentHostsResult = main.FALSE
2286
2287 else:
2288 main.log.error( "Error in getting ONOS hosts from ONOS" +
2289 controllerStr )
2290 consistentHostsResult = main.FALSE
2291 main.log.warn( "ONOS" + controllerStr +
2292 " hosts response: " +
2293 repr( hosts[ controller ] ) )
2294 utilities.assert_equals(
2295 expect=main.TRUE,
2296 actual=consistentHostsResult,
2297 onpass="Hosts view is consistent across all ONOS nodes",
2298 onfail="ONOS nodes have different views of hosts" )
2299
2300 main.step( "Hosts information is correct" )
2301 hostsResults = hostsResults and ipResult
2302 utilities.assert_equals(
2303 expect=main.TRUE,
2304 actual=hostsResults,
2305 onpass="Host information is correct",
2306 onfail="Host information is incorrect" )
2307
2308 main.step( "Host attachment points to the network" )
2309 utilities.assert_equals(
2310 expect=True,
2311 actual=hostAttachmentResults,
2312 onpass="Hosts are correctly attached to the network",
2313 onfail="ONOS did not correctly attach hosts to the network" )
2314
2315 # Strongly connected clusters of devices
2316 main.step( "Clusters view is consistent across all ONOS nodes" )
2317 consistentClustersResult = main.TRUE
2318 for controller in range( len( clusters ) ):
2319 controllerStr = str( controller + 1 )
2320 if "Error" not in clusters[ controller ]:
2321 if clusters[ controller ] == clusters[ 0 ]:
2322 continue
2323 else: # clusters not consistent
2324 main.log.error( "clusters from ONOS" +
2325 controllerStr +
2326 " is inconsistent with ONOS1" )
2327 consistentClustersResult = main.FALSE
2328
2329 else:
2330 main.log.error( "Error in getting dataplane clusters " +
2331 "from ONOS" + controllerStr )
2332 consistentClustersResult = main.FALSE
2333 main.log.warn( "ONOS" + controllerStr +
2334 " clusters response: " +
2335 repr( clusters[ controller ] ) )
2336 utilities.assert_equals(
2337 expect=main.TRUE,
2338 actual=consistentClustersResult,
2339 onpass="Clusters view is consistent across all ONOS nodes",
2340 onfail="ONOS nodes have different views of clusters" )
2341
2342 main.step( "There is only one SCC" )
2343 # there should always only be one cluster
        clusterResults = main.FALSE
2344        try:
2345            numClusters = len( json.loads( clusters[ 0 ] ) )
2346        except ( ValueError, TypeError ):
2347            main.log.exception( "Error parsing clusters[0]: " +
2348                                repr( clusters[0] ) )
2349            numClusters = "ERROR"
2350 if numClusters == 1:
2351 clusterResults = main.TRUE
2352 utilities.assert_equals(
2353 expect=1,
2354 actual=numClusters,
2355 onpass="ONOS shows 1 SCC",
2356 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2357
2358 topoResult = ( devicesResults and linksResults
2359 and hostsResults and consistentHostsResult
2360 and consistentClustersResult and clusterResults
2361 and ipResult and hostAttachmentResults )
2362
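        # Require the topology to have converged within the first two polls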
2363 topoResult = topoResult and int( count <= 2 )
2364 note = "note it takes about " + str( int( cliTime ) ) + \
2365 " seconds for the test to make all the cli calls to fetch " +\
2366 "the topology from each ONOS instance"
2367 main.log.info(
2368 "Very crass estimate for topology discovery/convergence( " +
2369 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2370 str( count ) + " tries" )
2371
2372 main.step( "Device information is correct" )
2373 utilities.assert_equals(
2374 expect=main.TRUE,
2375 actual=devicesResults,
2376 onpass="Device information is correct",
2377 onfail="Device information is incorrect" )
2378
2379 main.step( "Links are correct" )
2380 utilities.assert_equals(
2381 expect=main.TRUE,
2382 actual=linksResults,
2383            onpass="Links are correct",
2384 onfail="Links are incorrect" )
2385
2386 main.step( "Hosts are correct" )
2387 utilities.assert_equals(
2388 expect=main.TRUE,
2389 actual=hostsResults,
2390 onpass="Hosts are correct",
2391 onfail="Hosts are incorrect" )
2392
2393 # FIXME: move this to an ONOS state case
2394 main.step( "Checking ONOS nodes" )
2395 nodesOutput = []
2396 nodeResults = main.TRUE
2397 threads = []
2398 for i in range( numControllers ):
2399 t = main.Thread( target=CLIs[i].nodes,
2400 name="nodes-" + str( i ),
2401 args=[ ] )
2402 threads.append( t )
2403 t.start()
2404
2405 for t in threads:
2406 t.join()
2407 nodesOutput.append( t.result )
2408 ips = [ node.ip_address for node in nodes ]
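        # Verify that every node in the cell reports an ACTIVE state in each
        # controller's view of the cluster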
2409 for i in nodesOutput:
2410 try:
2411 current = json.loads( i )
2412 for node in current:
2413 currentResult = main.FALSE
2414 if node['ip'] in ips: # node in nodes() output is in cell
2415 if node['state'] == 'ACTIVE':
2416 currentResult = main.TRUE
2417 else:
2418 main.log.error( "Error in ONOS node availability" )
2419 main.log.error(
2420 json.dumps( current,
2421 sort_keys=True,
2422 indent=4,
2423 separators=( ',', ': ' ) ) )
2424 break
2425 nodeResults = nodeResults and currentResult
2426 except ( ValueError, TypeError ):
2427 main.log.error( "Error parsing nodes output" )
2428 main.log.warn( repr( i ) )
2429 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2430 onpass="Nodes check successful",
2431 onfail="Nodes check NOT successful" )
2432
2433 def CASE9( self, main ):
2434 """
2435 Link s3-s28 down
2436 """
2437 import time
2438 assert numControllers, "numControllers not defined"
2439 assert main, "main not defined"
2440 assert utilities.assert_equals, "utilities.assert_equals not defined"
2441 assert CLIs, "CLIs not defined"
2442 assert nodes, "nodes not defined"
2443 # NOTE: You should probably run a topology check after this
2444
2445 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2446
2447 description = "Turn off a link to ensure that Link Discovery " +\
2448 "is working properly"
2449 main.case( description )
2450
2451 main.step( "Kill Link between s3 and s28" )
2452 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2453 main.log.info( "Waiting " + str( linkSleep ) +
2454 " seconds for link down to be discovered" )
2455 time.sleep( linkSleep )
2456 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2457 onpass="Link down successful",
2458 onfail="Failed to bring link down" )
2459 # TODO do some sort of check here
2460
2461 def CASE10( self, main ):
2462 """
2463 Link s3-s28 up
2464 """
2465 import time
2466 assert numControllers, "numControllers not defined"
2467 assert main, "main not defined"
2468 assert utilities.assert_equals, "utilities.assert_equals not defined"
2469 assert CLIs, "CLIs not defined"
2470 assert nodes, "nodes not defined"
2471 # NOTE: You should probably run a topology check after this
2472
2473 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2474
2475 description = "Restore a link to ensure that Link Discovery is " + \
2476 "working properly"
2477 main.case( description )
2478
2479 main.step( "Bring link between s3 and s28 back up" )
2480 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2481 main.log.info( "Waiting " + str( linkSleep ) +
2482 " seconds for link up to be discovered" )
2483 time.sleep( linkSleep )
2484 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2485 onpass="Link up successful",
2486 onfail="Failed to bring link up" )
2487 # TODO do some sort of check here
2488
2489 def CASE11( self, main ):
2490 """
2491 Switch Down
2492 """
2493 # NOTE: You should probably run a topology check after this
2494 import time
2495 assert numControllers, "numControllers not defined"
2496 assert main, "main not defined"
2497 assert utilities.assert_equals, "utilities.assert_equals not defined"
2498 assert CLIs, "CLIs not defined"
2499 assert nodes, "nodes not defined"
2500
2501 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2502
2503 description = "Killing a switch to ensure it is discovered correctly"
2504 main.case( description )
2505 switch = main.params[ 'kill' ][ 'switch' ]
2506 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2507
2508 # TODO: Make this switch parameterizable
2509 main.step( "Kill " + switch )
2510 main.log.info( "Deleting " + switch )
2511 main.Mininet1.delSwitch( switch )
2512 main.log.info( "Waiting " + str( switchSleep ) +
2513 " seconds for switch down to be discovered" )
2514 time.sleep( switchSleep )
2515 device = main.ONOScli1.getDevice( dpid=switchDPID )
2516 # Peek at the deleted switch
2517 main.log.warn( str( device ) )
2518 result = main.FALSE
2519 if device and device[ 'available' ] is False:
2520 result = main.TRUE
2521 utilities.assert_equals( expect=main.TRUE, actual=result,
2522 onpass="Kill switch successful",
2523 onfail="Failed to kill switch?" )
2524
2525 def CASE12( self, main ):
2526 """
2527 Switch Up
2528 """
2529 # NOTE: You should probably run a topology check after this
2530 import time
2531 assert numControllers, "numControllers not defined"
2532 assert main, "main not defined"
2533 assert utilities.assert_equals, "utilities.assert_equals not defined"
2534 assert CLIs, "CLIs not defined"
2535 assert nodes, "nodes not defined"
2536 assert ONOS1Port, "ONOS1Port not defined"
2537 assert ONOS2Port, "ONOS2Port not defined"
2538 assert ONOS3Port, "ONOS3Port not defined"
2539 assert ONOS4Port, "ONOS4Port not defined"
2540 assert ONOS5Port, "ONOS5Port not defined"
2541 assert ONOS6Port, "ONOS6Port not defined"
2542 assert ONOS7Port, "ONOS7Port not defined"
2543
2544 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2545 switch = main.params[ 'kill' ][ 'switch' ]
2546 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2547 links = main.params[ 'kill' ][ 'links' ].split()
2548 description = "Adding a switch to ensure it is discovered correctly"
2549 main.case( description )
2550
2551 main.step( "Add back " + switch )
2552 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2553 for peer in links:
2554 main.Mininet1.addLink( switch, peer )
2555 ipList = []
2556 for i in range( numControllers ):
2557 ipList.append( nodes[ i ].ip_address )
2558 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2559 main.log.info( "Waiting " + str( switchSleep ) +
2560 " seconds for switch up to be discovered" )
2561 time.sleep( switchSleep )
2562 device = main.ONOScli1.getDevice( dpid=switchDPID )
2563 # Peek at the deleted switch
2564 main.log.warn( str( device ) )
2565 result = main.FALSE
2566 if device and device[ 'available' ]:
2567 result = main.TRUE
2568 utilities.assert_equals( expect=main.TRUE, actual=result,
2569 onpass="add switch successful",
2570 onfail="Failed to add switch?" )
2571
2572 def CASE13( self, main ):
2573 """
2574 Clean up
2575 """
2576 import os
2577 import time
2578 assert numControllers, "numControllers not defined"
2579 assert main, "main not defined"
2580 assert utilities.assert_equals, "utilities.assert_equals not defined"
2581 assert CLIs, "CLIs not defined"
2582 assert nodes, "nodes not defined"
2583
2584 # printing colors to terminal
2585 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2586 'blue': '\033[94m', 'green': '\033[92m',
2587 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2588 main.case( "Test Cleanup" )
2589 main.step( "Killing tcpdumps" )
2590 main.Mininet2.stopTcpdump()
2591
2592 testname = main.TEST
2593 if main.params[ 'BACKUP' ] == "True":
2594 main.step( "Copying MN pcap and ONOS log files to test station" )
2595 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2596 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2597 # NOTE: MN Pcap file is being saved to ~/packet_captures
2598 # scp this file as MN and TestON aren't necessarily the same vm
2599 # FIXME: scp
2600 # mn files
2601 # TODO: Load these from params
2602 # NOTE: must end in /
2603 logFolder = "/opt/onos/log/"
2604 logFiles = [ "karaf.log", "karaf.log.1" ]
2605 # NOTE: must end in /
2606 dstDir = "~/packet_captures/"
2607 for f in logFiles:
2608 for node in nodes:
2609 main.ONOSbench.handle.sendline( "scp sdn@" + node.ip_address +
2610 ":" + logFolder + f + " " +
2611 teststationUser + "@" +
2612 teststationIP + ":" +
2613 dstDir + str( testname ) +
2614 "-" + node.name + "-" + f )
2615 main.ONOSbench.handle.expect( "\$" )
2616
2617 # std*.log's
2618 # NOTE: must end in /
2619 logFolder = "/opt/onos/var/"
2620 logFiles = [ "stderr.log", "stdout.log" ]
2621 # NOTE: must end in /
2622 dstDir = "~/packet_captures/"
2623 for f in logFiles:
2624 for node in nodes:
2625 main.ONOSbench.handle.sendline( "scp sdn@" + node.ip_address +
2626 ":" + logFolder + f + " " +
2627 teststationUser + "@" +
2628 teststationIP + ":" +
2629 dstDir + str( testname ) +
2630 "-" + node.name + "-" + f )
2631 main.ONOSbench.handle.expect( "\$" )
2632 # sleep so scp can finish
2633 time.sleep( 10 )
2634 main.step( "Packing and rotating pcap archives" )
2635 os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
2636
2637 main.step( "Stopping Mininet" )
2638 mnResult = main.Mininet1.stopNet()
2639 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2640 onpass="Mininet stopped",
2641 onfail="MN cleanup NOT successful" )
2642
2643 main.step( "Checking ONOS Logs for errors" )
2644 for node in nodes:
2645 print colors[ 'purple' ] + "Checking logs for errors on " + \
2646 node.name + ":" + colors[ 'end' ]
2647 print main.ONOSbench.checkLogs( node.ip_address )
2648
2649 try:
2650 timerLog = open( main.logdir + "/Timers.csv", 'w')
2651 # Overwrite with empty line and close
2652 labels = "Gossip Intents"
2653 data = str( gossipTime )
2654 timerLog.write( labels + "\n" + data )
2655 timerLog.close()
2656 except NameError, e:
2657 main.log.exception(e)
2658
2659 def CASE14( self, main ):
2660 """
2661 start election app on all onos nodes
2662 """
2663 assert numControllers, "numControllers not defined"
2664 assert main, "main not defined"
2665 assert utilities.assert_equals, "utilities.assert_equals not defined"
2666 assert CLIs, "CLIs not defined"
2667 assert nodes, "nodes not defined"
2668
2669 main.case("Start Leadership Election app")
2670 main.step( "Install leadership election app" )
2671 appResult = main.ONOScli1.activateApp( "org.onosproject.election" )
2672 utilities.assert_equals(
2673 expect=main.TRUE,
2674 actual=appResult,
2675 onpass="Election app installed",
2676 onfail="Something went wrong with installing Leadership election" )
2677
2678 main.step( "Run for election on each node" )
2679 leaderResult = main.TRUE
2680 leaders = []
2681 for cli in CLIs:
2682 cli.electionTestRun()
2683 for cli in CLIs:
2684 leader = cli.electionTestLeader()
2685 if leader is None or leader == main.FALSE:
2686 main.log.error( cli.name + ": Leader for the election app " +
2687 "should be an ONOS node, instead got '" +
2688 str( leader ) + "'" )
2689 leaderResult = main.FALSE
2690 leaders.append( leader )
2691 utilities.assert_equals(
2692 expect=main.TRUE,
2693 actual=leaderResult,
2694 onpass="Successfully ran for leadership",
2695 onfail="Failed to run for leadership" )
2696
2697 main.step( "Check that each node shows the same leader" )
2698 sameLeader = main.TRUE
2699 if len( set( leaders ) ) != 1:
2700 sameLeader = main.FALSE
2701            main.log.error( "Results of electionTestLeader in order of CLIs:" +
2702 str( leaders ) )
2703 utilities.assert_equals(
2704 expect=main.TRUE,
2705 actual=sameLeader,
2706 onpass="Leadership is consistent for the election topic",
2707 onfail="Nodes have different leaders" )
2708
2709 def CASE15( self, main ):
2710 """
2711 Check that Leadership Election is still functional
2712 """
2713 import time
2714 assert numControllers, "numControllers not defined"
2715 assert main, "main not defined"
2716 assert utilities.assert_equals, "utilities.assert_equals not defined"
2717 assert CLIs, "CLIs not defined"
2718 assert nodes, "nodes not defined"
2719
2720 leaderResult = main.TRUE
2721 description = "Check that Leadership Election is still functional"
2722 main.case( description )
2723
2724 main.step( "Check that each node shows the same leader" )
2725 sameLeader = main.TRUE
2726 leaders = []
2727 for cli in CLIs:
2728 leader = cli.electionTestLeader()
2729 leaders.append( leader )
2730 if len( set( leaders ) ) != 1:
2731 sameLeader = main.FALSE
2732            main.log.error( "Results of electionTestLeader in order of CLIs:" +
2733 str( leaders ) )
2734 utilities.assert_equals(
2735 expect=main.TRUE,
2736 actual=sameLeader,
2737 onpass="Leadership is consistent for the election topic",
2738 onfail="Nodes have different leaders" )
2739
2740 main.step( "Find current leader and withdraw" )
2741 leader = main.ONOScli1.electionTestLeader()
2742 # do some sanity checking on leader before using it
2743 withdrawResult = main.FALSE
2744 if leader is None or leader == main.FALSE:
2745 main.log.error(
2746 "Leader for the election app should be an ONOS node," +
2747 "instead got '" + str( leader ) + "'" )
2748 leaderResult = main.FALSE
2749 oldLeader = None
2750 for i in range( len( CLIs ) ):
2751 if leader == nodes[ i ].ip_address:
2752 oldLeader = CLIs[ i ]
2753 break
2754 else: # FOR/ELSE statement
2755 main.log.error( "Leader election, could not find current leader" )
2756 if oldLeader:
2757 withdrawResult = oldLeader.electionTestWithdraw()
2758 utilities.assert_equals(
2759 expect=main.TRUE,
2760 actual=withdrawResult,
2761 onpass="Node was withdrawn from election",
2762 onfail="Node was not withdrawn from election" )
2763
2764 main.step( "Make sure new leader is elected" )
2765 # FIXME: use threads
2766 leaderList = []
2767 for cli in CLIs:
2768 leaderN = cli.electionTestLeader()
2769 leaderList.append( leaderN )
2770 if leaderN == leader:
2771 main.log.error( cli.name + " still sees " + str( leader ) +
2772 " as leader after they withdrew" )
2773 leaderResult = main.FALSE
2774 elif leaderN == main.FALSE:
2775 # error in response
2776 # TODO: add check for "Command not found:" in the driver, this
2777 # means the app isn't loaded
2778 main.log.error( "Something is wrong with " +
2779 "electionTestLeader function, " +
2780 "check the error logs" )
2781 leaderResult = main.FALSE
2782 elif leaderN is None:
2783                # node may not have received the event yet
2784 time.sleep(7)
2785 leaderN = cli.electionTestLeader()
2786 leaderList.pop()
2787 leaderList.append( leaderN )
2788 consistentLeader = main.FALSE
2789 if len( set( leaderList ) ) == 1:
2790 main.log.info( "Each Election-app sees '" +
2791 str( leaderList[ 0 ] ) +
2792 "' as the leader" )
2793 consistentLeader = main.TRUE
2794 else:
2795 main.log.error(
2796 "Inconsistent responses for leader of Election-app:" )
2797 for n in range( len( leaderList ) ):
2798 main.log.error( "ONOS" + str( n + 1 ) + " response: " +
2799 str( leaderList[ n ] ) )
2800 leaderResult = leaderResult and consistentLeader
2801 utilities.assert_equals(
2802 expect=main.TRUE,
2803 actual=leaderResult,
2804 onpass="Leadership election passed",
2805 onfail="Something went wrong with Leadership election" )
2806
2807 main.step( "Run for election on old leader( just so everyone " +
2808 "is in the hat )" )
2809 if oldLeader:
2810 runResult = oldLeader.electionTestRun()
2811 else:
2812 runResult = main.FALSE
2813 utilities.assert_equals(
2814 expect=main.TRUE,
2815 actual=runResult,
2816 onpass="App re-ran for election",
2817 onfail="App failed to run for election" )
2818
2819 main.step( "Leader did not change when old leader re-ran" )
2820 afterRun = main.ONOScli1.electionTestLeader()
2821 # verify leader didn't just change
2822 if afterRun == leaderList[ 0 ]:
2823 afterResult = main.TRUE
2824 else:
2825 afterResult = main.FALSE
2826
2827 utilities.assert_equals(
2828 expect=main.TRUE,
2829 actual=afterResult,
2830 onpass="Old leader successfully re-ran for election",
2831 onfail="Something went wrong with Leadership election after " +
2832 "the old leader re-ran for election" )
2833
2834 def CASE16( self, main ):
2835 """
2836 Install Distributed Primitives app
2837 """
2838 import time
2839 assert numControllers, "numControllers not defined"
2840 assert main, "main not defined"
2841 assert utilities.assert_equals, "utilities.assert_equals not defined"
2842 assert CLIs, "CLIs not defined"
2843 assert nodes, "nodes not defined"
2844
2845 # Variables for the distributed primitives tests
2846 global pCounterName
2847 global iCounterName
2848 global pCounterValue
2849 global iCounterValue
2850 global onosSet
2851 global onosSetName
2852 pCounterName = "TestON-Partitions"
2853 iCounterName = "TestON-inMemory"
2854 pCounterValue = 0
2855 iCounterValue = 0
2856 onosSet = set([])
2857 onosSetName = "TestON-set"
2858
2859 description = "Install Primitives app"
2860 main.case( description )
2861 main.step( "Install Primitives app" )
2862 appName = "org.onosproject.distributedprimitives"
2863 appResults = CLIs[0].activateApp( appName )
2864 utilities.assert_equals( expect=main.TRUE,
2865 actual=appResults,
2866 onpass="Primitives app activated",
2867 onfail="Primitives app not activated" )
2868 time.sleep( 5 ) # To allow all nodes to activate
2869
2870 def CASE17( self, main ):
2871 """
2872 Check for basic functionality with distributed primitives
2873 """
2874 import json
2875 # Make sure variables are defined/set
2876 assert numControllers, "numControllers not defined"
2877 assert main, "main not defined"
2878 assert utilities.assert_equals, "utilities.assert_equals not defined"
2879 assert CLIs, "CLIs not defined"
2880 assert nodes, "nodes not defined"
2881 assert pCounterName, "pCounterName not defined"
2882 assert iCounterName, "iCounterName not defined"
2883 assert onosSetName, "onosSetName not defined"
2884 # NOTE: assert fails if value is 0/None/Empty/False
2885 try:
2886 pCounterValue
2887 except NameError:
2888 main.log.error( "pCounterValue not defined, setting to 0" )
2889 pCounterValue = 0
2890 try:
2891 iCounterValue
2892 except NameError:
2893 main.log.error( "iCounterValue not defined, setting to 0" )
2894 iCounterValue = 0
2895 try:
2896 onosSet
2897 except NameError:
2898 main.log.error( "onosSet not defined, setting to empty Set" )
2899 onosSet = set([])
2900 # Variables for the distributed primitives tests. These are local only
2901 addValue = "a"
2902 addAllValue = "a b c d e f"
2903 retainValue = "c d e f"
2904
2905 description = "Check for basic functionality with distributed " +\
2906 "primitives"
2907 main.case( description )
2908        main.caseExplaination = "Test the methods of the distributed primitives (counters and sets) through the CLI"
2909 # DISTRIBUTED ATOMIC COUNTERS
2910 main.step( "Increment and get a default counter on each node" )
2911 pCounters = []
2912 threads = []
2913 addedPValues = []
2914 for i in range( numControllers ):
2915 t = main.Thread( target=CLIs[i].counterTestIncrement,
2916 name="counterIncrement-" + str( i ),
2917 args=[ pCounterName ] )
2918 pCounterValue += 1
2919 addedPValues.append( pCounterValue )
2920 threads.append( t )
2921 t.start()
2922
2923 for t in threads:
2924 t.join()
2925 pCounters.append( t.result )
2926 # Check that counter incremented numController times
2927 pCounterResults = True
2928 for i in addedPValues:
2929 tmpResult = i in pCounters
2930 pCounterResults = pCounterResults and tmpResult
2931 if not tmpResult:
2932 main.log.error( str( i ) + " is not in partitioned "
2933 "counter incremented results" )
2934 utilities.assert_equals( expect=True,
2935 actual=pCounterResults,
2936 onpass="Default counter incremented",
2937 onfail="Error incrementing default" +
2938 " counter" )
2939
2940 main.step( "Increment and get an in memory counter on each node" )
2941 iCounters = []
2942 addedIValues = []
2943 threads = []
2944 for i in range( numControllers ):
2945 t = main.Thread( target=CLIs[i].counterTestIncrement,
2946 name="icounterIncrement-" + str( i ),
2947 args=[ iCounterName ],
2948 kwargs={ "inMemory": True } )
2949 iCounterValue += 1
2950 addedIValues.append( iCounterValue )
2951 threads.append( t )
2952 t.start()
2953
2954 for t in threads:
2955 t.join()
2956 iCounters.append( t.result )
2957 # Check that counter incremented numController times
2958 iCounterResults = True
2959 for i in addedIValues:
2960 tmpResult = i in iCounters
2961 iCounterResults = iCounterResults and tmpResult
2962 if not tmpResult:
2963 main.log.error( str( i ) + " is not in the in-memory "
2964 "counter incremented results" )
2965 utilities.assert_equals( expect=True,
2966 actual=iCounterResults,
2967 onpass="In memory counter incremented",
2968 onfail="Error incrementing in memory" +
2969 " counter" )
2970
2971        main.step( "Check counters are consistent across nodes" )
2972 onosCounters = []
2973 threads = []
2974 for i in range( numControllers ):
2975 t = main.Thread( target=CLIs[i].counters,
2976 name="counters-" + str( i ) )
2977 threads.append( t )
2978 t.start()
2979 for t in threads:
2980 t.join()
2981 onosCounters.append( t.result )
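        # Compare each node's counters output to ONOS1's to check consistency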
2982 tmp = [ i == onosCounters[ 0 ] for i in onosCounters ]
2983 if all( tmp ):
2984 main.log.info( "Counters are consistent across all nodes" )
2985 consistentCounterResults = main.TRUE
2986 else:
2987 main.log.error( "Counters are not consistent across all nodes" )
2988 consistentCounterResults = main.FALSE
2989 utilities.assert_equals( expect=main.TRUE,
2990 actual=consistentCounterResults,
2991 onpass="ONOS counters are consistent " +
2992 "across nodes",
2993 onfail="ONOS Counters are inconsistent " +
2994 "across nodes" )
2995
2996 main.step( "Counters we added have the correct values" )
2997 correctResults = main.TRUE
2998 for i in range( numControllers ):
2999 try:
3000 current = json.loads( onosCounters[i] )
3001 except ( ValueError, TypeError ):
3002 main.log.error( "Could not parse counters response from ONOS" +
3003 str( i + 1 ) )
3004 main.log.warn( repr( onosCounters[ i ] ) )
3005 pValue = None
3006 iValue = None
3007 try:
3008 for database in current:
3009 partitioned = database.get( 'partitionedDatabaseCounters' )
3010 if partitioned:
3011 for value in partitioned:
3012 if value.get( 'name' ) == pCounterName:
3013 pValue = value.get( 'value' )
3014 break
3015 inMemory = database.get( 'inMemoryDatabaseCounters' )
3016 if inMemory:
3017 for value in inMemory:
3018 if value.get( 'name' ) == iCounterName:
3019 iValue = value.get( 'value' )
3020 break
3021 except AttributeError, e:
3022 main.log.error( "ONOS" + str( i + 1 ) + " counters result " +
3023 "is not as expected" )
3024 correctResults = main.FALSE
3025 if pValue == pCounterValue:
3026 main.log.info( "Partitioned counter value is correct" )
3027 else:
3028 main.log.error( "Partitioned counter value is incorrect," +
3029 " expected value: " + str( pCounterValue )
3030 + " current value: " + str( pValue ) )
3031 correctResults = main.FALSE
3032 if iValue == iCounterValue:
3033 main.log.info( "In memory counter value is correct" )
3034 else:
3035 main.log.error( "In memory counter value is incorrect, " +
3036 "expected value: " + str( iCounterValue ) +
3037 " current value: " + str( iValue ) )
3038 correctResults = main.FALSE
3039 utilities.assert_equals( expect=main.TRUE,
3040 actual=correctResults,
3041 onpass="Added counters are correct",
3042 onfail="Added counters are incorrect" )
3043 # DISTRIBUTED SETS
3044 main.step( "Distributed Set get" )
3045 size = len( onosSet )
3046 getResponses = []
3047 threads = []
3048 for i in range( numControllers ):
3049 t = main.Thread( target=CLIs[i].setTestGet,
3050 name="setTestGet-" + str( i ),
3051 args=[ onosSetName ] )
3052 threads.append( t )
3053 t.start()
3054 for t in threads:
3055 t.join()
3056 getResponses.append( t.result )
3057
3058 getResults = main.TRUE
3059 for i in range( numControllers ):
3060 if isinstance( getResponses[ i ], list):
3061 current = set( getResponses[ i ] )
3062 if len( current ) == len( getResponses[ i ] ):
3063 # no repeats
3064 if onosSet != current:
3065 main.log.error( "ONOS" + str( i + 1 ) +
3066 " has incorrect view" +
3067 " of set " + onosSetName + ":\n" +
3068 str( getResponses[ i ] ) )
3069 main.log.debug( "Expected: " + str( onosSet ) )
3070 main.log.debug( "Actual: " + str( current ) )
3071 getResults = main.FALSE
3072 else:
3073 # error, set is not a set
3074 main.log.error( "ONOS" + str( i + 1 ) +
3075 " has repeat elements in" +
3076 " set " + onosSetName + ":\n" +
3077 str( getResponses[ i ] ) )
3078 getResults = main.FALSE
3079 elif getResponses[ i ] == main.ERROR:
3080 getResults = main.FALSE
3081 utilities.assert_equals( expect=main.TRUE,
3082 actual=getResults,
3083 onpass="Set elements are correct",
3084 onfail="Set elements are incorrect" )
3085
3086 main.step( "Distributed Set size" )
3087 sizeResponses = []
3088 threads = []
3089 for i in range( numControllers ):
3090 t = main.Thread( target=CLIs[i].setTestSize,
3091 name="setTestSize-" + str( i ),
3092 args=[ onosSetName ] )
3093 threads.append( t )
3094 t.start()
3095 for t in threads:
3096 t.join()
3097 sizeResponses.append( t.result )
3098
3099 sizeResults = main.TRUE
3100 for i in range( numControllers ):
3101 if size != sizeResponses[ i ]:
3102 sizeResults = main.FALSE
3103 main.log.error( "ONOS" + str( i + 1 ) +
3104 " expected a size of " + str( size ) +
3105 " for set " + onosSetName +
3106 " but got " + str( sizeResponses[ i ] ) )
3107 utilities.assert_equals( expect=main.TRUE,
3108 actual=sizeResults,
3109 onpass="Set sizes are correct",
3110 onfail="Set sizes are incorrect" )
3111
3112 main.step( "Distributed Set add()" )
3113 onosSet.add( addValue )
3114 addResponses = []
3115 threads = []
3116 for i in range( numControllers ):
3117 t = main.Thread( target=CLIs[i].setTestAdd,
3118 name="setTestAdd-" + str( i ),
3119 args=[ onosSetName, addValue ] )
3120 threads.append( t )
3121 t.start()
3122 for t in threads:
3123 t.join()
3124 addResponses.append( t.result )
3125
3126 # main.TRUE = successfully changed the set
3127 # main.FALSE = action resulted in no change in set
3128 # main.ERROR - Some error in executing the function
3129 addResults = main.TRUE
3130 for i in range( numControllers ):
3131 if addResponses[ i ] == main.TRUE:
3132 # All is well
3133 pass
3134 elif addResponses[ i ] == main.FALSE:
3135 # Already in set, probably fine
3136 pass
3137 elif addResponses[ i ] == main.ERROR:
3138 # Error in execution
3139 addResults = main.FALSE
3140 else:
3141 # unexpected result
3142 addResults = main.FALSE
3143 if addResults != main.TRUE:
3144 main.log.error( "Error executing set add" )
3145
3146 # Check if set is still correct
3147 size = len( onosSet )
3148 getResponses = []
3149 threads = []
3150 for i in range( numControllers ):
3151 t = main.Thread( target=CLIs[i].setTestGet,
3152 name="setTestGet-" + str( i ),
3153 args=[ onosSetName ] )
3154 threads.append( t )
3155 t.start()
3156 for t in threads:
3157 t.join()
3158 getResponses.append( t.result )
3159 getResults = main.TRUE
3160 for i in range( numControllers ):
3161 if isinstance( getResponses[ i ], list):
3162 current = set( getResponses[ i ] )
3163 if len( current ) == len( getResponses[ i ] ):
3164 # no repeats
3165 if onosSet != current:
3166 main.log.error( "ONOS" + str( i + 1 ) +
3167 " has incorrect view" +
3168 " of set " + onosSetName + ":\n" +
3169 str( getResponses[ i ] ) )
3170 main.log.debug( "Expected: " + str( onosSet ) )
3171 main.log.debug( "Actual: " + str( current ) )
3172 getResults = main.FALSE
3173 else:
3174 # error, set is not a set
3175 main.log.error( "ONOS" + str( i + 1 ) +
3176 " has repeat elements in" +
3177 " set " + onosSetName + ":\n" +
3178 str( getResponses[ i ] ) )
3179 getResults = main.FALSE
3180 elif getResponses[ i ] == main.ERROR:
3181 getResults = main.FALSE
3182 sizeResponses = []
3183 threads = []
3184 for i in range( numControllers ):
3185 t = main.Thread( target=CLIs[i].setTestSize,
3186 name="setTestSize-" + str( i ),
3187 args=[ onosSetName ] )
3188 threads.append( t )
3189 t.start()
3190 for t in threads:
3191 t.join()
3192 sizeResponses.append( t.result )
3193 sizeResults = main.TRUE
3194 for i in range( numControllers ):
3195 if size != sizeResponses[ i ]:
3196 sizeResults = main.FALSE
3197 main.log.error( "ONOS" + str( i + 1 ) +
3198 " expected a size of " + str( size ) +
3199 " for set " + onosSetName +
3200 " but got " + str( sizeResponses[ i ] ) )
3201 addResults = addResults and getResults and sizeResults
3202 utilities.assert_equals( expect=main.TRUE,
3203 actual=addResults,
3204 onpass="Set add correct",
3205 onfail="Set add was incorrect" )
3206
3207 main.step( "Distributed Set addAll()" )
3208 onosSet.update( addAllValue.split() )
3209 addResponses = []
3210 threads = []
3211 for i in range( numControllers ):
3212 t = main.Thread( target=CLIs[i].setTestAdd,
3213 name="setTestAddAll-" + str( i ),
3214 args=[ onosSetName, addAllValue ] )
3215 threads.append( t )
3216 t.start()
3217 for t in threads:
3218 t.join()
3219 addResponses.append( t.result )
3220
3221 # main.TRUE = successfully changed the set
3222 # main.FALSE = action resulted in no change in set
3223 # main.ERROR - Some error in executing the function
3224 addAllResults = main.TRUE
3225 for i in range( numControllers ):
3226 if addResponses[ i ] == main.TRUE:
3227 # All is well
3228 pass
3229 elif addResponses[ i ] == main.FALSE:
3230 # Already in set, probably fine
3231 pass
3232 elif addResponses[ i ] == main.ERROR:
3233 # Error in execution
3234 addAllResults = main.FALSE
3235 else:
3236 # unexpected result
3237 addAllResults = main.FALSE
3238 if addAllResults != main.TRUE:
3239 main.log.error( "Error executing set addAll" )
3240
3241 # Check if set is still correct
3242 size = len( onosSet )
3243 getResponses = []
3244 threads = []
3245 for i in range( numControllers ):
3246 t = main.Thread( target=CLIs[i].setTestGet,
3247 name="setTestGet-" + str( i ),
3248 args=[ onosSetName ] )
3249 threads.append( t )
3250 t.start()
3251 for t in threads:
3252 t.join()
3253 getResponses.append( t.result )
3254 getResults = main.TRUE
3255 for i in range( numControllers ):
3256 if isinstance( getResponses[ i ], list):
3257 current = set( getResponses[ i ] )
3258 if len( current ) == len( getResponses[ i ] ):
3259 # no repeats
3260 if onosSet != current:
3261 main.log.error( "ONOS" + str( i + 1 ) +
3262 " has incorrect view" +
3263 " of set " + onosSetName + ":\n" +
3264 str( getResponses[ i ] ) )
3265 main.log.debug( "Expected: " + str( onosSet ) )
3266 main.log.debug( "Actual: " + str( current ) )
3267 getResults = main.FALSE
3268 else:
3269 # error, set is not a set
3270 main.log.error( "ONOS" + str( i + 1 ) +
3271 " has repeat elements in" +
3272 " set " + onosSetName + ":\n" +
3273 str( getResponses[ i ] ) )
3274 getResults = main.FALSE
3275 elif getResponses[ i ] == main.ERROR:
3276 getResults = main.FALSE
3277 sizeResponses = []
3278 threads = []
3279 for i in range( numControllers ):
3280 t = main.Thread( target=CLIs[i].setTestSize,
3281 name="setTestSize-" + str( i ),
3282 args=[ onosSetName ] )
3283 threads.append( t )
3284 t.start()
3285 for t in threads:
3286 t.join()
3287 sizeResponses.append( t.result )
3288 sizeResults = main.TRUE
3289 for i in range( numControllers ):
3290 if size != sizeResponses[ i ]:
3291 sizeResults = main.FALSE
3292 main.log.error( "ONOS" + str( i + 1 ) +
3293 " expected a size of " + str( size ) +
3294 " for set " + onosSetName +
3295 " but got " + str( sizeResponses[ i ] ) )
3296 addAllResults = addAllResults and getResults and sizeResults
3297 utilities.assert_equals( expect=main.TRUE,
3298 actual=addAllResults,
3299 onpass="Set addAll correct",
3300 onfail="Set addAll was incorrect" )
3301
3302 main.step( "Distributed Set contains()" )
3303 containsResponses = []
3304 threads = []
3305 for i in range( numControllers ):
3306 t = main.Thread( target=CLIs[i].setTestGet,
3307 name="setContains-" + str( i ),
3308 args=[ onosSetName ],
3309 kwargs={ "values": addValue } )
3310 threads.append( t )
3311 t.start()
3312 for t in threads:
3313 t.join()
3314 # NOTE: This is the tuple
3315 containsResponses.append( t.result )
3316
3317 containsResults = main.TRUE
3318 for i in range( numControllers ):
3319 if containsResponses[ i ] == main.ERROR:
3320 containsResults = main.FALSE
3321 else:
3322 containsResults = containsResults and\
3323 containsResponses[ i ][ 1 ]
3324 utilities.assert_equals( expect=main.TRUE,
3325 actual=containsResults,
3326 onpass="Set contains is functional",
3327 onfail="Set contains failed" )
3328
3329 main.step( "Distributed Set containsAll()" )
3330 containsAllResponses = []
3331 threads = []
3332 for i in range( numControllers ):
3333 t = main.Thread( target=CLIs[i].setTestGet,
3334 name="setContainsAll-" + str( i ),
3335 args=[ onosSetName ],
3336 kwargs={ "values": addAllValue } )
3337 threads.append( t )
3338 t.start()
3339 for t in threads:
3340 t.join()
3341 # NOTE: t.result is a tuple; element 1 is the containsAll() result
3342 containsAllResponses.append( t.result )
3343
3344 containsAllResults = main.TRUE
3345 for i in range( numControllers ):
3346 if containsAllResponses[ i ] == main.ERROR:
3347 containsAllResults = main.FALSE
3348 else:
3349 containsAllResults = containsAllResults and\
3350 containsAllResponses[ i ][ 1 ]
3351 utilities.assert_equals( expect=main.TRUE,
3352 actual=containsAllResults,
3353 onpass="Set containsAll is functional",
3354 onfail="Set containsAll failed" )
3355
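# remove() a single value on every node; the local onosSet mirrors the
# expected contents so the get/size checks below can compare against it.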
3356 main.step( "Distributed Set remove()" )
3357 onosSet.remove( addValue )
3358 removeResponses = []
3359 threads = []
3360 for i in range( numControllers ):
3361 t = main.Thread( target=CLIs[i].setTestRemove,
3362 name="setTestRemove-" + str( i ),
3363 args=[ onosSetName, addValue ] )
3364 threads.append( t )
3365 t.start()
3366 for t in threads:
3367 t.join()
3368 removeResponses.append( t.result )
3369
3370 # main.TRUE = successfully changed the set
3371 # main.FALSE = action resulted in no change in set
3372 # main.ERROR - Some error in executing the function
3373 removeResults = main.TRUE
3374 for i in range( numControllers ):
3375 if removeResponses[ i ] == main.TRUE:
3376 # All is well
3377 pass
3378 elif removeResponses[ i ] == main.FALSE:
3379 # not in set, probably fine
3380 pass
3381 elif removeResponses[ i ] == main.ERROR:
3382 # Error in execution
3383 removeResults = main.FALSE
3384 else:
3385 # unexpected result
3386 removeResults = main.FALSE
3387 if removeResults != main.TRUE:
3388 main.log.error( "Error executing set remove" )
3389
3390 # Check if set is still correct
3391 size = len( onosSet )
3392 getResponses = []
3393 threads = []
3394 for i in range( numControllers ):
3395 t = main.Thread( target=CLIs[i].setTestGet,
3396 name="setTestGet-" + str( i ),
3397 args=[ onosSetName ] )
3398 threads.append( t )
3399 t.start()
3400 for t in threads:
3401 t.join()
3402 getResponses.append( t.result )
3403 getResults = main.TRUE
3404 for i in range( numControllers ):
3405 if isinstance( getResponses[ i ], list):
3406 current = set( getResponses[ i ] )
3407 if len( current ) == len( getResponses[ i ] ):
3408 # no repeats
3409 if onosSet != current:
3410 main.log.error( "ONOS" + str( i + 1 ) +
3411 " has incorrect view" +
3412 " of set " + onosSetName + ":\n" +
3413 str( getResponses[ i ] ) )
3414 main.log.debug( "Expected: " + str( onosSet ) )
3415 main.log.debug( "Actual: " + str( current ) )
3416 getResults = main.FALSE
3417 else:
3418 # error, set is not a set
3419 main.log.error( "ONOS" + str( i + 1 ) +
3420 " has repeat elements in" +
3421 " set " + onosSetName + ":\n" +
3422 str( getResponses[ i ] ) )
3423 getResults = main.FALSE
3424 elif getResponses[ i ] == main.ERROR:
3425 getResults = main.FALSE
3426 sizeResponses = []
3427 threads = []
3428 for i in range( numControllers ):
3429 t = main.Thread( target=CLIs[i].setTestSize,
3430 name="setTestSize-" + str( i ),
3431 args=[ onosSetName ] )
3432 threads.append( t )
3433 t.start()
3434 for t in threads:
3435 t.join()
3436 sizeResponses.append( t.result )
3437 sizeResults = main.TRUE
3438 for i in range( numControllers ):
3439 if size != sizeResponses[ i ]:
3440 sizeResults = main.FALSE
3441 main.log.error( "ONOS" + str( i + 1 ) +
3442 " expected a size of " + str( size ) +
3443 " for set " + onosSetName +
3444 " but got " + str( sizeResponses[ i ] ) )
3445 removeResults = removeResults and getResults and sizeResults
3446 utilities.assert_equals( expect=main.TRUE,
3447 actual=removeResults,
3448 onpass="Set remove correct",
3449 onfail="Set remove was incorrect" )
3450
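# removeAll(): remove every space-separated value of addAllValue in one call
# per node, mirrored locally with difference_update().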
3451 main.step( "Distributed Set removeAll()" )
3452 onosSet.difference_update( addAllValue.split() )
3453 removeAllResponses = []
3454 threads = []
3455 try:
3456 for i in range( numControllers ):
3457 t = main.Thread( target=CLIs[i].setTestRemove,
3458 name="setTestRemoveAll-" + str( i ),
3459 args=[ onosSetName, addAllValue ] )
3460 threads.append( t )
3461 t.start()
3462 for t in threads:
3463 t.join()
3464 removeAllResponses.append( t.result )
3465 except Exception as e:
3466 main.log.exception( e )
3467
3468 # main.TRUE = successfully changed the set
3469 # main.FALSE = action resulted in no change in set
3470 # main.ERROR - Some error in executing the function
3471 removeAllResults = main.TRUE
3472 for i in range( numControllers ):
3473 if removeAllResponses[ i ] == main.TRUE:
3474 # All is well
3475 pass
3476 elif removeAllResponses[ i ] == main.FALSE:
3477 # not in set, probably fine
3478 pass
3479 elif removeAllResponses[ i ] == main.ERROR:
3480 # Error in execution
3481 removeAllResults = main.FALSE
3482 else:
3483 # unexpected result
3484 removeAllResults = main.FALSE
3485 if removeAllResults != main.TRUE:
3486 main.log.error( "Error executing set removeAll" )
3487
3488 # Check if set is still correct
3489 size = len( onosSet )
3490 getResponses = []
3491 threads = []
3492 for i in range( numControllers ):
3493 t = main.Thread( target=CLIs[i].setTestGet,
3494 name="setTestGet-" + str( i ),
3495 args=[ onosSetName ] )
3496 threads.append( t )
3497 t.start()
3498 for t in threads:
3499 t.join()
3500 getResponses.append( t.result )
3501 getResults = main.TRUE
3502 for i in range( numControllers ):
3503 if isinstance( getResponses[ i ], list):
3504 current = set( getResponses[ i ] )
3505 if len( current ) == len( getResponses[ i ] ):
3506 # no repeats
3507 if onosSet != current:
3508 main.log.error( "ONOS" + str( i + 1 ) +
3509 " has incorrect view" +
3510 " of set " + onosSetName + ":\n" +
3511 str( getResponses[ i ] ) )
3512 main.log.debug( "Expected: " + str( onosSet ) )
3513 main.log.debug( "Actual: " + str( current ) )
3514 getResults = main.FALSE
3515 else:
3516 # error: duplicates in response, not a valid set
3517 main.log.error( "ONOS" + str( i + 1 ) +
3518 " has repeat elements in" +
3519 " set " + onosSetName + ":\n" +
3520 str( getResponses[ i ] ) )
3521 getResults = main.FALSE
3522 elif getResponses[ i ] == main.ERROR:
3523 getResults = main.FALSE
3524 sizeResponses = []
3525 threads = []
3526 for i in range( numControllers ):
3527 t = main.Thread( target=CLIs[i].setTestSize,
3528 name="setTestSize-" + str( i ),
3529 args=[ onosSetName ] )
3530 threads.append( t )
3531 t.start()
3532 for t in threads:
3533 t.join()
3534 sizeResponses.append( t.result )
3535 sizeResults = main.TRUE
3536 for i in range( numControllers ):
3537 if size != sizeResponses[ i ]:
3538 sizeResults = main.FALSE
3539 main.log.error( "ONOS" + str( i + 1 ) +
3540 " expected a size of " + str( size ) +
3541 " for set " + onosSetName +
3542 " but got " + str( sizeResponses[ i ] ) )
3543 removeAllResults = removeAllResults and getResults and sizeResults
3544 utilities.assert_equals( expect=main.TRUE,
3545 actual=removeAllResults,
3546 onpass="Set removeAll correct",
3547 onfail="Set removeAll was incorrect" )
3548
3549 main.step( "Distributed Set addAll()" )
3550 onosSet.update( addAllValue.split() )
3551 addResponses = []
3552 threads = []
3553 for i in range( numControllers ):
3554 t = main.Thread( target=CLIs[i].setTestAdd,
3555 name="setTestAddAll-" + str( i ),
3556 args=[ onosSetName, addAllValue ] )
3557 threads.append( t )
3558 t.start()
3559 for t in threads:
3560 t.join()
3561 addResponses.append( t.result )
3562
3563 # main.TRUE = successfully changed the set
3564 # main.FALSE = action resulted in no change in set
3565 # main.ERROR - Some error in executing the function
3566 addAllResults = main.TRUE
3567 for i in range( numControllers ):
3568 if addResponses[ i ] == main.TRUE:
3569 # All is well
3570 pass
3571 elif addResponses[ i ] == main.FALSE:
3572 # Already in set, probably fine
3573 pass
3574 elif addResponses[ i ] == main.ERROR:
3575 # Error in execution
3576 addAllResults = main.FALSE
3577 else:
3578 # unexpected result
3579 addAllResults = main.FALSE
3580 if addAllResults != main.TRUE:
3581 main.log.error( "Error executing set addAll" )
3582
3583 # Check if set is still correct
3584 size = len( onosSet )
3585 getResponses = []
3586 threads = []
3587 for i in range( numControllers ):
3588 t = main.Thread( target=CLIs[i].setTestGet,
3589 name="setTestGet-" + str( i ),
3590 args=[ onosSetName ] )
3591 threads.append( t )
3592 t.start()
3593 for t in threads:
3594 t.join()
3595 getResponses.append( t.result )
3596 getResults = main.TRUE
3597 for i in range( numControllers ):
3598 if isinstance( getResponses[ i ], list):
3599 current = set( getResponses[ i ] )
3600 if len( current ) == len( getResponses[ i ] ):
3601 # no repeats
3602 if onosSet != current:
3603 main.log.error( "ONOS" + str( i + 1 ) +
3604 " has incorrect view" +
3605 " of set " + onosSetName + ":\n" +
3606 str( getResponses[ i ] ) )
3607 main.log.debug( "Expected: " + str( onosSet ) )
3608 main.log.debug( "Actual: " + str( current ) )
3609 getResults = main.FALSE
3610 else:
3611 # error: duplicates in response, not a valid set
3612 main.log.error( "ONOS" + str( i + 1 ) +
3613 " has repeat elements in" +
3614 " set " + onosSetName + ":\n" +
3615 str( getResponses[ i ] ) )
3616 getResults = main.FALSE
3617 elif getResponses[ i ] == main.ERROR:
3618 getResults = main.FALSE
3619 sizeResponses = []
3620 threads = []
3621 for i in range( numControllers ):
3622 t = main.Thread( target=CLIs[i].setTestSize,
3623 name="setTestSize-" + str( i ),
3624 args=[ onosSetName ] )
3625 threads.append( t )
3626 t.start()
3627 for t in threads:
3628 t.join()
3629 sizeResponses.append( t.result )
3630 sizeResults = main.TRUE
3631 for i in range( numControllers ):
3632 if size != sizeResponses[ i ]:
3633 sizeResults = main.FALSE
3634 main.log.error( "ONOS" + str( i + 1 ) +
3635 " expected a size of " + str( size ) +
3636 " for set " + onosSetName +
3637 " but got " + str( sizeResponses[ i ] ) )
3638 addAllResults = addAllResults and getResults and sizeResults
3639 utilities.assert_equals( expect=main.TRUE,
3640 actual=addAllResults,
3641 onpass="Set addAll correct",
3642 onfail="Set addAll was incorrect" )
3643
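# clear() is driven through setTestRemove with the clear=True kwarg.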
3644 main.step( "Distributed Set clear()" )
3645 onosSet.clear()
3646 clearResponses = []
3647 threads = []
3648 for i in range( numControllers ):
3649 t = main.Thread( target=CLIs[i].setTestRemove,
3650 name="setTestClear-" + str( i ),
3651 args=[ onosSetName, " " ],  # Value doesn't matter
3652 kwargs={ "clear": True } )
3653 threads.append( t )
3654 t.start()
3655 for t in threads:
3656 t.join()
3657 clearResponses.append( t.result )
3658
3659 # main.TRUE = successfully changed the set
3660 # main.FALSE = action resulted in no change in set
3661 # main.ERROR - Some error in executing the function
3662 clearResults = main.TRUE
3663 for i in range( numControllers ):
3664 if clearResponses[ i ] == main.TRUE:
3665 # All is well
3666 pass
3667 elif clearResponses[ i ] == main.FALSE:
3668 # Nothing set, probably fine
3669 pass
3670 elif clearResponses[ i ] == main.ERROR:
3671 # Error in execution
3672 clearResults = main.FALSE
3673 else:
3674 # unexpected result
3675 clearResults = main.FALSE
3676 if clearResults != main.TRUE:
3677 main.log.error( "Error executing set clear" )
3678
3679 # Check if set is still correct
3680 size = len( onosSet )
3681 getResponses = []
3682 threads = []
3683 for i in range( numControllers ):
3684 t = main.Thread( target=CLIs[i].setTestGet,
3685 name="setTestGet-" + str( i ),
3686 args=[ onosSetName ] )
3687 threads.append( t )
3688 t.start()
3689 for t in threads:
3690 t.join()
3691 getResponses.append( t.result )
3692 getResults = main.TRUE
3693 for i in range( numControllers ):
3694 if isinstance( getResponses[ i ], list):
3695 current = set( getResponses[ i ] )
3696 if len( current ) == len( getResponses[ i ] ):
3697 # no repeats
3698 if onosSet != current:
3699 main.log.error( "ONOS" + str( i + 1 ) +
3700 " has incorrect view" +
3701 " of set " + onosSetName + ":\n" +
3702 str( getResponses[ i ] ) )
3703 main.log.debug( "Expected: " + str( onosSet ) )
3704 main.log.debug( "Actual: " + str( current ) )
3705 getResults = main.FALSE
3706 else:
3707 # error: duplicates in response, not a valid set
3708 main.log.error( "ONOS" + str( i + 1 ) +
3709 " has repeat elements in" +
3710 " set " + onosSetName + ":\n" +
3711 str( getResponses[ i ] ) )
3712 getResults = main.FALSE
3713 elif getResponses[ i ] == main.ERROR:
3714 getResults = main.FALSE
3715 sizeResponses = []
3716 threads = []
3717 for i in range( numControllers ):
3718 t = main.Thread( target=CLIs[i].setTestSize,
3719 name="setTestSize-" + str( i ),
3720 args=[ onosSetName ] )
3721 threads.append( t )
3722 t.start()
3723 for t in threads:
3724 t.join()
3725 sizeResponses.append( t.result )
3726 sizeResults = main.TRUE
3727 for i in range( numControllers ):
3728 if size != sizeResponses[ i ]:
3729 sizeResults = main.FALSE
3730 main.log.error( "ONOS" + str( i + 1 ) +
3731 " expected a size of " + str( size ) +
3732 " for set " + onosSetName +
3733 " but got " + str( sizeResponses[ i ] ) )
3734 clearResults = clearResults and getResults and sizeResults
3735 utilities.assert_equals( expect=main.TRUE,
3736 actual=clearResults,
3737 onpass="Set clear correct",
3738 onfail="Set clear was incorrect" )
3739
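# Repopulate the set so the retain() step below has elements to filter.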
3740 main.step( "Distributed Set addAll()" )
3741 onosSet.update( addAllValue.split() )
3742 addResponses = []
3743 threads = []
3744 for i in range( numControllers ):
3745 t = main.Thread( target=CLIs[i].setTestAdd,
3746 name="setTestAddAll-" + str( i ),
3747 args=[ onosSetName, addAllValue ] )
3748 threads.append( t )
3749 t.start()
3750 for t in threads:
3751 t.join()
3752 addResponses.append( t.result )
3753
3754 # main.TRUE = successfully changed the set
3755 # main.FALSE = action resulted in no change in set
3756 # main.ERROR - Some error in executing the function
3757 addAllResults = main.TRUE
3758 for i in range( numControllers ):
3759 if addResponses[ i ] == main.TRUE:
3760 # All is well
3761 pass
3762 elif addResponses[ i ] == main.FALSE:
3763 # Already in set, probably fine
3764 pass
3765 elif addResponses[ i ] == main.ERROR:
3766 # Error in execution
3767 addAllResults = main.FALSE
3768 else:
3769 # unexpected result
3770 addAllResults = main.FALSE
3771 if addAllResults != main.TRUE:
3772 main.log.error( "Error executing set addAll" )
3773
3774 # Check if set is still correct
3775 size = len( onosSet )
3776 getResponses = []
3777 threads = []
3778 for i in range( numControllers ):
3779 t = main.Thread( target=CLIs[i].setTestGet,
3780 name="setTestGet-" + str( i ),
3781 args=[ onosSetName ] )
3782 threads.append( t )
3783 t.start()
3784 for t in threads:
3785 t.join()
3786 getResponses.append( t.result )
3787 getResults = main.TRUE
3788 for i in range( numControllers ):
3789 if isinstance( getResponses[ i ], list):
3790 current = set( getResponses[ i ] )
3791 if len( current ) == len( getResponses[ i ] ):
3792 # no repeats
3793 if onosSet != current:
3794 main.log.error( "ONOS" + str( i + 1 ) +
3795 " has incorrect view" +
3796 " of set " + onosSetName + ":\n" +
3797 str( getResponses[ i ] ) )
3798 main.log.debug( "Expected: " + str( onosSet ) )
3799 main.log.debug( "Actual: " + str( current ) )
3800 getResults = main.FALSE
3801 else:
3802 # error: duplicates in response, not a valid set
3803 main.log.error( "ONOS" + str( i + 1 ) +
3804 " has repeat elements in" +
3805 " set " + onosSetName + ":\n" +
3806 str( getResponses[ i ] ) )
3807 getResults = main.FALSE
3808 elif getResponses[ i ] == main.ERROR:
3809 getResults = main.FALSE
3810 sizeResponses = []
3811 threads = []
3812 for i in range( numControllers ):
3813 t = main.Thread( target=CLIs[i].setTestSize,
3814 name="setTestSize-" + str( i ),
3815 args=[ onosSetName ] )
3816 threads.append( t )
3817 t.start()
3818 for t in threads:
3819 t.join()
3820 sizeResponses.append( t.result )
3821 sizeResults = main.TRUE
3822 for i in range( numControllers ):
3823 if size != sizeResponses[ i ]:
3824 sizeResults = main.FALSE
3825 main.log.error( "ONOS" + str( i + 1 ) +
3826 " expected a size of " + str( size ) +
3827 " for set " + onosSetName +
3828 " but got " + str( sizeResponses[ i ] ) )
3829 addAllResults = addAllResults and getResults and sizeResults
3830 utilities.assert_equals( expect=main.TRUE,
3831 actual=addAllResults,
3832 onpass="Set addAll correct",
3833 onfail="Set addAll was incorrect" )
3834
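# retain() is driven through setTestRemove with retain=True, keeping only the
# retainValue elements; mirrored locally with intersection_update().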
3835 main.step( "Distributed Set retain()" )
3836 onosSet.intersection_update( retainValue.split() )
3837 retainResponses = []
3838 threads = []
3839 for i in range( numControllers ):
3840 t = main.Thread( target=CLIs[i].setTestRemove,
3841 name="setTestRetain-" + str( i ),
3842 args=[ onosSetName, retainValue ],
3843 kwargs={ "retain": True } )
3844 threads.append( t )
3845 t.start()
3846 for t in threads:
3847 t.join()
3848 retainResponses.append( t.result )
3849
3850 # main.TRUE = successfully changed the set
3851 # main.FALSE = action resulted in no change in set
3852 # main.ERROR - Some error in executing the function
3853 retainResults = main.TRUE
3854 for i in range( numControllers ):
3855 if retainResponses[ i ] == main.TRUE:
3856 # All is well
3857 pass
3858 elif retainResponses[ i ] == main.FALSE:
3859 # Already in set, probably fine
3860 pass
3861 elif retainResponses[ i ] == main.ERROR:
3862 # Error in execution
3863 retainResults = main.FALSE
3864 else:
3865 # unexpected result
3866 retainResults = main.FALSE
3867 if retainResults != main.TRUE:
3868 main.log.error( "Error executing set retain" )
3869
3870 # Check if set is still correct
3871 size = len( onosSet )
3872 getResponses = []
3873 threads = []
3874 for i in range( numControllers ):
3875 t = main.Thread( target=CLIs[i].setTestGet,
3876 name="setTestGet-" + str( i ),
3877 args=[ onosSetName ] )
3878 threads.append( t )
3879 t.start()
3880 for t in threads:
3881 t.join()
3882 getResponses.append( t.result )
3883 getResults = main.TRUE
3884 for i in range( numControllers ):
3885 if isinstance( getResponses[ i ], list):
3886 current = set( getResponses[ i ] )
3887 if len( current ) == len( getResponses[ i ] ):
3888 # no repeats
3889 if onosSet != current:
3890 main.log.error( "ONOS" + str( i + 1 ) +
3891 " has incorrect view" +
3892 " of set " + onosSetName + ":\n" +
3893 str( getResponses[ i ] ) )
3894 main.log.debug( "Expected: " + str( onosSet ) )
3895 main.log.debug( "Actual: " + str( current ) )
3896 getResults = main.FALSE
3897 else:
3898 # error: duplicates in response, not a valid set
3899 main.log.error( "ONOS" + str( i + 1 ) +
3900 " has repeat elements in" +
3901 " set " + onosSetName + ":\n" +
3902 str( getResponses[ i ] ) )
3903 getResults = main.FALSE
3904 elif getResponses[ i ] == main.ERROR:
3905 getResults = main.FALSE
3906 sizeResponses = []
3907 threads = []
3908 for i in range( numControllers ):
3909 t = main.Thread( target=CLIs[i].setTestSize,
3910 name="setTestSize-" + str( i ),
3911 args=[ onosSetName ] )
3912 threads.append( t )
3913 t.start()
3914 for t in threads:
3915 t.join()
3916 sizeResponses.append( t.result )
3917 sizeResults = main.TRUE
3918 for i in range( numControllers ):
3919 if size != sizeResponses[ i ]:
3920 sizeResults = main.FALSE
3921 main.log.error( "ONOS" + str( i + 1 ) +
3922 " expected a size of " +
3923 str( size ) + " for set " + onosSetName +
3924 " but got " + str( sizeResponses[ i ] ) )
3925 retainResults = retainResults and getResults and sizeResults
3926 utilities.assert_equals( expect=main.TRUE,
3927 actual=retainResults,
3928 onpass="Set retain correct",
3929 onfail="Set retain was incorrect" )
3930