"""
Description: This test is to determine if ONOS can handle
             all of its nodes restarting

List of test cases:
CASE1: Compile ONOS and push it to the test machines
CASE2: Assign devices to controllers
CASE21: Assign mastership to controllers
CASE3: Assign intents
CASE4: Ping across added host intents
CASE5: Reading state of ONOS
CASE6: The Failure case
CASE7: Check state after control plane failure
CASE8: Compare topo
CASE9: Link s3-s28 down
CASE10: Link s3-s28 up
CASE11: Switch down
CASE12: Switch up
CASE13: Clean up
CASE14: Start election app on all ONOS nodes
CASE15: Check that Leadership Election is still functional
CASE16: Install Distributed Primitives app
CASE17: Check for basic functionality with distributed primitives
"""
25
26
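# NOTE: TestON is assumed to drive this class by running the CASE* methods in
# the order listed in the test's .params file. CASE1 must run first: it sets
# the globals ( numControllers, CLIs, nodes, ONOS*Port, labels, data ) that
# the later cases assert on before using them.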
27class HAclusterRestart:
28
29 def __init__( self ):
30 self.default = ''
31
32 def CASE1( self, main ):
33 """
34 CASE1 is to compile ONOS and push it to the test machines
35
36 Startup sequence:
37 cell <name>
38 onos-verify-cell
39 NOTE: temporary - onos-remove-raft-logs
40 onos-uninstall
41 start mininet
42 git pull
43 mvn clean install
44 onos-package
45 onos-install -f
46 onos-wait-for-start
47 start cli sessions
48 start tcpdump
49 """
50 main.log.info( "ONOS HA test: Restart all ONOS nodes - " +
51 "initialization" )
52 main.case( "Setting up test environment" )
        main.caseExplaination = "Set up the test environment including " +\
                                "installing ONOS, starting Mininet and " +\
                                "ONOS cli sessions."
56 # TODO: save all the timers and output them for plotting
57
58 # load some variables from the params file
59 PULLCODE = False
60 if main.params[ 'Git' ] == 'True':
61 PULLCODE = True
62 gitBranch = main.params[ 'branch' ]
63 cellName = main.params[ 'ENV' ][ 'cellName' ]
64
65 # set global variables
66 global numControllers
67 numControllers = int( main.params[ 'num_controllers' ] )
68 if main.ONOSbench.maxNodes:
69 if main.ONOSbench.maxNodes < numControllers:
70 numControllers = int( main.ONOSbench.maxNodes )
71 global ONOS1Port
72 global ONOS2Port
73 global ONOS3Port
74 global ONOS4Port
75 global ONOS5Port
76 global ONOS6Port
77 global ONOS7Port
78 # These are for csv plotting in jenkins
79 global labels
80 global data
81 labels = []
82 data = []
83
84 # FIXME: just get controller port from params?
85 # TODO: do we really need all these?
86 ONOS1Port = main.params[ 'CTRL' ][ 'port1' ]
87 ONOS2Port = main.params[ 'CTRL' ][ 'port2' ]
88 ONOS3Port = main.params[ 'CTRL' ][ 'port3' ]
89 ONOS4Port = main.params[ 'CTRL' ][ 'port4' ]
90 ONOS5Port = main.params[ 'CTRL' ][ 'port5' ]
91 ONOS6Port = main.params[ 'CTRL' ][ 'port6' ]
92 ONOS7Port = main.params[ 'CTRL' ][ 'port7' ]
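        # NOTE: the params keys read by this test ( here and in later cases )
        # are, roughly: Git, branch, num_controllers, ENV/cellName,
        # ENV/appString, CTRL/port1..port7, tcpdump, MNtcpdump/folder|intf|port
        # and PING/source1..10 with PING/target1..10; the exact .params file
        # layout is assumed, not defined here.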
93
94 global CLIs
95 CLIs = []
96 global nodes
97 nodes = []
98 ipList = []
99 for i in range( 1, numControllers + 1 ):
100 CLIs.append( getattr( main, 'ONOScli' + str( i ) ) )
101 nodes.append( getattr( main, 'ONOS' + str( i ) ) )
102 ipList.append( nodes[ -1 ].ip_address )
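        # The ONOScli1..N and ONOS1..N components resolved above are assumed
        # to be defined in the test's .topo file; ipList gathers the node IPs
        # for the cell file created in the next step.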
103
104 main.step( "Create cell file" )
105 cellAppString = main.params[ 'ENV' ][ 'appString' ]
106 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
107 main.Mininet1.ip_address,
108 cellAppString, ipList )
109 main.step( "Applying cell variable to environment" )
110 cellResult = main.ONOSbench.setCell( cellName )
111 verifyResult = main.ONOSbench.verifyCell()
112
        # FIXME: this is a short-term fix
114 main.log.info( "Removing raft logs" )
115 main.ONOSbench.onosRemoveRaftLogs()
116
117 main.log.info( "Uninstalling ONOS" )
118 for node in nodes:
119 main.ONOSbench.onosUninstall( node.ip_address )
120
121 # Make sure ONOS is DEAD
122 main.log.info( "Killing any ONOS processes" )
123 killResults = main.TRUE
124 for node in nodes:
125 killed = main.ONOSbench.onosKill( node.ip_address )
126 killResults = killResults and killed
127
128 cleanInstallResult = main.TRUE
129 gitPullResult = main.TRUE
130
131 main.step( "Starting Mininet" )
132 # scp topo file to mininet
133 # TODO: move to params?
134 topoName = "obelisk.py"
135 filePath = main.ONOSbench.home + "/tools/test/topos/"
136 main.ONOSbench.copyMininetFile( topoName, filePath,
137 main.Mininet1.user_name,
138 main.Mininet1.ip_address )
139 mnResult = main.Mininet1.startNet( )
140 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
141 onpass="Mininet Started",
142 onfail="Error starting Mininet" )
143
144 main.step( "Git checkout and pull " + gitBranch )
145 if PULLCODE:
146 main.ONOSbench.gitCheckout( gitBranch )
147 gitPullResult = main.ONOSbench.gitPull()
148 # values of 1 or 3 are good
149 utilities.assert_lesser( expect=0, actual=gitPullResult,
150 onpass="Git pull successful",
151 onfail="Git pull failed" )
152 main.ONOSbench.getVersion( report=True )
153
154 main.step( "Using mvn clean install" )
155 cleanInstallResult = main.TRUE
156 if PULLCODE and gitPullResult == main.TRUE:
157 cleanInstallResult = main.ONOSbench.cleanInstall()
158 else:
159 main.log.warn( "Did not pull new code so skipping mvn " +
160 "clean install" )
161 utilities.assert_equals( expect=main.TRUE,
162 actual=cleanInstallResult,
163 onpass="MCI successful",
164 onfail="MCI failed" )
165 # GRAPHS
166 # NOTE: important params here:
167 # job = name of Jenkins job
        # Plot Name = Plot-HA; only used if there are multiple plots
169 # index = The number of the graph under plot name
170 job = "HAclusterRestart"
171 plotName = "Plot-HA"
172 graphs = '<ac:structured-macro ac:name="html">\n'
173 graphs += '<ac:plain-text-body><![CDATA[\n'
174 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
175 '/plot/' + plotName + '/getPlot?index=0' +\
176 '&width=500&height=300"' +\
177 'noborder="0" width="500" height="300" scrolling="yes" ' +\
178 'seamless="seamless"></iframe>\n'
179 graphs += ']]></ac:plain-text-body>\n'
180 graphs += '</ac:structured-macro>\n'
181 main.log.wiki(graphs)
182
183 main.step( "Creating ONOS package" )
184 packageResult = main.ONOSbench.onosPackage()
185 utilities.assert_equals( expect=main.TRUE, actual=packageResult,
186 onpass="ONOS package successful",
187 onfail="ONOS package failed" )
188
189 main.step( "Installing ONOS package" )
190 onosInstallResult = main.TRUE
191 for node in nodes:
192 tmpResult = main.ONOSbench.onosInstall( options="-f",
193 node=node.ip_address )
194 onosInstallResult = onosInstallResult and tmpResult
195 utilities.assert_equals( expect=main.TRUE, actual=onosInstallResult,
196 onpass="ONOS install successful",
197 onfail="ONOS install failed" )
198
199 main.step( "Checking if ONOS is up yet" )
200 for i in range( 2 ):
201 onosIsupResult = main.TRUE
202 for node in nodes:
203 started = main.ONOSbench.isup( node.ip_address )
204 if not started:
205 main.log.error( node.name + " didn't start!" )
206 main.ONOSbench.onosStop( node.ip_address )
207 main.ONOSbench.onosStart( node.ip_address )
208 onosIsupResult = onosIsupResult and started
209 if onosIsupResult == main.TRUE:
210 break
211 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
212 onpass="ONOS startup successful",
213 onfail="ONOS startup failed" )
214
        main.step( "Starting ONOS CLI sessions" )
216 cliResults = main.TRUE
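        # This fan-out pattern repeats throughout the test: wrap one CLI call
        # per node in a main.Thread, start them all, then join each thread and
        # AND its t.result into a single pass/fail flag.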
217 threads = []
218 for i in range( numControllers ):
219 t = main.Thread( target=CLIs[i].startOnosCli,
220 name="startOnosCli-" + str( i ),
221 args=[nodes[i].ip_address] )
222 threads.append( t )
223 t.start()
224
225 for t in threads:
226 t.join()
227 cliResults = cliResults and t.result
228 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
229 onpass="ONOS cli startup successful",
230 onfail="ONOS cli startup failed" )
231
232 if main.params[ 'tcpdump' ].lower() == "true":
233 main.step( "Start Packet Capture MN" )
234 main.Mininet2.startTcpdump(
235 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
236 + "-MN.pcap",
237 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
238 port=main.params[ 'MNtcpdump' ][ 'port' ] )
239
240 main.step( "App Ids check" )
241 appCheck = main.TRUE
242 threads = []
243 for i in range( numControllers ):
244 t = main.Thread( target=CLIs[i].appToIDCheck,
245 name="appToIDCheck-" + str( i ),
246 args=[] )
247 threads.append( t )
248 t.start()
249
250 for t in threads:
251 t.join()
252 appCheck = appCheck and t.result
253 if appCheck != main.TRUE:
254 main.log.warn( CLIs[0].apps() )
255 main.log.warn( CLIs[0].appIDs() )
256 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
257 onpass="App Ids seem to be correct",
258 onfail="Something is wrong with app Ids" )
259
260 if cliResults == main.FALSE:
261 main.log.error( "Failed to start ONOS, stopping test" )
262 main.cleanup()
263 main.exit()
264
265 def CASE2( self, main ):
266 """
267 Assign devices to controllers
268 """
269 import re
270 import time
271 assert numControllers, "numControllers not defined"
272 assert main, "main not defined"
273 assert utilities.assert_equals, "utilities.assert_equals not defined"
274 assert CLIs, "CLIs not defined"
275 assert nodes, "nodes not defined"
276 assert ONOS1Port, "ONOS1Port not defined"
277 assert ONOS2Port, "ONOS2Port not defined"
278 assert ONOS3Port, "ONOS3Port not defined"
279 assert ONOS4Port, "ONOS4Port not defined"
280 assert ONOS5Port, "ONOS5Port not defined"
281 assert ONOS6Port, "ONOS6Port not defined"
282 assert ONOS7Port, "ONOS7Port not defined"
283
284 main.case( "Assigning devices to controllers" )
285 main.caseExplaination = "Assign switches to ONOS using 'ovs-vsctl' " +\
286 "and check that an ONOS node becomes the " +\
287 "master of the device."
288 main.step( "Assign switches to controllers" )
289
290 ipList = []
291 for i in range( numControllers ):
292 ipList.append( nodes[ i ].ip_address )
293 swList = []
294 for i in range( 1, 29 ):
295 swList.append( "s" + str( i ) )
296 main.Mininet1.assignSwController( sw=swList, ip=ipList )
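        # assignSwController points every switch at all of the controller IPs;
        # the loop below reads each switch's controller list back from Mininet
        # and checks that a "tcp:<node ip>" entry exists for every ONOS node.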
297
298 mastershipCheck = main.TRUE
299 for i in range( 1, 29 ):
300 response = main.Mininet1.getSwController( "s" + str( i ) )
301 try:
302 main.log.info( str( response ) )
303 except Exception:
304 main.log.info( repr( response ) )
305 for node in nodes:
306 if re.search( "tcp:" + node.ip_address, response ):
307 mastershipCheck = mastershipCheck and main.TRUE
308 else:
309 main.log.error( "Error, node " + node.ip_address + " is " +
310 "not in the list of controllers s" +
311 str( i ) + " is connecting to." )
312 mastershipCheck = main.FALSE
313 utilities.assert_equals(
314 expect=main.TRUE,
315 actual=mastershipCheck,
316 onpass="Switch mastership assigned correctly",
317 onfail="Switches not assigned correctly to controllers" )
318
319 def CASE21( self, main ):
320 """
321 Assign mastership to controllers
322 """
323 import re
324 import time
325 assert numControllers, "numControllers not defined"
326 assert main, "main not defined"
327 assert utilities.assert_equals, "utilities.assert_equals not defined"
328 assert CLIs, "CLIs not defined"
329 assert nodes, "nodes not defined"
330 assert ONOS1Port, "ONOS1Port not defined"
331 assert ONOS2Port, "ONOS2Port not defined"
332 assert ONOS3Port, "ONOS3Port not defined"
333 assert ONOS4Port, "ONOS4Port not defined"
334 assert ONOS5Port, "ONOS5Port not defined"
335 assert ONOS6Port, "ONOS6Port not defined"
336 assert ONOS7Port, "ONOS7Port not defined"
337
338 main.case( "Assigning Controller roles for switches" )
339 main.caseExplaination = "Check that ONOS is connected to each " +\
340 "device. Then manually assign" +\
341 " mastership to specific ONOS nodes using" +\
342 " 'device-role'"
343 main.step( "Assign mastership of switches to specific controllers" )
344 # Manually assign mastership to the controller we want
345 roleCall = main.TRUE
346
347 ipList = [ ]
348 deviceList = []
349 try:
350 # Assign mastership to specific controllers. This assignment was
            # determined for a 7-node cluster, but will work with any sized
352 # cluster
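            # The numeric names passed to getDevice() ( "1000", "2000", ... )
            # are assumed to match the tail of each obelisk-topology switch
            # DPID; for s8-s27 the dpid is built from '3' or '6' plus the
            # zero-padded switch number.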
353 for i in range( 1, 29 ): # switches 1 through 28
354 # set up correct variables:
355 if i == 1:
356 c = 0
357 ip = nodes[ c ].ip_address # ONOS1
358 deviceId = main.ONOScli1.getDevice( "1000" ).get( 'id' )
359 elif i == 2:
360 c = 1 % numControllers
361 ip = nodes[ c ].ip_address # ONOS2
362 deviceId = main.ONOScli1.getDevice( "2000" ).get( 'id' )
363 elif i == 3:
364 c = 1 % numControllers
365 ip = nodes[ c ].ip_address # ONOS2
366 deviceId = main.ONOScli1.getDevice( "3000" ).get( 'id' )
367 elif i == 4:
368 c = 3 % numControllers
369 ip = nodes[ c ].ip_address # ONOS4
370 deviceId = main.ONOScli1.getDevice( "3004" ).get( 'id' )
371 elif i == 5:
372 c = 2 % numControllers
373 ip = nodes[ c ].ip_address # ONOS3
374 deviceId = main.ONOScli1.getDevice( "5000" ).get( 'id' )
375 elif i == 6:
376 c = 2 % numControllers
377 ip = nodes[ c ].ip_address # ONOS3
378 deviceId = main.ONOScli1.getDevice( "6000" ).get( 'id' )
379 elif i == 7:
380 c = 5 % numControllers
381 ip = nodes[ c ].ip_address # ONOS6
382 deviceId = main.ONOScli1.getDevice( "6007" ).get( 'id' )
383 elif i >= 8 and i <= 17:
384 c = 4 % numControllers
385 ip = nodes[ c ].ip_address # ONOS5
386 dpid = '3' + str( i ).zfill( 3 )
387 deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
388 elif i >= 18 and i <= 27:
389 c = 6 % numControllers
390 ip = nodes[ c ].ip_address # ONOS7
391 dpid = '6' + str( i ).zfill( 3 )
392 deviceId = main.ONOScli1.getDevice( dpid ).get( 'id' )
393 elif i == 28:
394 c = 0
395 ip = nodes[ c ].ip_address # ONOS1
396 deviceId = main.ONOScli1.getDevice( "2800" ).get( 'id' )
397 else:
398 main.log.error( "You didn't write an else statement for " +
399 "switch s" + str( i ) )
400 roleCall = main.FALSE
401 # Assign switch
402 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
403 # TODO: make this controller dynamic
404 roleCall = roleCall and main.ONOScli1.deviceRole( deviceId,
405 ip )
406 ipList.append( ip )
407 deviceList.append( deviceId )
408 except ( AttributeError, AssertionError ):
409 main.log.exception( "Something is wrong with ONOS device view" )
410 main.log.info( main.ONOScli1.devices() )
411 utilities.assert_equals(
412 expect=main.TRUE,
413 actual=roleCall,
414 onpass="Re-assigned switch mastership to designated controller",
415 onfail="Something wrong with deviceRole calls" )
416
417 main.step( "Check mastership was correctly assigned" )
418 roleCheck = main.TRUE
419 # NOTE: This is due to the fact that device mastership change is not
420 # atomic and is actually a multi step process
421 time.sleep( 5 )
422 for i in range( len( ipList ) ):
423 ip = ipList[i]
424 deviceId = deviceList[i]
425 # Check assignment
426 master = main.ONOScli1.getRole( deviceId ).get( 'master' )
427 if ip in master:
428 roleCheck = roleCheck and main.TRUE
429 else:
430 roleCheck = roleCheck and main.FALSE
431 main.log.error( "Error, controller " + ip + " is not" +
432 " master " + "of device " +
433 str( deviceId ) + ". Master is " +
434 repr( master ) + "." )
435 utilities.assert_equals(
436 expect=main.TRUE,
437 actual=roleCheck,
438 onpass="Switches were successfully reassigned to designated " +
439 "controller",
440 onfail="Switches were not successfully reassigned" )
441
442 def CASE3( self, main ):
443 """
444 Assign intents
445 """
446 import time
447 import json
448 assert numControllers, "numControllers not defined"
449 assert main, "main not defined"
450 assert utilities.assert_equals, "utilities.assert_equals not defined"
451 assert CLIs, "CLIs not defined"
452 assert nodes, "nodes not defined"
453 try:
454 labels
455 except NameError:
456 main.log.error( "labels not defined, setting to []" )
457 labels = []
458 try:
459 data
460 except NameError:
461 main.log.error( "data not defined, setting to []" )
462 data = []
        # NOTE: we must reinstall intents until we have a persistent intent
464 # datastore!
465 main.case( "Adding host Intents" )
466 main.caseExplaination = "Discover hosts by using pingall then " +\
467 "assign predetermined host-to-host intents." +\
468 " After installation, check that the intent" +\
469 " is distributed to all nodes and the state" +\
470 " is INSTALLED"
471
472 # install onos-app-fwd
473 main.step( "Install reactive forwarding app" )
474 installResults = CLIs[0].activateApp( "org.onosproject.fwd" )
475 utilities.assert_equals( expect=main.TRUE, actual=installResults,
476 onpass="Install fwd successful",
477 onfail="Install fwd failed" )
478
479 main.step( "Check app ids" )
480 appCheck = main.TRUE
481 threads = []
482 for i in range( numControllers ):
483 t = main.Thread( target=CLIs[i].appToIDCheck,
484 name="appToIDCheck-" + str( i ),
485 args=[] )
486 threads.append( t )
487 t.start()
488
489 for t in threads:
490 t.join()
491 appCheck = appCheck and t.result
492 if appCheck != main.TRUE:
493 main.log.warn( CLIs[0].apps() )
494 main.log.warn( CLIs[0].appIDs() )
495 utilities.assert_equals( expect=main.TRUE, actual=appCheck,
496 onpass="App Ids seem to be correct",
497 onfail="Something is wrong with app Ids" )
498
499 main.step( "Discovering Hosts( Via pingall for now )" )
500 # FIXME: Once we have a host discovery mechanism, use that instead
501 # REACTIVE FWD test
502 pingResult = main.FALSE
503 for i in range(2): # Retry if pingall fails first time
504 time1 = time.time()
505 pingResult = main.Mininet1.pingall()
506 if i == 0:
507 utilities.assert_equals(
508 expect=main.TRUE,
509 actual=pingResult,
510 onpass="Reactive Pingall test passed",
511 onfail="Reactive Pingall failed, " +
512 "one or more ping pairs failed" )
513 time2 = time.time()
514 main.log.info( "Time for pingall: %2f seconds" %
515 ( time2 - time1 ) )
516 # timeout for fwd flows
517 time.sleep( 11 )
518 # uninstall onos-app-fwd
519 main.step( "Uninstall reactive forwarding app" )
520 uninstallResult = CLIs[0].deactivateApp( "org.onosproject.fwd" )
521 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
522 onpass="Uninstall fwd successful",
523 onfail="Uninstall fwd failed" )
524
525 main.step( "Check app ids" )
526 threads = []
527 appCheck2 = main.TRUE
528 for i in range( numControllers ):
529 t = main.Thread( target=CLIs[i].appToIDCheck,
530 name="appToIDCheck-" + str( i ),
531 args=[] )
532 threads.append( t )
533 t.start()
534
535 for t in threads:
536 t.join()
537 appCheck2 = appCheck2 and t.result
538 if appCheck2 != main.TRUE:
539 main.log.warn( CLIs[0].apps() )
540 main.log.warn( CLIs[0].appIDs() )
541 utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
542 onpass="App Ids seem to be correct",
543 onfail="Something is wrong with app Ids" )
544
545 main.step( "Add host intents via cli" )
546 intentIds = []
547 # TODO: move the host numbers to params
548 # Maybe look at all the paths we ping?
549 intentAddResult = True
550 hostResult = main.TRUE
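        # Each intent pairs host h<i> with h<i+10>. The MAC addresses are the
        # host number in hex ( h8 -> 00:00:00:00:00:08, h26 -> ...:1A ), which
        # assumes Mininet assigned MACs sequentially by host number.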
551 for i in range( 8, 18 ):
552 main.log.info( "Adding host intent between h" + str( i ) +
553 " and h" + str( i + 10 ) )
554 host1 = "00:00:00:00:00:" + \
555 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
556 host2 = "00:00:00:00:00:" + \
557 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
558 # NOTE: getHost can return None
559 host1Dict = main.ONOScli1.getHost( host1 )
560 host2Dict = main.ONOScli1.getHost( host2 )
561 host1Id = None
562 host2Id = None
563 if host1Dict and host2Dict:
564 host1Id = host1Dict.get( 'id', None )
565 host2Id = host2Dict.get( 'id', None )
566 if host1Id and host2Id:
567 nodeNum = ( i % numControllers )
568 tmpId = CLIs[ nodeNum ].addHostIntent( host1Id, host2Id )
569 if tmpId:
570 main.log.info( "Added intent with id: " + tmpId )
571 intentIds.append( tmpId )
572 else:
573 main.log.error( "addHostIntent returned: " +
574 repr( tmpId ) )
575 else:
576 main.log.error( "Error, getHost() failed for h" + str( i ) +
577 " and/or h" + str( i + 10 ) )
578 hosts = CLIs[ 0 ].hosts()
579 main.log.warn( "Hosts output: " )
580 try:
581 main.log.warn( json.dumps( json.loads( hosts ),
582 sort_keys=True,
583 indent=4,
584 separators=( ',', ': ' ) ) )
585 except ( ValueError, TypeError ):
586 main.log.warn( repr( hosts ) )
587 hostResult = main.FALSE
588 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
589 onpass="Found a host id for each host",
590 onfail="Error looking up host ids" )
591
592 intentStart = time.time()
593 onosIds = main.ONOScli1.getAllIntentsId()
594 main.log.info( "Submitted intents: " + str( intentIds ) )
595 main.log.info( "Intents in ONOS: " + str( onosIds ) )
596 for intent in intentIds:
597 if intent in onosIds:
598 pass # intent submitted is in onos
599 else:
600 intentAddResult = False
601 if intentAddResult:
602 intentStop = time.time()
603 else:
604 intentStop = None
605 # Print the intent states
606 intents = main.ONOScli1.intents()
607 intentStates = []
608 installedCheck = True
609 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
610 count = 0
611 try:
612 for intent in json.loads( intents ):
613 state = intent.get( 'state', None )
614 if "INSTALLED" not in state:
615 installedCheck = False
616 intentId = intent.get( 'id', None )
617 intentStates.append( ( intentId, state ) )
618 except ( ValueError, TypeError ):
619 main.log.exception( "Error parsing intents" )
620 # add submitted intents not in the store
621 tmplist = [ i for i, s in intentStates ]
622 missingIntents = False
623 for i in intentIds:
624 if i not in tmplist:
625 intentStates.append( ( i, " - " ) )
626 missingIntents = True
627 intentStates.sort()
628 for i, s in intentStates:
629 count += 1
630 main.log.info( "%-6s%-15s%-15s" %
631 ( str( count ), str( i ), str( s ) ) )
632 leaders = main.ONOScli1.leaders()
633 try:
634 missing = False
635 if leaders:
636 parsedLeaders = json.loads( leaders )
637 main.log.warn( json.dumps( parsedLeaders,
638 sort_keys=True,
639 indent=4,
640 separators=( ',', ': ' ) ) )
641 # check for all intent partitions
642 topics = []
643 for i in range( 14 ):
644 topics.append( "intent-partition-" + str( i ) )
645 main.log.debug( topics )
646 ONOStopics = [ j['topic'] for j in parsedLeaders ]
647 for topic in topics:
648 if topic not in ONOStopics:
649 main.log.error( "Error: " + topic +
650 " not in leaders" )
651 missing = True
652 else:
653 main.log.error( "leaders() returned None" )
654 except ( ValueError, TypeError ):
655 main.log.exception( "Error parsing leaders" )
656 main.log.error( repr( leaders ) )
657 # Check all nodes
658 if missing:
659 for node in CLIs:
660 response = node.leaders( jsonFormat=False)
661 main.log.warn( str( node.name ) + " leaders output: \n" +
662 str( response ) )
663
664 partitions = main.ONOScli1.partitions()
665 try:
666 if partitions :
667 parsedPartitions = json.loads( partitions )
668 main.log.warn( json.dumps( parsedPartitions,
669 sort_keys=True,
670 indent=4,
671 separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
673 # TODO check for consistency among nodes
674 else:
675 main.log.error( "partitions() returned None" )
676 except ( ValueError, TypeError ):
677 main.log.exception( "Error parsing partitions" )
678 main.log.error( repr( partitions ) )
679 pendingMap = main.ONOScli1.pendingMap()
680 try:
681 if pendingMap :
682 parsedPending = json.loads( pendingMap )
683 main.log.warn( json.dumps( parsedPending,
684 sort_keys=True,
685 indent=4,
686 separators=( ',', ': ' ) ) )
687 # TODO check something here?
688 else:
689 main.log.error( "pendingMap() returned None" )
690 except ( ValueError, TypeError ):
691 main.log.exception( "Error parsing pending map" )
692 main.log.error( repr( pendingMap ) )
693
694 intentAddResult = bool( intentAddResult and not missingIntents and
695 installedCheck )
696 if not intentAddResult:
697 main.log.error( "Error in pushing host intents to ONOS" )
698
699 main.step( "Intent Anti-Entropy dispersion" )
700 for i in range(100):
701 correct = True
702 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
703 for cli in CLIs:
704 onosIds = []
705 ids = cli.getAllIntentsId()
706 onosIds.append( ids )
707 main.log.debug( "Intents in " + cli.name + ": " +
708 str( sorted( onosIds ) ) )
709 if sorted( ids ) != sorted( intentIds ):
710 main.log.warn( "Set of intent IDs doesn't match" )
711 correct = False
712 break
713 else:
714 intents = json.loads( cli.intents() )
715 for intent in intents:
716 if intent[ 'state' ] != "INSTALLED":
717 main.log.warn( "Intent " + intent[ 'id' ] +
718 " is " + intent[ 'state' ] )
719 correct = False
720 break
721 if correct:
722 break
723 else:
724 time.sleep(1)
725 if not intentStop:
726 intentStop = time.time()
727 global gossipTime
728 gossipTime = intentStop - intentStart
729 main.log.info( "It took about " + str( gossipTime ) +
730 " seconds for all intents to appear in each node" )
731 append = False
732 title = "Gossip Intents"
733 count = 1
734 while append is False:
735 curTitle = title + str( count )
736 if curTitle not in labels:
737 labels.append( curTitle )
738 data.append( str( gossipTime ) )
739 append = True
740 else:
741 count += 1
742 # FIXME: make this time configurable/calculate based off of number of
743 # nodes and gossip rounds
744 utilities.assert_greater_equals(
745 expect=40, actual=gossipTime,
746 onpass="ECM anti-entropy for intents worked within " +
747 "expected time",
748 onfail="Intent ECM anti-entropy took too long" )
749 if gossipTime <= 40:
750 intentAddResult = True
751
752 if not intentAddResult or "key" in pendingMap:
753 import time
754 installedCheck = True
755 main.log.info( "Sleeping 60 seconds to see if intents are found" )
756 time.sleep( 60 )
757 onosIds = main.ONOScli1.getAllIntentsId()
758 main.log.info( "Submitted intents: " + str( intentIds ) )
759 main.log.info( "Intents in ONOS: " + str( onosIds ) )
760 # Print the intent states
761 intents = main.ONOScli1.intents()
762 intentStates = []
763 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
764 count = 0
765 try:
766 for intent in json.loads( intents ):
767 # Iter through intents of a node
768 state = intent.get( 'state', None )
769 if "INSTALLED" not in state:
770 installedCheck = False
771 intentId = intent.get( 'id', None )
772 intentStates.append( ( intentId, state ) )
773 except ( ValueError, TypeError ):
774 main.log.exception( "Error parsing intents" )
775 # add submitted intents not in the store
776 tmplist = [ i for i, s in intentStates ]
777 for i in intentIds:
778 if i not in tmplist:
779 intentStates.append( ( i, " - " ) )
780 intentStates.sort()
781 for i, s in intentStates:
782 count += 1
783 main.log.info( "%-6s%-15s%-15s" %
784 ( str( count ), str( i ), str( s ) ) )
785 leaders = main.ONOScli1.leaders()
786 try:
787 missing = False
788 if leaders:
789 parsedLeaders = json.loads( leaders )
790 main.log.warn( json.dumps( parsedLeaders,
791 sort_keys=True,
792 indent=4,
793 separators=( ',', ': ' ) ) )
794 # check for all intent partitions
795 # check for election
796 topics = []
797 for i in range( 14 ):
798 topics.append( "intent-partition-" + str( i ) )
799 # FIXME: this should only be after we start the app
800 topics.append( "org.onosproject.election" )
801 main.log.debug( topics )
802 ONOStopics = [ j['topic'] for j in parsedLeaders ]
803 for topic in topics:
804 if topic not in ONOStopics:
805 main.log.error( "Error: " + topic +
806 " not in leaders" )
807 missing = True
808 else:
809 main.log.error( "leaders() returned None" )
810 except ( ValueError, TypeError ):
811 main.log.exception( "Error parsing leaders" )
812 main.log.error( repr( leaders ) )
813 # Check all nodes
814 if missing:
815 for node in CLIs:
816 response = node.leaders( jsonFormat=False)
817 main.log.warn( str( node.name ) + " leaders output: \n" +
818 str( response ) )
819
820 partitions = main.ONOScli1.partitions()
821 try:
822 if partitions :
823 parsedPartitions = json.loads( partitions )
824 main.log.warn( json.dumps( parsedPartitions,
825 sort_keys=True,
826 indent=4,
827 separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
829 # TODO check for consistency among nodes
830 else:
831 main.log.error( "partitions() returned None" )
832 except ( ValueError, TypeError ):
833 main.log.exception( "Error parsing partitions" )
834 main.log.error( repr( partitions ) )
835 pendingMap = main.ONOScli1.pendingMap()
836 try:
837 if pendingMap :
838 parsedPending = json.loads( pendingMap )
839 main.log.warn( json.dumps( parsedPending,
840 sort_keys=True,
841 indent=4,
842 separators=( ',', ': ' ) ) )
843 # TODO check something here?
844 else:
845 main.log.error( "pendingMap() returned None" )
846 except ( ValueError, TypeError ):
847 main.log.exception( "Error parsing pending map" )
848 main.log.error( repr( pendingMap ) )
849
850 def CASE4( self, main ):
851 """
852 Ping across added host intents
853 """
854 import json
855 import time
856 assert numControllers, "numControllers not defined"
857 assert main, "main not defined"
858 assert utilities.assert_equals, "utilities.assert_equals not defined"
859 assert CLIs, "CLIs not defined"
860 assert nodes, "nodes not defined"
861 main.case( "Verify connectivity by sendind traffic across Intents" )
862 main.caseExplaination = "Ping across added host intents to check " +\
863 "functionality and check the state of " +\
864 "the intent"
865 main.step( "Ping across added host intents" )
866 PingResult = main.TRUE
867 for i in range( 8, 18 ):
868 ping = main.Mininet1.pingHost( src="h" + str( i ),
869 target="h" + str( i + 10 ) )
870 PingResult = PingResult and ping
871 if ping == main.FALSE:
872 main.log.warn( "Ping failed between h" + str( i ) +
873 " and h" + str( i + 10 ) )
874 elif ping == main.TRUE:
875 main.log.info( "Ping test passed!" )
876 # Don't set PingResult or you'd override failures
877 if PingResult == main.FALSE:
878 main.log.error(
879 "Intents have not been installed correctly, pings failed." )
880 # TODO: pretty print
881 main.log.warn( "ONOS1 intents: " )
882 try:
883 tmpIntents = main.ONOScli1.intents()
884 main.log.warn( json.dumps( json.loads( tmpIntents ),
885 sort_keys=True,
886 indent=4,
887 separators=( ',', ': ' ) ) )
888 except ( ValueError, TypeError ):
889 main.log.warn( repr( tmpIntents ) )
890 utilities.assert_equals(
891 expect=main.TRUE,
892 actual=PingResult,
893 onpass="Intents have been installed correctly and pings work",
894 onfail="Intents have not been installed correctly, pings failed." )
895
896 main.step( "Check Intent state" )
897 installedCheck = False
898 loopCount = 0
899 while not installedCheck and loopCount < 40:
900 installedCheck = True
901 # Print the intent states
902 intents = main.ONOScli1.intents()
903 intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
905 # Iter through intents of a node
906 try:
907 for intent in json.loads( intents ):
908 state = intent.get( 'state', None )
909 if "INSTALLED" not in state:
910 installedCheck = False
911 intentId = intent.get( 'id', None )
912 intentStates.append( ( intentId, state ) )
913 except ( ValueError, TypeError ):
914 main.log.exception( "Error parsing intents." )
915 # Print states
916 intentStates.sort()
917 for i, s in intentStates:
918 count += 1
919 main.log.info( "%-6s%-15s%-15s" %
920 ( str( count ), str( i ), str( s ) ) )
921 if not installedCheck:
922 time.sleep( 1 )
923 loopCount += 1
924 utilities.assert_equals( expect=True, actual=installedCheck,
925 onpass="Intents are all INSTALLED",
926 onfail="Intents are not all in " +
927 "INSTALLED state" )
928
929 main.step( "Check leadership of topics" )
930 leaders = main.ONOScli1.leaders()
931 topicCheck = main.TRUE
932 try:
933 if leaders:
934 parsedLeaders = json.loads( leaders )
935 main.log.warn( json.dumps( parsedLeaders,
936 sort_keys=True,
937 indent=4,
938 separators=( ',', ': ' ) ) )
939 # check for all intent partitions
940 # check for election
941 # TODO: Look at Devices as topics now that it uses this system
942 topics = []
943 for i in range( 14 ):
944 topics.append( "intent-partition-" + str( i ) )
945 # FIXME: this should only be after we start the app
946 # FIXME: topics.append( "org.onosproject.election" )
947 # Print leaders output
948 main.log.debug( topics )
949 ONOStopics = [ j['topic'] for j in parsedLeaders ]
950 for topic in topics:
951 if topic not in ONOStopics:
952 main.log.error( "Error: " + topic +
953 " not in leaders" )
954 topicCheck = main.FALSE
955 else:
956 main.log.error( "leaders() returned None" )
957 topicCheck = main.FALSE
958 except ( ValueError, TypeError ):
959 topicCheck = main.FALSE
960 main.log.exception( "Error parsing leaders" )
961 main.log.error( repr( leaders ) )
962 # TODO: Check for a leader of these topics
963 # Check all nodes
964 if topicCheck:
965 for node in CLIs:
966 response = node.leaders( jsonFormat=False)
967 main.log.warn( str( node.name ) + " leaders output: \n" +
968 str( response ) )
969
970 utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
971 onpass="intent Partitions is in leaders",
972 onfail="Some topics were lost " )
973 # Print partitions
974 partitions = main.ONOScli1.partitions()
975 try:
976 if partitions :
977 parsedPartitions = json.loads( partitions )
978 main.log.warn( json.dumps( parsedPartitions,
979 sort_keys=True,
980 indent=4,
981 separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
983 # TODO check for consistency among nodes
984 else:
985 main.log.error( "partitions() returned None" )
986 except ( ValueError, TypeError ):
987 main.log.exception( "Error parsing partitions" )
988 main.log.error( repr( partitions ) )
989 # Print Pending Map
990 pendingMap = main.ONOScli1.pendingMap()
991 try:
992 if pendingMap :
993 parsedPending = json.loads( pendingMap )
994 main.log.warn( json.dumps( parsedPending,
995 sort_keys=True,
996 indent=4,
997 separators=( ',', ': ' ) ) )
998 # TODO check something here?
999 else:
1000 main.log.error( "pendingMap() returned None" )
1001 except ( ValueError, TypeError ):
1002 main.log.exception( "Error parsing pending map" )
1003 main.log.error( repr( pendingMap ) )
1004
1005 if not installedCheck:
1006 main.log.info( "Waiting 60 seconds to see if the state of " +
1007 "intents change" )
1008 time.sleep( 60 )
1009 # Print the intent states
1010 intents = main.ONOScli1.intents()
1011 intentStates = []
1012 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
1013 count = 0
1014 # Iter through intents of a node
1015 try:
1016 for intent in json.loads( intents ):
1017 state = intent.get( 'state', None )
1018 if "INSTALLED" not in state:
1019 installedCheck = False
1020 intentId = intent.get( 'id', None )
1021 intentStates.append( ( intentId, state ) )
1022 except ( ValueError, TypeError ):
1023 main.log.exception( "Error parsing intents." )
1024 intentStates.sort()
1025 for i, s in intentStates:
1026 count += 1
1027 main.log.info( "%-6s%-15s%-15s" %
1028 ( str( count ), str( i ), str( s ) ) )
1029 leaders = main.ONOScli1.leaders()
1030 try:
1031 missing = False
1032 if leaders:
1033 parsedLeaders = json.loads( leaders )
1034 main.log.warn( json.dumps( parsedLeaders,
1035 sort_keys=True,
1036 indent=4,
1037 separators=( ',', ': ' ) ) )
1038 # check for all intent partitions
1039 # check for election
1040 topics = []
1041 for i in range( 14 ):
1042 topics.append( "intent-partition-" + str( i ) )
1043 # FIXME: this should only be after we start the app
1044 topics.append( "org.onosproject.election" )
1045 main.log.debug( topics )
1046 ONOStopics = [ j['topic'] for j in parsedLeaders ]
1047 for topic in topics:
1048 if topic not in ONOStopics:
1049 main.log.error( "Error: " + topic +
1050 " not in leaders" )
1051 missing = True
1052 else:
1053 main.log.error( "leaders() returned None" )
1054 except ( ValueError, TypeError ):
1055 main.log.exception( "Error parsing leaders" )
1056 main.log.error( repr( leaders ) )
1057 if missing:
1058 for node in CLIs:
1059 response = node.leaders( jsonFormat=False)
1060 main.log.warn( str( node.name ) + " leaders output: \n" +
1061 str( response ) )
1062
1063 partitions = main.ONOScli1.partitions()
1064 try:
1065 if partitions :
1066 parsedPartitions = json.loads( partitions )
1067 main.log.warn( json.dumps( parsedPartitions,
1068 sort_keys=True,
1069 indent=4,
1070 separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
1072 # TODO check for consistency among nodes
1073 else:
1074 main.log.error( "partitions() returned None" )
1075 except ( ValueError, TypeError ):
1076 main.log.exception( "Error parsing partitions" )
1077 main.log.error( repr( partitions ) )
1078 pendingMap = main.ONOScli1.pendingMap()
1079 try:
1080 if pendingMap :
1081 parsedPending = json.loads( pendingMap )
1082 main.log.warn( json.dumps( parsedPending,
1083 sort_keys=True,
1084 indent=4,
1085 separators=( ',', ': ' ) ) )
1086 # TODO check something here?
1087 else:
1088 main.log.error( "pendingMap() returned None" )
1089 except ( ValueError, TypeError ):
1090 main.log.exception( "Error parsing pending map" )
1091 main.log.error( repr( pendingMap ) )
1092 # Print flowrules
1093 main.log.debug( CLIs[0].flows( jsonFormat=False ) )
1094 main.step( "Wait a minute then ping again" )
1095 # the wait is above
1096 PingResult = main.TRUE
1097 for i in range( 8, 18 ):
1098 ping = main.Mininet1.pingHost( src="h" + str( i ),
1099 target="h" + str( i + 10 ) )
1100 PingResult = PingResult and ping
1101 if ping == main.FALSE:
1102 main.log.warn( "Ping failed between h" + str( i ) +
1103 " and h" + str( i + 10 ) )
1104 elif ping == main.TRUE:
1105 main.log.info( "Ping test passed!" )
1106 # Don't set PingResult or you'd override failures
1107 if PingResult == main.FALSE:
1108 main.log.error(
1109 "Intents have not been installed correctly, pings failed." )
1110 # TODO: pretty print
1111 main.log.warn( "ONOS1 intents: " )
1112 try:
1113 tmpIntents = main.ONOScli1.intents()
1114 main.log.warn( json.dumps( json.loads( tmpIntents ),
1115 sort_keys=True,
1116 indent=4,
1117 separators=( ',', ': ' ) ) )
1118 except ( ValueError, TypeError ):
1119 main.log.warn( repr( tmpIntents ) )
1120 utilities.assert_equals(
1121 expect=main.TRUE,
1122 actual=PingResult,
1123 onpass="Intents have been installed correctly and pings work",
1124 onfail="Intents have not been installed correctly, pings failed." )
1125
1126 def CASE5( self, main ):
1127 """
1128 Reading state of ONOS
1129 """
1130 import json
1131 import time
1132 assert numControllers, "numControllers not defined"
1133 assert main, "main not defined"
1134 assert utilities.assert_equals, "utilities.assert_equals not defined"
1135 assert CLIs, "CLIs not defined"
1136 assert nodes, "nodes not defined"
1137
1138 main.case( "Setting up and gathering data for current state" )
1139 # The general idea for this test case is to pull the state of
        # ( intents, flows, topology, ... ) from each ONOS node
1141 # We can then compare them with each other and also with past states
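        # The snapshots taken here ( mastershipState, intentState, flowState
        # and the topology reads below ) are stored in globals so that CASE7
        # can compare the post-restart state against them.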
1142
1143 main.step( "Check that each switch has a master" )
1144 global mastershipState
1145 mastershipState = '[]'
1146
1147 # Assert that each device has a master
1148 rolesNotNull = main.TRUE
1149 threads = []
1150 for i in range( numControllers ):
1151 t = main.Thread( target=CLIs[i].rolesNotNull,
1152 name="rolesNotNull-" + str( i ),
1153 args=[] )
1154 threads.append( t )
1155 t.start()
1156
1157 for t in threads:
1158 t.join()
1159 rolesNotNull = rolesNotNull and t.result
1160 utilities.assert_equals(
1161 expect=main.TRUE,
1162 actual=rolesNotNull,
1163 onpass="Each device has a master",
1164 onfail="Some devices don't have a master assigned" )
1165
1166 main.step( "Get the Mastership of each switch from each controller" )
1167 ONOSMastership = []
1168 mastershipCheck = main.FALSE
1169 consistentMastership = True
1170 rolesResults = True
1171 threads = []
1172 for i in range( numControllers ):
1173 t = main.Thread( target=CLIs[i].roles,
1174 name="roles-" + str( i ),
1175 args=[] )
1176 threads.append( t )
1177 t.start()
1178
1179 for t in threads:
1180 t.join()
1181 ONOSMastership.append( t.result )
1182
1183 for i in range( numControllers ):
1184 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1185 main.log.error( "Error in getting ONOS" + str( i + 1 ) +
1186 " roles" )
1187 main.log.warn(
1188 "ONOS" + str( i + 1 ) + " mastership response: " +
1189 repr( ONOSMastership[i] ) )
1190 rolesResults = False
1191 utilities.assert_equals(
1192 expect=True,
1193 actual=rolesResults,
1194 onpass="No error in reading roles output",
1195 onfail="Error in reading roles from ONOS" )
1196
1197 main.step( "Check for consistency in roles from each controller" )
1198 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1199 main.log.info(
1200 "Switch roles are consistent across all ONOS nodes" )
1201 else:
1202 consistentMastership = False
1203 utilities.assert_equals(
1204 expect=True,
1205 actual=consistentMastership,
1206 onpass="Switch roles are consistent across all ONOS nodes",
1207 onfail="ONOS nodes have different views of switch roles" )
1208
1209 if rolesResults and not consistentMastership:
1210 for i in range( numControllers ):
1211 try:
1212 main.log.warn(
1213 "ONOS" + str( i + 1 ) + " roles: ",
1214 json.dumps(
1215 json.loads( ONOSMastership[ i ] ),
1216 sort_keys=True,
1217 indent=4,
1218 separators=( ',', ': ' ) ) )
1219 except ( ValueError, TypeError ):
1220 main.log.warn( repr( ONOSMastership[ i ] ) )
1221 elif rolesResults and consistentMastership:
1222 mastershipCheck = main.TRUE
1223 mastershipState = ONOSMastership[ 0 ]
1224
1225 main.step( "Get the intents from each controller" )
1226 global intentState
1227 intentState = []
1228 ONOSIntents = []
1229 intentCheck = main.FALSE
1230 consistentIntents = True
1231 intentsResults = True
1232 threads = []
1233 for i in range( numControllers ):
1234 t = main.Thread( target=CLIs[i].intents,
1235 name="intents-" + str( i ),
1236 args=[],
1237 kwargs={ 'jsonFormat': True } )
1238 threads.append( t )
1239 t.start()
1240
1241 for t in threads:
1242 t.join()
1243 ONOSIntents.append( t.result )
1244
1245 for i in range( numControllers ):
1246 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1247 main.log.error( "Error in getting ONOS" + str( i + 1 ) +
1248 " intents" )
1249 main.log.warn( "ONOS" + str( i + 1 ) + " intents response: " +
1250 repr( ONOSIntents[ i ] ) )
1251 intentsResults = False
1252 utilities.assert_equals(
1253 expect=True,
1254 actual=intentsResults,
1255 onpass="No error in reading intents output",
1256 onfail="Error in reading intents from ONOS" )
1257
1258 main.step( "Check for consistency in Intents from each controller" )
1259 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1260 main.log.info( "Intents are consistent across all ONOS " +
1261 "nodes" )
1262 else:
1263 consistentIntents = False
1264 main.log.error( "Intents not consistent" )
1265 utilities.assert_equals(
1266 expect=True,
1267 actual=consistentIntents,
1268 onpass="Intents are consistent across all ONOS nodes",
1269 onfail="ONOS nodes have different views of intents" )
1270
1271 if intentsResults:
1272 # Try to make it easy to figure out what is happening
1273 #
1274 # Intent ONOS1 ONOS2 ...
1275 # 0x01 INSTALLED INSTALLING
1276 # ... ... ...
1277 # ... ... ...
1278 title = " Id"
1279 for n in range( numControllers ):
1280 title += " " * 10 + "ONOS" + str( n + 1 )
1281 main.log.warn( title )
1282 # get all intent keys in the cluster
1283 keys = []
1284 for nodeStr in ONOSIntents:
1285 node = json.loads( nodeStr )
1286 for intent in node:
1287 keys.append( intent.get( 'id' ) )
1288 keys = set( keys )
1289 for key in keys:
1290 row = "%-13s" % key
1291 for nodeStr in ONOSIntents:
1292 node = json.loads( nodeStr )
1293 for intent in node:
1294 if intent.get( 'id', "Error" ) == key:
1295 row += "%-15s" % intent.get( 'state' )
1296 main.log.warn( row )
1297 # End table view
1298
1299 if intentsResults and not consistentIntents:
1300 # print the json objects
1301 n = len(ONOSIntents)
1302 main.log.debug( "ONOS" + str( n ) + " intents: " )
1303 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1304 sort_keys=True,
1305 indent=4,
1306 separators=( ',', ': ' ) ) )
1307 for i in range( numControllers ):
1308 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
1309 main.log.debug( "ONOS" + str( i + 1 ) + " intents: " )
1310 main.log.debug( json.dumps( json.loads( ONOSIntents[i] ),
1311 sort_keys=True,
1312 indent=4,
1313 separators=( ',', ': ' ) ) )
1314 else:
1315 main.log.debug( nodes[ i ].name + " intents match ONOS" +
1316 str( n ) + " intents" )
1317 elif intentsResults and consistentIntents:
1318 intentCheck = main.TRUE
1319 intentState = ONOSIntents[ 0 ]
1320
1321 main.step( "Get the flows from each controller" )
1322 global flowState
1323 flowState = []
1324 ONOSFlows = []
1325 ONOSFlowsJson = []
1326 flowCheck = main.FALSE
1327 consistentFlows = True
1328 flowsResults = True
1329 threads = []
1330 for i in range( numControllers ):
1331 t = main.Thread( target=CLIs[i].flows,
1332 name="flows-" + str( i ),
1333 args=[],
1334 kwargs={ 'jsonFormat': True } )
1335 threads.append( t )
1336 t.start()
1337
1338 # NOTE: Flows command can take some time to run
1339 time.sleep(30)
1340 for t in threads:
1341 t.join()
1342 result = t.result
1343 ONOSFlows.append( result )
1344
1345 for i in range( numControllers ):
1346 num = str( i + 1 )
1347 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
1348 main.log.error( "Error in getting ONOS" + num + " flows" )
1349 main.log.warn( "ONOS" + num + " flows response: " +
1350 repr( ONOSFlows[ i ] ) )
1351 flowsResults = False
1352 ONOSFlowsJson.append( None )
1353 else:
1354 try:
1355 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1356 except ( ValueError, TypeError ):
1357 # FIXME: change this to log.error?
1358 main.log.exception( "Error in parsing ONOS" + num +
1359 " response as json." )
1360 main.log.error( repr( ONOSFlows[ i ] ) )
1361 ONOSFlowsJson.append( None )
1362 flowsResults = False
1363 utilities.assert_equals(
1364 expect=True,
1365 actual=flowsResults,
1366 onpass="No error in reading flows output",
1367 onfail="Error in reading flows from ONOS" )
1368
1369 main.step( "Check for consistency in Flows from each controller" )
1370 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1371 if all( tmp ):
1372 main.log.info( "Flow count is consistent across all ONOS nodes" )
1373 else:
1374 consistentFlows = False
1375 utilities.assert_equals(
1376 expect=True,
1377 actual=consistentFlows,
1378 onpass="The flow count is consistent across all ONOS nodes",
1379 onfail="ONOS nodes have different flow counts" )
1380
1381 if flowsResults and not consistentFlows:
1382 for i in range( numControllers ):
1383 try:
1384 main.log.warn(
1385 "ONOS" + str( i + 1 ) + " flows: " +
1386 json.dumps( json.loads( ONOSFlows[i] ), sort_keys=True,
1387 indent=4, separators=( ',', ': ' ) ) )
1388 except ( ValueError, TypeError ):
1389 main.log.warn(
1390 "ONOS" + str( i + 1 ) + " flows: " +
1391 repr( ONOSFlows[ i ] ) )
1392 elif flowsResults and consistentFlows:
1393 flowCheck = main.TRUE
1394 flowState = ONOSFlows[ 0 ]
1395
1396 main.step( "Get the OF Table entries" )
1397 global flows
1398 flows = []
1399 for i in range( 1, 29 ):
1400 flows.append( main.Mininet2.getFlowTable( 1.3, "s" + str( i ) ) )
1401 if flowCheck == main.FALSE:
1402 for table in flows:
1403 main.log.warn( table )
1404 # TODO: Compare switch flow tables with ONOS flow tables
1405
1406 main.step( "Start continuous pings" )
1407 main.Mininet2.pingLong(
1408 src=main.params[ 'PING' ][ 'source1' ],
1409 target=main.params[ 'PING' ][ 'target1' ],
1410 pingTime=500 )
1411 main.Mininet2.pingLong(
1412 src=main.params[ 'PING' ][ 'source2' ],
1413 target=main.params[ 'PING' ][ 'target2' ],
1414 pingTime=500 )
1415 main.Mininet2.pingLong(
1416 src=main.params[ 'PING' ][ 'source3' ],
1417 target=main.params[ 'PING' ][ 'target3' ],
1418 pingTime=500 )
1419 main.Mininet2.pingLong(
1420 src=main.params[ 'PING' ][ 'source4' ],
1421 target=main.params[ 'PING' ][ 'target4' ],
1422 pingTime=500 )
1423 main.Mininet2.pingLong(
1424 src=main.params[ 'PING' ][ 'source5' ],
1425 target=main.params[ 'PING' ][ 'target5' ],
1426 pingTime=500 )
1427 main.Mininet2.pingLong(
1428 src=main.params[ 'PING' ][ 'source6' ],
1429 target=main.params[ 'PING' ][ 'target6' ],
1430 pingTime=500 )
1431 main.Mininet2.pingLong(
1432 src=main.params[ 'PING' ][ 'source7' ],
1433 target=main.params[ 'PING' ][ 'target7' ],
1434 pingTime=500 )
1435 main.Mininet2.pingLong(
1436 src=main.params[ 'PING' ][ 'source8' ],
1437 target=main.params[ 'PING' ][ 'target8' ],
1438 pingTime=500 )
1439 main.Mininet2.pingLong(
1440 src=main.params[ 'PING' ][ 'source9' ],
1441 target=main.params[ 'PING' ][ 'target9' ],
1442 pingTime=500 )
1443 main.Mininet2.pingLong(
1444 src=main.params[ 'PING' ][ 'source10' ],
1445 target=main.params[ 'PING' ][ 'target10' ],
1446 pingTime=500 )
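        # These long-running background pings ( pingTime=500 ) use the host
        # pairs from the PING section of the params file, keeping dataplane
        # traffic flowing while the cluster is restarted in CASE6.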
1447
1448 main.step( "Collecting topology information from ONOS" )
1449 devices = []
1450 threads = []
1451 for i in range( numControllers ):
1452 t = main.Thread( target=CLIs[i].devices,
1453 name="devices-" + str( i ),
1454 args=[ ] )
1455 threads.append( t )
1456 t.start()
1457
1458 for t in threads:
1459 t.join()
1460 devices.append( t.result )
1461 hosts = []
1462 threads = []
1463 for i in range( numControllers ):
1464 t = main.Thread( target=CLIs[i].hosts,
1465 name="hosts-" + str( i ),
1466 args=[ ] )
1467 threads.append( t )
1468 t.start()
1469
1470 for t in threads:
1471 t.join()
1472 try:
1473 hosts.append( json.loads( t.result ) )
1474 except ( ValueError, TypeError ):
1475 # FIXME: better handling of this, print which node
1476 # Maybe use thread name?
1477 main.log.exception( "Error parsing json output of hosts" )
1478 # FIXME: should this be an empty json object instead?
1479 hosts.append( None )
1480
1481 ports = []
1482 threads = []
1483 for i in range( numControllers ):
1484 t = main.Thread( target=CLIs[i].ports,
1485 name="ports-" + str( i ),
1486 args=[ ] )
1487 threads.append( t )
1488 t.start()
1489
1490 for t in threads:
1491 t.join()
1492 ports.append( t.result )
1493 links = []
1494 threads = []
1495 for i in range( numControllers ):
1496 t = main.Thread( target=CLIs[i].links,
1497 name="links-" + str( i ),
1498 args=[ ] )
1499 threads.append( t )
1500 t.start()
1501
1502 for t in threads:
1503 t.join()
1504 links.append( t.result )
1505 clusters = []
1506 threads = []
1507 for i in range( numControllers ):
1508 t = main.Thread( target=CLIs[i].clusters,
1509 name="clusters-" + str( i ),
1510 args=[ ] )
1511 threads.append( t )
1512 t.start()
1513
1514 for t in threads:
1515 t.join()
1516 clusters.append( t.result )
1517 # Compare json objects for hosts and dataplane clusters
1518
1519 # hosts
1520 main.step( "Host view is consistent across ONOS nodes" )
1521 consistentHostsResult = main.TRUE
1522 for controller in range( len( hosts ) ):
1523 controllerStr = str( controller + 1 )
1524 if "Error" not in hosts[ controller ]:
1525 if hosts[ controller ] == hosts[ 0 ]:
1526 continue
1527 else: # hosts not consistent
1528 main.log.error( "hosts from ONOS" +
1529 controllerStr +
1530 " is inconsistent with ONOS1" )
1531 main.log.warn( repr( hosts[ controller ] ) )
1532 consistentHostsResult = main.FALSE
1533
1534 else:
1535 main.log.error( "Error in getting ONOS hosts from ONOS" +
1536 controllerStr )
1537 consistentHostsResult = main.FALSE
1538 main.log.warn( "ONOS" + controllerStr +
1539 " hosts response: " +
1540 repr( hosts[ controller ] ) )
1541 utilities.assert_equals(
1542 expect=main.TRUE,
1543 actual=consistentHostsResult,
1544 onpass="Hosts view is consistent across all ONOS nodes",
1545 onfail="ONOS nodes have different views of hosts" )
1546
1547 main.step( "Each host has an IP address" )
1548 ipResult = main.TRUE
1549 for controller in range( 0, len( hosts ) ):
1550 controllerStr = str( controller + 1 )
1551 for host in hosts[ controller ]:
1552 if not host.get( 'ipAddresses', [ ] ):
1553 main.log.error( "DEBUG:Error with host ips on controller" +
1554 controllerStr + ": " + str( host ) )
1555 ipResult = main.FALSE
1556 utilities.assert_equals(
1557 expect=main.TRUE,
1558 actual=ipResult,
1559 onpass="The ips of the hosts aren't empty",
1560 onfail="The ip of at least one host is missing" )
1561
1562 # Strongly connected clusters of devices
1563 main.step( "Cluster view is consistent across ONOS nodes" )
1564 consistentClustersResult = main.TRUE
1565 for controller in range( len( clusters ) ):
1566 controllerStr = str( controller + 1 )
1567 if "Error" not in clusters[ controller ]:
1568 if clusters[ controller ] == clusters[ 0 ]:
1569 continue
1570 else: # clusters not consistent
1571 main.log.error( "clusters from ONOS" + controllerStr +
1572 " is inconsistent with ONOS1" )
1573 consistentClustersResult = main.FALSE
1574
1575 else:
1576 main.log.error( "Error in getting dataplane clusters " +
1577 "from ONOS" + controllerStr )
1578 consistentClustersResult = main.FALSE
1579 main.log.warn( "ONOS" + controllerStr +
1580 " clusters response: " +
1581 repr( clusters[ controller ] ) )
1582 utilities.assert_equals(
1583 expect=main.TRUE,
1584 actual=consistentClustersResult,
1585 onpass="Clusters view is consistent across all ONOS nodes",
1586 onfail="ONOS nodes have different views of clusters" )
1587 # there should always only be one cluster
1588 main.step( "Cluster view correct across ONOS nodes" )
        numClusters = None
        try:
1590 numClusters = len( json.loads( clusters[ 0 ] ) )
1591 except ( ValueError, TypeError ):
1592 main.log.exception( "Error parsing clusters[0]: " +
1593 repr( clusters[ 0 ] ) )
1594 clusterResults = main.FALSE
1595 if numClusters == 1:
1596 clusterResults = main.TRUE
1597 utilities.assert_equals(
1598 expect=1,
1599 actual=numClusters,
1600 onpass="ONOS shows 1 SCC",
1601 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1602
1603 main.step( "Comparing ONOS topology to MN" )
1604 devicesResults = main.TRUE
1605 linksResults = main.TRUE
1606 hostsResults = main.TRUE
1607 mnSwitches = main.Mininet1.getSwitches()
1608 mnLinks = main.Mininet1.getLinks()
1609 mnHosts = main.Mininet1.getHosts()
1610 for controller in range( numControllers ):
1611 controllerStr = str( controller + 1 )
1612 if devices[ controller ] and ports[ controller ] and\
1613 "Error" not in devices[ controller ] and\
1614 "Error" not in ports[ controller ]:
1615
1616 currentDevicesResult = main.Mininet1.compareSwitches(
1617 mnSwitches,
1618 json.loads( devices[ controller ] ),
1619 json.loads( ports[ controller ] ) )
1620 else:
1621 currentDevicesResult = main.FALSE
1622 utilities.assert_equals( expect=main.TRUE,
1623 actual=currentDevicesResult,
1624 onpass="ONOS" + controllerStr +
1625 " Switches view is correct",
1626 onfail="ONOS" + controllerStr +
1627 " Switches view is incorrect" )
1628 if links[ controller ] and "Error" not in links[ controller ]:
1629 currentLinksResult = main.Mininet1.compareLinks(
1630 mnSwitches, mnLinks,
1631 json.loads( links[ controller ] ) )
1632 else:
1633 currentLinksResult = main.FALSE
1634 utilities.assert_equals( expect=main.TRUE,
1635 actual=currentLinksResult,
1636 onpass="ONOS" + controllerStr +
1637 " links view is correct",
1638 onfail="ONOS" + controllerStr +
1639 " links view is incorrect" )
1640
            if hosts[ controller ] and "Error" not in hosts[ controller ]:
1642 currentHostsResult = main.Mininet1.compareHosts(
1643 mnHosts,
1644 hosts[ controller ] )
1645 else:
1646 currentHostsResult = main.FALSE
1647 utilities.assert_equals( expect=main.TRUE,
1648 actual=currentHostsResult,
1649 onpass="ONOS" + controllerStr +
1650 " hosts exist in Mininet",
1651 onfail="ONOS" + controllerStr +
1652 " hosts don't match Mininet" )
1653
1654 devicesResults = devicesResults and currentDevicesResult
1655 linksResults = linksResults and currentLinksResult
1656 hostsResults = hostsResults and currentHostsResult
1657
1658 main.step( "Device information is correct" )
1659 utilities.assert_equals(
1660 expect=main.TRUE,
1661 actual=devicesResults,
1662 onpass="Device information is correct",
1663 onfail="Device information is incorrect" )
1664
1665 main.step( "Links are correct" )
1666 utilities.assert_equals(
1667 expect=main.TRUE,
1668 actual=linksResults,
1669 onpass="Link are correct",
1670 onfail="Links are incorrect" )
1671
1672 main.step( "Hosts are correct" )
1673 utilities.assert_equals(
1674 expect=main.TRUE,
1675 actual=hostsResults,
1676 onpass="Hosts are correct",
1677 onfail="Hosts are incorrect" )
1678
1679 def CASE6( self, main ):
1680 """
1681 The Failure case.
1682 """
1683 import time
1684 assert numControllers, "numControllers not defined"
1685 assert main, "main not defined"
1686 assert utilities.assert_equals, "utilities.assert_equals not defined"
1687 assert CLIs, "CLIs not defined"
1688 assert nodes, "nodes not defined"
1689 try:
1690 labels
1691 except NameError:
1692 main.log.error( "labels not defined, setting to []" )
1693 global labels
1694 labels = []
1695 try:
1696 data
1697 except NameError:
1698 main.log.error( "data not defined, setting to []" )
1699 global data
1700 data = []
1701 # Reset non-persistent variables
1702 try:
1703 iCounterValue = 0
1704 except NameError:
1705 main.log.error( "iCounterValue not defined, setting to 0" )
1706 iCounterValue = 0
1707
1708 main.case( "Restart entire ONOS cluster" )
1709
1710 main.step( "Killing ONOS nodes" )
1711 killResults = main.TRUE
1712 killTime = time.time()
1713 for node in nodes:
1714 killed = main.ONOSbench.onosKill( node.ip_address )
1715 killResults = killResults and killed
1716 utilities.assert_equals( expect=main.TRUE, actual=killResults,
1717 onpass="ONOS nodes killed",
1718 onfail="ONOS kill unsuccessful" )
1719
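        # NOTE: onosKill only stops the ONOS (karaf) process on each node; the
        # service set up by onos-install is expected to respawn it, so the next
        # step just polls isup() instead of reinstalling anything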
1720 main.step( "Checking if ONOS is up yet" )
1721 for i in range( 2 ):
1722 onosIsupResult = main.TRUE
1723 for node in nodes:
1724 started = main.ONOSbench.isup( node.ip_address )
1725 if not started:
1726 main.log.error( node.name + " didn't start!" )
1727 onosIsupResult = onosIsupResult and started
1728 if onosIsupResult == main.TRUE:
1729 break
1730 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
1731 onpass="ONOS restarted",
1732 onfail="ONOS restart NOT successful" )
1733
1734        main.step( "Starting ONOS CLI sessions" )
1735 cliResults = main.TRUE
1736 threads = []
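        # Fan-out pattern used throughout this test: start one main.Thread per
        # controller, join them all, then read each driver call's return value
        # from t.result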
1737 for i in range( numControllers ):
1738 t = main.Thread( target=CLIs[i].startOnosCli,
1739 name="startOnosCli-" + str( i ),
1740 args=[nodes[i].ip_address] )
1741 threads.append( t )
1742 t.start()
1743
1744 for t in threads:
1745 t.join()
1746 cliResults = cliResults and t.result
1747 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
1748 onpass="ONOS cli started",
1749 onfail="ONOS clis did not restart" )
1750
1751        # Grab the time of restart so we can check how long the gossip
1752 # protocol has had time to work
1753 main.restartTime = time.time() - killTime
1754 main.log.debug( "Restart time: " + str( main.restartTime ) )
1755 labels.append( "Restart" )
1756 data.append( str( main.restartTime ) )
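        # The restart label/time pair is written to Timers.csv in CASE13 so that
        # Jenkins can plot restart times across runs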
1757
1758 # FIXME: revisit test plan for election with madan
1759 # Rerun for election on restarted nodes
1760 runResults = main.TRUE
1761        for cli in CLIs:
1762            run = cli.electionTestRun()
1763 if run != main.TRUE:
1764 main.log.error( "Error running for election on " + cli.name )
1765 runResults = runResults and run
1766 utilities.assert_equals( expect=main.TRUE, actual=runResults,
1767 onpass="Reran for election",
1768 onfail="Failed to rerun for election" )
1769
1770 # TODO: Make this configurable
1771 time.sleep( 60 )
1772 main.log.debug( CLIs[0].nodes( jsonFormat=False ) )
1773 main.log.debug( CLIs[0].leaders( jsonFormat=False ) )
1774 main.log.debug( CLIs[0].partitions( jsonFormat=False ) )
1775
1776 def CASE7( self, main ):
1777 """
1778 Check state after ONOS failure
1779 """
1780 import json
1781 assert numControllers, "numControllers not defined"
1782 assert main, "main not defined"
1783 assert utilities.assert_equals, "utilities.assert_equals not defined"
1784 assert CLIs, "CLIs not defined"
1785 assert nodes, "nodes not defined"
1786 main.case( "Running ONOS Constant State Tests" )
1787
1788 main.step( "Check that each switch has a master" )
1789 # Assert that each device has a master
1790 rolesNotNull = main.TRUE
1791 threads = []
1792 for i in range( numControllers ):
1793 t = main.Thread( target=CLIs[i].rolesNotNull,
1794 name="rolesNotNull-" + str( i ),
1795 args=[ ] )
1796 threads.append( t )
1797 t.start()
1798
1799 for t in threads:
1800 t.join()
1801 rolesNotNull = rolesNotNull and t.result
1802 utilities.assert_equals(
1803 expect=main.TRUE,
1804 actual=rolesNotNull,
1805 onpass="Each device has a master",
1806 onfail="Some devices don't have a master assigned" )
1807
1808 main.step( "Read device roles from ONOS" )
1809 ONOSMastership = []
1810 mastershipCheck = main.FALSE
1811 consistentMastership = True
1812 rolesResults = True
1813 threads = []
1814 for i in range( numControllers ):
1815 t = main.Thread( target=CLIs[i].roles,
1816 name="roles-" + str( i ),
1817 args=[] )
1818 threads.append( t )
1819 t.start()
1820
1821 for t in threads:
1822 t.join()
1823 ONOSMastership.append( t.result )
1824
1825 for i in range( numControllers ):
1826 if not ONOSMastership[i] or "Error" in ONOSMastership[i]:
1827 main.log.error( "Error in getting ONOS" + str( i + 1 ) +
1828 " roles" )
1829 main.log.warn(
1830 "ONOS" + str( i + 1 ) + " mastership response: " +
1831 repr( ONOSMastership[i] ) )
1832 rolesResults = False
1833 utilities.assert_equals(
1834 expect=True,
1835 actual=rolesResults,
1836 onpass="No error in reading roles output",
1837 onfail="Error in reading roles from ONOS" )
1838
1839 main.step( "Check for consistency in roles from each controller" )
1840 if all([ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1841 main.log.info(
1842 "Switch roles are consistent across all ONOS nodes" )
1843 else:
1844 consistentMastership = False
1845 utilities.assert_equals(
1846 expect=True,
1847 actual=consistentMastership,
1848 onpass="Switch roles are consistent across all ONOS nodes",
1849 onfail="ONOS nodes have different views of switch roles" )
1850
1851 if rolesResults and not consistentMastership:
1852 for i in range( numControllers ):
1853 main.log.warn(
1854 "ONOS" + str( i + 1 ) + " roles: ",
1855 json.dumps(
1856 json.loads( ONOSMastership[ i ] ),
1857 sort_keys=True,
1858 indent=4,
1859 separators=( ',', ': ' ) ) )
1860        elif rolesResults and consistentMastership:
1861 mastershipCheck = main.TRUE
1862
1863 '''
1864 description2 = "Compare switch roles from before failure"
1865 main.step( description2 )
1866 try:
1867 currentJson = json.loads( ONOSMastership[0] )
1868 oldJson = json.loads( mastershipState )
1869 except ( ValueError, TypeError ):
1870 main.log.exception( "Something is wrong with parsing " +
1871 "ONOSMastership[0] or mastershipState" )
1872 main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[0] ) )
1873 main.log.error( "mastershipState" + repr( mastershipState ) )
1874 main.cleanup()
1875 main.exit()
1876 mastershipCheck = main.TRUE
1877 for i in range( 1, 29 ):
1878 switchDPID = str(
1879 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
1880 current = [ switch[ 'master' ] for switch in currentJson
1881 if switchDPID in switch[ 'id' ] ]
1882 old = [ switch[ 'master' ] for switch in oldJson
1883 if switchDPID in switch[ 'id' ] ]
1884 if current == old:
1885 mastershipCheck = mastershipCheck and main.TRUE
1886 else:
1887 main.log.warn( "Mastership of switch %s changed" % switchDPID )
1888 mastershipCheck = main.FALSE
1889 utilities.assert_equals(
1890 expect=main.TRUE,
1891 actual=mastershipCheck,
1892 onpass="Mastership of Switches was not changed",
1893 onfail="Mastership of some switches changed" )
1894 '''
1895 # NOTE: we expect mastership to change on controller failure
1896
1897 main.step( "Get the intents and compare across all nodes" )
1898 ONOSIntents = []
1899 intentCheck = main.FALSE
1900 consistentIntents = True
1901 intentsResults = True
1902 threads = []
1903 for i in range( numControllers ):
1904 t = main.Thread( target=CLIs[i].intents,
1905 name="intents-" + str( i ),
1906 args=[],
1907 kwargs={ 'jsonFormat': True } )
1908 threads.append( t )
1909 t.start()
1910
1911 for t in threads:
1912 t.join()
1913 ONOSIntents.append( t.result )
1914
1915 for i in range( numControllers ):
1916 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1917 main.log.error( "Error in getting ONOS" + str( i + 1 ) +
1918 " intents" )
1919 main.log.warn( "ONOS" + str( i + 1 ) + " intents response: " +
1920 repr( ONOSIntents[ i ] ) )
1921 intentsResults = False
1922 utilities.assert_equals(
1923 expect=True,
1924 actual=intentsResults,
1925 onpass="No error in reading intents output",
1926 onfail="Error in reading intents from ONOS" )
1927
1928 main.step( "Check for consistency in Intents from each controller" )
1929 if all([ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1930 main.log.info( "Intents are consistent across all ONOS " +
1931 "nodes" )
1932 else:
1933 consistentIntents = False
1934
1935 # Try to make it easy to figure out what is happening
1936 #
1937 # Intent ONOS1 ONOS2 ...
1938 # 0x01 INSTALLED INSTALLING
1939 # ... ... ...
1940 # ... ... ...
1941 title = " ID"
1942 for n in range( numControllers ):
1943 title += " " * 10 + "ONOS" + str( n + 1 )
1944 main.log.warn( title )
1945 # get all intent keys in the cluster
1946 keys = []
1947 for nodeStr in ONOSIntents:
1948 node = json.loads( nodeStr )
1949 for intent in node:
1950 keys.append( intent.get( 'id' ) )
1951 keys = set( keys )
1952 for key in keys:
1953 row = "%-13s" % key
1954 for nodeStr in ONOSIntents:
1955 node = json.loads( nodeStr )
1956 for intent in node:
1957 if intent.get( 'id' ) == key:
1958 row += "%-15s" % intent.get( 'state' )
1959 main.log.warn( row )
1960 # End table view
1961
1962 utilities.assert_equals(
1963 expect=True,
1964 actual=consistentIntents,
1965 onpass="Intents are consistent across all ONOS nodes",
1966 onfail="ONOS nodes have different views of intents" )
1967 intentStates = []
1968 for node in ONOSIntents: # Iter through ONOS nodes
1969 nodeStates = []
1970 # Iter through intents of a node
1971 try:
1972 for intent in json.loads( node ):
1973 nodeStates.append( intent[ 'state' ] )
1974 except ( ValueError, TypeError ):
1975 main.log.exception( "Error in parsing intents" )
1976 main.log.error( repr( node ) )
1977 intentStates.append( nodeStates )
1978 out = [ (i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
1979 main.log.info( dict( out ) )
1980
1981 if intentsResults and not consistentIntents:
1982 for i in range( numControllers ):
1983 main.log.warn( "ONOS" + str( i + 1 ) + " intents: " )
1984 main.log.warn( json.dumps(
1985 json.loads( ONOSIntents[ i ] ),
1986 sort_keys=True,
1987 indent=4,
1988 separators=( ',', ': ' ) ) )
1989 elif intentsResults and consistentIntents:
1990 intentCheck = main.TRUE
1991
1992 # NOTE: Store has no durability, so intents are lost across system
1993 # restarts
1994 """
1995 main.step( "Compare current intents with intents before the failure" )
1996 # NOTE: this requires case 5 to pass for intentState to be set.
1997 # maybe we should stop the test if that fails?
1998 sameIntents = main.FALSE
1999 if intentState and intentState == ONOSIntents[ 0 ]:
2000 sameIntents = main.TRUE
2001 main.log.info( "Intents are consistent with before failure" )
2002 # TODO: possibly the states have changed? we may need to figure out
2003 # what the acceptable states are
2004 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2005 sameIntents = main.TRUE
2006 try:
2007 before = json.loads( intentState )
2008 after = json.loads( ONOSIntents[ 0 ] )
2009 for intent in before:
2010 if intent not in after:
2011 sameIntents = main.FALSE
2012 main.log.debug( "Intent is not currently in ONOS " +
2013 "(at least in the same form):" )
2014 main.log.debug( json.dumps( intent ) )
2015 except ( ValueError, TypeError ):
2016 main.log.exception( "Exception printing intents" )
2017 main.log.debug( repr( ONOSIntents[0] ) )
2018 main.log.debug( repr( intentState ) )
2019 if sameIntents == main.FALSE:
2020 try:
2021 main.log.debug( "ONOS intents before: " )
2022 main.log.debug( json.dumps( json.loads( intentState ),
2023 sort_keys=True, indent=4,
2024 separators=( ',', ': ' ) ) )
2025 main.log.debug( "Current ONOS intents: " )
2026 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2027 sort_keys=True, indent=4,
2028 separators=( ',', ': ' ) ) )
2029 except ( ValueError, TypeError ):
2030 main.log.exception( "Exception printing intents" )
2031 main.log.debug( repr( ONOSIntents[0] ) )
2032 main.log.debug( repr( intentState ) )
2033 utilities.assert_equals(
2034 expect=main.TRUE,
2035 actual=sameIntents,
2036 onpass="Intents are consistent with before failure",
2037 onfail="The Intents changed during failure" )
2038 intentCheck = intentCheck and sameIntents
2039 """
2040 main.step( "Get the OF Table entries and compare to before " +
2041 "component failure" )
2042 FlowTables = main.TRUE
2043 flows2 = []
2044 for i in range( 28 ):
2045 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2046 tmpFlows = main.Mininet2.getFlowTable( 1.3, "s" + str( i + 1 ) )
2047 flows2.append( tmpFlows )
2048 tempResult = main.Mininet2.flowComp(
2049 flow1=flows[ i ],
2050 flow2=tmpFlows )
2051 FlowTables = FlowTables and tempResult
2052            if tempResult == main.FALSE:
2053 main.log.info( "Differences in flow table for switch: s" +
2054 str( i + 1 ) )
2055 utilities.assert_equals(
2056 expect=main.TRUE,
2057 actual=FlowTables,
2058 onpass="No changes were found in the flow tables",
2059 onfail="Changes were found in the flow tables" )
2060
2061 main.Mininet2.pingLongKill()
2062 '''
2063 # main.step( "Check the continuous pings to ensure that no packets " +
2064 # "were dropped during component failure" )
2065 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2066 main.params[ 'TESTONIP' ] )
2067 LossInPings = main.FALSE
2068 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2069 for i in range( 8, 18 ):
2070 main.log.info(
2071 "Checking for a loss in pings along flow from s" +
2072 str( i ) )
2073 LossInPings = main.Mininet2.checkForLoss(
2074 "/tmp/ping.h" +
2075 str( i ) ) or LossInPings
2076 if LossInPings == main.TRUE:
2077 main.log.info( "Loss in ping detected" )
2078 elif LossInPings == main.ERROR:
2079 main.log.info( "There are multiple mininet process running" )
2080 elif LossInPings == main.FALSE:
2081 main.log.info( "No Loss in the pings" )
2082 main.log.info( "No loss of dataplane connectivity" )
2083 # utilities.assert_equals(
2084 # expect=main.FALSE,
2085 # actual=LossInPings,
2086 # onpass="No Loss of connectivity",
2087 # onfail="Loss of dataplane connectivity detected" )
2088
2089        # NOTE: Since intents are not persisted with the IntentStore,
2090 # we expect loss in dataplane connectivity
2091 LossInPings = main.FALSE
2092 '''
2093
2094 main.step( "Leadership Election is still functional" )
2095 # Test of LeadershipElection
2096 leaderList = []
2097 leaderResult = main.TRUE
2098 for cli in CLIs:
2099 leaderN = cli.electionTestLeader()
2100 leaderList.append( leaderN )
2101 if leaderN == main.FALSE:
2102 # error in response
2103 main.log.error( "Something is wrong with " +
2104 "electionTestLeader function, check the" +
2105 " error logs" )
2106 leaderResult = main.FALSE
2107 elif leaderN is None:
2108 main.log.error( cli.name +
2109 " shows no leader for the election-app." )
2110 leaderResult = main.FALSE
2111 if len( set( leaderList ) ) != 1:
2112 leaderResult = main.FALSE
2113 main.log.error(
2114 "Inconsistent view of leader for the election test app" )
2115 # TODO: print the list
2116 utilities.assert_equals(
2117 expect=main.TRUE,
2118 actual=leaderResult,
2119 onpass="Leadership election passed",
2120 onfail="Something went wrong with Leadership election" )
2121
2122 def CASE8( self, main ):
2123 """
2124 Compare topo
2125 """
2126 import json
2127 import time
2128 assert numControllers, "numControllers not defined"
2129 assert main, "main not defined"
2130 assert utilities.assert_equals, "utilities.assert_equals not defined"
2131 assert CLIs, "CLIs not defined"
2132 assert nodes, "nodes not defined"
2133
2134 main.case( "Compare ONOS Topology view to Mininet topology" )
2135 main.caseExplaination = "Compare topology objects between Mininet" +\
2136 " and ONOS"
2137
2138 main.step( "Comparing ONOS topology to MN" )
2139 devicesResults = main.TRUE
2140 linksResults = main.TRUE
2141 hostsResults = main.TRUE
2142 hostAttachmentResults = True
2143 topoResult = main.FALSE
2144 elapsed = 0
2145 count = 0
2146 main.step( "Collecting topology information from ONOS" )
2147 startTime = time.time()
2148 # Give time for Gossip to work
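        # Poll all controllers in parallel for devices, hosts, ports, links and
        # clusters, retrying for up to 60 seconds until the views converge with
        # Mininet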
2149 while topoResult == main.FALSE and elapsed < 60:
2150 count += 1
2151 cliStart = time.time()
2152 devices = []
2153 threads = []
2154 for i in range( numControllers ):
2155 t = main.Thread( target=CLIs[i].devices,
2156 name="devices-" + str( i ),
2157 args=[ ] )
2158 threads.append( t )
2159 t.start()
2160
2161 for t in threads:
2162 t.join()
2163 devices.append( t.result )
2164 hosts = []
2165 ipResult = main.TRUE
2166 threads = []
2167 for i in range( numControllers ):
2168 t = main.Thread( target=CLIs[i].hosts,
2169 name="hosts-" + str( i ),
2170 args=[ ] )
2171 threads.append( t )
2172 t.start()
2173
2174 for t in threads:
2175 t.join()
2176                try:
2177                    hosts.append( json.loads( t.result ) )
2178                except ( ValueError, TypeError ):
2179                    main.log.exception( "Error parsing hosts results" )
2180                    main.log.error( repr( t.result ) )
                        hosts.append( None )  # keep the list aligned with the controller index
2181            for controller in range( 0, len( hosts ) ):
2182                controllerStr = str( controller + 1 )
                    if hosts[ controller ]:  # skip nodes whose hosts output could not be parsed
2183                    for host in hosts[ controller ]:
2184                        if host is None or host.get( 'ipAddresses', [] ) == []:
2185                            main.log.error(
2186                                "Error with host ipAddresses on controller" +
2187                                controllerStr + ": " + str( host ) )
2188                            ipResult = main.FALSE
2189 ports = []
2190 threads = []
2191 for i in range( numControllers ):
2192 t = main.Thread( target=CLIs[i].ports,
2193 name="ports-" + str( i ),
2194 args=[ ] )
2195 threads.append( t )
2196 t.start()
2197
2198 for t in threads:
2199 t.join()
2200 ports.append( t.result )
2201 links = []
2202 threads = []
2203 for i in range( numControllers ):
2204 t = main.Thread( target=CLIs[i].links,
2205 name="links-" + str( i ),
2206 args=[ ] )
2207 threads.append( t )
2208 t.start()
2209
2210 for t in threads:
2211 t.join()
2212 links.append( t.result )
2213 clusters = []
2214 threads = []
2215 for i in range( numControllers ):
2216 t = main.Thread( target=CLIs[i].clusters,
2217 name="clusters-" + str( i ),
2218 args=[ ] )
2219 threads.append( t )
2220 t.start()
2221
2222 for t in threads:
2223 t.join()
2224 clusters.append( t.result )
2225
2226 elapsed = time.time() - startTime
2227 cliTime = time.time() - cliStart
2228 print "Elapsed time: " + str( elapsed )
2229 print "CLI time: " + str( cliTime )
2230
2231 mnSwitches = main.Mininet1.getSwitches()
2232 mnLinks = main.Mininet1.getLinks()
2233 mnHosts = main.Mininet1.getHosts()
2234 for controller in range( numControllers ):
2235 controllerStr = str( controller + 1 )
2236 if devices[ controller ] and ports[ controller ] and\
2237 "Error" not in devices[ controller ] and\
2238 "Error" not in ports[ controller ]:
2239
2240 currentDevicesResult = main.Mininet1.compareSwitches(
2241 mnSwitches,
2242 json.loads( devices[ controller ] ),
2243 json.loads( ports[ controller ] ) )
2244 else:
2245 currentDevicesResult = main.FALSE
2246 utilities.assert_equals( expect=main.TRUE,
2247 actual=currentDevicesResult,
2248 onpass="ONOS" + controllerStr +
2249 " Switches view is correct",
2250 onfail="ONOS" + controllerStr +
2251 " Switches view is incorrect" )
2252
2253 if links[ controller ] and "Error" not in links[ controller ]:
2254 currentLinksResult = main.Mininet1.compareLinks(
2255 mnSwitches, mnLinks,
2256 json.loads( links[ controller ] ) )
2257 else:
2258 currentLinksResult = main.FALSE
2259 utilities.assert_equals( expect=main.TRUE,
2260 actual=currentLinksResult,
2261 onpass="ONOS" + controllerStr +
2262 " links view is correct",
2263 onfail="ONOS" + controllerStr +
2264 " links view is incorrect" )
2265
2266                if hosts[ controller ] and "Error" not in hosts[ controller ]:
2267 currentHostsResult = main.Mininet1.compareHosts(
2268 mnHosts,
2269 hosts[ controller ] )
2270 else:
2271 currentHostsResult = main.FALSE
2272 utilities.assert_equals( expect=main.TRUE,
2273 actual=currentHostsResult,
2274 onpass="ONOS" + controllerStr +
2275 " hosts exist in Mininet",
2276 onfail="ONOS" + controllerStr +
2277 " hosts don't match Mininet" )
2278 # CHECKING HOST ATTACHMENT POINTS
2279 hostAttachment = True
2280 noHosts = False
2281 # FIXME: topo-HA/obelisk specific mappings:
2282 # key is mac and value is dpid
2283 mappings = {}
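                # Expected attachment points for the obelisk topology: each host
                # h1-h28 should show up on port 1 of the device mapped below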
2284 for i in range( 1, 29 ): # hosts 1 through 28
2285 # set up correct variables:
2286 macId = "00:" * 5 + hex( i ).split( "0x" )[1].upper().zfill(2)
2287 if i == 1:
2288 deviceId = "1000".zfill(16)
2289 elif i == 2:
2290 deviceId = "2000".zfill(16)
2291 elif i == 3:
2292 deviceId = "3000".zfill(16)
2293 elif i == 4:
2294 deviceId = "3004".zfill(16)
2295 elif i == 5:
2296 deviceId = "5000".zfill(16)
2297 elif i == 6:
2298 deviceId = "6000".zfill(16)
2299 elif i == 7:
2300 deviceId = "6007".zfill(16)
2301 elif i >= 8 and i <= 17:
2302 dpid = '3' + str( i ).zfill( 3 )
2303 deviceId = dpid.zfill(16)
2304 elif i >= 18 and i <= 27:
2305 dpid = '6' + str( i ).zfill( 3 )
2306 deviceId = dpid.zfill(16)
2307 elif i == 28:
2308 deviceId = "2800".zfill(16)
2309 mappings[ macId ] = deviceId
2310                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
2311 if hosts[ controller ] == []:
2312 main.log.warn( "There are no hosts discovered" )
2313 noHosts = True
2314 else:
2315 for host in hosts[ controller ]:
2316 mac = None
2317 location = None
2318 device = None
2319 port = None
2320 try:
2321 mac = host.get( 'mac' )
2322 assert mac, "mac field could not be found for this host object"
2323
2324 location = host.get( 'location' )
2325 assert location, "location field could not be found for this host object"
2326
2327 # Trim the protocol identifier off deviceId
2328 device = str( location.get( 'elementId' ) ).split(':')[1]
2329 assert device, "elementId field could not be found for this host location object"
2330
2331 port = location.get( 'port' )
2332 assert port, "port field could not be found for this host location object"
2333
2334 # Now check if this matches where they should be
2335 if mac and device and port:
2336 if str( port ) != "1":
2337 main.log.error( "The attachment port is incorrect for " +
2338 "host " + str( mac ) +
2339 ". Expected: 1 Actual: " + str( port) )
2340 hostAttachment = False
2341 if device != mappings[ str( mac ) ]:
2342 main.log.error( "The attachment device is incorrect for " +
2343 "host " + str( mac ) +
2344 ". Expected: " + mappings[ str( mac ) ] +
2345 " Actual: " + device )
2346 hostAttachment = False
2347 else:
2348 hostAttachment = False
2349 except AssertionError:
2350 main.log.exception( "Json object not as expected" )
2351 main.log.error( repr( host ) )
2352 hostAttachment = False
2353 else:
2354 main.log.error( "No hosts json output or \"Error\"" +
2355 " in output. hosts = " +
2356 repr( hosts[ controller ] ) )
2357 if noHosts is False:
2358 # TODO: Find a way to know if there should be hosts in a
2359 # given point of the test
2360 hostAttachment = True
2361
2362 # END CHECKING HOST ATTACHMENT POINTS
2363 devicesResults = devicesResults and currentDevicesResult
2364 linksResults = linksResults and currentLinksResult
2365 hostsResults = hostsResults and currentHostsResult
2366 hostAttachmentResults = hostAttachmentResults and\
2367 hostAttachment
2368 topoResult = ( devicesResults and linksResults
2369 and hostsResults and ipResult and
2370 hostAttachmentResults )
2371
2372 # Compare json objects for hosts and dataplane clusters
2373
2374 # hosts
2375 main.step( "Hosts view is consistent across all ONOS nodes" )
2376 consistentHostsResult = main.TRUE
2377 for controller in range( len( hosts ) ):
2378 controllerStr = str( controller + 1 )
2379 if "Error" not in hosts[ controller ]:
2380 if hosts[ controller ] == hosts[ 0 ]:
2381 continue
2382 else: # hosts not consistent
2383 main.log.error( "hosts from ONOS" + controllerStr +
2384 " is inconsistent with ONOS1" )
2385 main.log.warn( repr( hosts[ controller ] ) )
2386 consistentHostsResult = main.FALSE
2387
2388 else:
2389 main.log.error( "Error in getting ONOS hosts from ONOS" +
2390 controllerStr )
2391 consistentHostsResult = main.FALSE
2392 main.log.warn( "ONOS" + controllerStr +
2393 " hosts response: " +
2394 repr( hosts[ controller ] ) )
2395 utilities.assert_equals(
2396 expect=main.TRUE,
2397 actual=consistentHostsResult,
2398 onpass="Hosts view is consistent across all ONOS nodes",
2399 onfail="ONOS nodes have different views of hosts" )
2400
2401 main.step( "Hosts information is correct" )
2402 hostsResults = hostsResults and ipResult
2403 utilities.assert_equals(
2404 expect=main.TRUE,
2405 actual=hostsResults,
2406 onpass="Host information is correct",
2407 onfail="Host information is incorrect" )
2408
2409 main.step( "Host attachment points to the network" )
2410 utilities.assert_equals(
2411 expect=True,
2412 actual=hostAttachmentResults,
2413 onpass="Hosts are correctly attached to the network",
2414 onfail="ONOS did not correctly attach hosts to the network" )
2415
2416 # Strongly connected clusters of devices
2417 main.step( "Clusters view is consistent across all ONOS nodes" )
2418 consistentClustersResult = main.TRUE
2419 for controller in range( len( clusters ) ):
2420 controllerStr = str( controller + 1 )
2421 if "Error" not in clusters[ controller ]:
2422 if clusters[ controller ] == clusters[ 0 ]:
2423 continue
2424 else: # clusters not consistent
2425 main.log.error( "clusters from ONOS" +
2426 controllerStr +
2427 " is inconsistent with ONOS1" )
2428 consistentClustersResult = main.FALSE
2429
2430 else:
2431 main.log.error( "Error in getting dataplane clusters " +
2432 "from ONOS" + controllerStr )
2433 consistentClustersResult = main.FALSE
2434 main.log.warn( "ONOS" + controllerStr +
2435 " clusters response: " +
2436 repr( clusters[ controller ] ) )
2437 utilities.assert_equals(
2438 expect=main.TRUE,
2439 actual=consistentClustersResult,
2440 onpass="Clusters view is consistent across all ONOS nodes",
2441 onfail="ONOS nodes have different views of clusters" )
2442
2443 main.step( "There is only one SCC" )
2444 # there should always only be one cluster
2445        try:
2446            numClusters = len( json.loads( clusters[ 0 ] ) )
2447        except ( ValueError, TypeError ):
2448            main.log.exception( "Error parsing clusters[0]: " +
2449                                repr( clusters[ 0 ] ) )
                numClusters = -1  # sentinel so the asserts below report the parse failure
2450            clusterResults = main.FALSE
2451        if numClusters == 1:
2452            clusterResults = main.TRUE
        else:
            clusterResults = main.FALSE
2453 utilities.assert_equals(
2454 expect=1,
2455 actual=numClusters,
2456 onpass="ONOS shows 1 SCC",
2457 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
2458
2459 topoResult = ( devicesResults and linksResults
2460 and hostsResults and consistentHostsResult
2461 and consistentClustersResult and clusterResults
2462 and ipResult and hostAttachmentResults )
2463
2464 topoResult = topoResult and int( count <= 2 )
2465 note = "note it takes about " + str( int( cliTime ) ) + \
2466 " seconds for the test to make all the cli calls to fetch " +\
2467 "the topology from each ONOS instance"
2468 main.log.info(
2469 "Very crass estimate for topology discovery/convergence( " +
2470 str( note ) + " ): " + str( elapsed ) + " seconds, " +
2471 str( count ) + " tries" )
2472
2473 main.step( "Device information is correct" )
2474 utilities.assert_equals(
2475 expect=main.TRUE,
2476 actual=devicesResults,
2477 onpass="Device information is correct",
2478 onfail="Device information is incorrect" )
2479
2480 main.step( "Links are correct" )
2481 utilities.assert_equals(
2482 expect=main.TRUE,
2483 actual=linksResults,
2484 onpass="Link are correct",
2485 onfail="Links are incorrect" )
2486
2487 # FIXME: move this to an ONOS state case
2488 main.step( "Checking ONOS nodes" )
2489 nodesOutput = []
2490 nodeResults = main.TRUE
2491 threads = []
2492 for i in range( numControllers ):
2493 t = main.Thread( target=CLIs[i].nodes,
2494 name="nodes-" + str( i ),
2495 args=[ ] )
2496 threads.append( t )
2497 t.start()
2498
2499 for t in threads:
2500 t.join()
2501 nodesOutput.append( t.result )
2502 ips = [ node.ip_address for node in nodes ]
2503 for i in nodesOutput:
2504 try:
2505 current = json.loads( i )
2506 for node in current:
2507 currentResult = main.FALSE
2508 if node['ip'] in ips: # node in nodes() output is in cell
2509 if node['state'] == 'ACTIVE':
2510 currentResult = main.TRUE
2511 else:
2512 main.log.error( "Error in ONOS node availability" )
2513 main.log.error(
2514 json.dumps( current,
2515 sort_keys=True,
2516 indent=4,
2517 separators=( ',', ': ' ) ) )
2518 break
2519 nodeResults = nodeResults and currentResult
2520 except ( ValueError, TypeError ):
2521 main.log.error( "Error parsing nodes output" )
2522 main.log.warn( repr( i ) )
2523 utilities.assert_equals( expect=main.TRUE, actual=nodeResults,
2524 onpass="Nodes check successful",
2525 onfail="Nodes check NOT successful" )
2526
2527 def CASE9( self, main ):
2528 """
2529 Link s3-s28 down
2530 """
2531 import time
2532 assert numControllers, "numControllers not defined"
2533 assert main, "main not defined"
2534 assert utilities.assert_equals, "utilities.assert_equals not defined"
2535 assert CLIs, "CLIs not defined"
2536 assert nodes, "nodes not defined"
2537 # NOTE: You should probably run a topology check after this
2538
2539 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2540
2541 description = "Turn off a link to ensure that Link Discovery " +\
2542 "is working properly"
2543 main.case( description )
2544
2545 main.step( "Kill Link between s3 and s28" )
2546 LinkDown = main.Mininet1.link( END1="s3", END2="s28", OPTION="down" )
2547 main.log.info( "Waiting " + str( linkSleep ) +
2548 " seconds for link down to be discovered" )
2549 time.sleep( linkSleep )
2550 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
2551 onpass="Link down successful",
2552 onfail="Failed to bring link down" )
2553 # TODO do some sort of check here
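        #       e.g. verify via the CLI that the link count dropped by two,
        #       since ONOS reports one unidirectional link per direction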
2554
2555 def CASE10( self, main ):
2556 """
2557 Link s3-s28 up
2558 """
2559 import time
2560 assert numControllers, "numControllers not defined"
2561 assert main, "main not defined"
2562 assert utilities.assert_equals, "utilities.assert_equals not defined"
2563 assert CLIs, "CLIs not defined"
2564 assert nodes, "nodes not defined"
2565 # NOTE: You should probably run a topology check after this
2566
2567 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
2568
2569 description = "Restore a link to ensure that Link Discovery is " + \
2570 "working properly"
2571 main.case( description )
2572
2573 main.step( "Bring link between s3 and s28 back up" )
2574 LinkUp = main.Mininet1.link( END1="s3", END2="s28", OPTION="up" )
2575 main.log.info( "Waiting " + str( linkSleep ) +
2576 " seconds for link up to be discovered" )
2577 time.sleep( linkSleep )
2578 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
2579 onpass="Link up successful",
2580 onfail="Failed to bring link up" )
2581 # TODO do some sort of check here
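        #       e.g. verify that the link count is back to its original value,
        #       or run a topology check (CASE8) right after this case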
2582
2583 def CASE11( self, main ):
2584 """
2585 Switch Down
2586 """
2587 # NOTE: You should probably run a topology check after this
2588 import time
2589 assert numControllers, "numControllers not defined"
2590 assert main, "main not defined"
2591 assert utilities.assert_equals, "utilities.assert_equals not defined"
2592 assert CLIs, "CLIs not defined"
2593 assert nodes, "nodes not defined"
2594
2595 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2596
2597 description = "Killing a switch to ensure it is discovered correctly"
2598 main.case( description )
2599 switch = main.params[ 'kill' ][ 'switch' ]
2600 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2601
2602 # TODO: Make this switch parameterizable
2603 main.step( "Kill " + switch )
2604 main.log.info( "Deleting " + switch )
2605 main.Mininet1.delSwitch( switch )
2606 main.log.info( "Waiting " + str( switchSleep ) +
2607 " seconds for switch down to be discovered" )
2608 time.sleep( switchSleep )
2609 device = main.ONOScli1.getDevice( dpid=switchDPID )
2610 # Peek at the deleted switch
2611 main.log.warn( str( device ) )
2612 result = main.FALSE
2613 if device and device[ 'available' ] is False:
2614 result = main.TRUE
2615 utilities.assert_equals( expect=main.TRUE, actual=result,
2616 onpass="Kill switch successful",
2617 onfail="Failed to kill switch?" )
2618
2619 def CASE12( self, main ):
2620 """
2621 Switch Up
2622 """
2623 # NOTE: You should probably run a topology check after this
2624 import time
2625 assert numControllers, "numControllers not defined"
2626 assert main, "main not defined"
2627 assert utilities.assert_equals, "utilities.assert_equals not defined"
2628 assert CLIs, "CLIs not defined"
2629 assert nodes, "nodes not defined"
2630 assert ONOS1Port, "ONOS1Port not defined"
2631 assert ONOS2Port, "ONOS2Port not defined"
2632 assert ONOS3Port, "ONOS3Port not defined"
2633 assert ONOS4Port, "ONOS4Port not defined"
2634 assert ONOS5Port, "ONOS5Port not defined"
2635 assert ONOS6Port, "ONOS6Port not defined"
2636 assert ONOS7Port, "ONOS7Port not defined"
2637
2638 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
2639 switch = main.params[ 'kill' ][ 'switch' ]
2640 switchDPID = main.params[ 'kill' ][ 'dpid' ]
2641 links = main.params[ 'kill' ][ 'links' ].split()
2642 description = "Adding a switch to ensure it is discovered correctly"
2643 main.case( description )
2644
2645 main.step( "Add back " + switch )
2646 main.Mininet1.addSwitch( switch, dpid=switchDPID )
2647 for peer in links:
2648 main.Mininet1.addLink( switch, peer )
2649 ipList = []
2650 for i in range( numControllers ):
2651 ipList.append( nodes[ i ].ip_address )
2652 main.Mininet1.assignSwController( sw=switch, ip=ipList )
2653 main.log.info( "Waiting " + str( switchSleep ) +
2654 " seconds for switch up to be discovered" )
2655 time.sleep( switchSleep )
2656 device = main.ONOScli1.getDevice( dpid=switchDPID )
2657 # Peek at the deleted switch
2658 main.log.warn( str( device ) )
2659 result = main.FALSE
2660 if device and device[ 'available' ]:
2661 result = main.TRUE
2662 utilities.assert_equals( expect=main.TRUE, actual=result,
2663 onpass="add switch successful",
2664 onfail="Failed to add switch?" )
2665
2666 def CASE13( self, main ):
2667 """
2668 Clean up
2669 """
2670 import os
2671 import time
2672 assert numControllers, "numControllers not defined"
2673 assert main, "main not defined"
2674 assert utilities.assert_equals, "utilities.assert_equals not defined"
2675 assert CLIs, "CLIs not defined"
2676 assert nodes, "nodes not defined"
2677
2678 # printing colors to terminal
2679 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2680 'blue': '\033[94m', 'green': '\033[92m',
2681 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2682 main.case( "Test Cleanup" )
2683 main.step( "Killing tcpdumps" )
2684 main.Mininet2.stopTcpdump()
2685
2686 testname = main.TEST
2687 if main.params[ 'BACKUP' ] == "True":
2688 main.step( "Copying MN pcap and ONOS log files to test station" )
2689 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2690 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2691 # NOTE: MN Pcap file is being saved to ~/packet_captures
2692 # scp this file as MN and TestON aren't necessarily the same vm
2693 # FIXME: scp
2694 # mn files
2695 # TODO: Load these from params
2696 # NOTE: must end in /
2697 logFolder = "/opt/onos/log/"
2698 logFiles = [ "karaf.log", "karaf.log.1" ]
2699 # NOTE: must end in /
2700 dstDir = "~/packet_captures/"
2701 for f in logFiles:
2702 for node in nodes:
2703 main.ONOSbench.handle.sendline( "scp sdn@" + node.ip_address +
2704 ":" + logFolder + f + " " +
2705 teststationUser + "@" +
2706 teststationIP + ":" +
2707 dstDir + str( testname ) +
2708 "-" + node.name + "-" + f )
2709 main.ONOSbench.handle.expect( "\$" )
2710
2711 # std*.log's
2712 # NOTE: must end in /
2713 logFolder = "/opt/onos/var/"
2714 logFiles = [ "stderr.log", "stdout.log" ]
2715 # NOTE: must end in /
2716 dstDir = "~/packet_captures/"
2717 for f in logFiles:
2718 for node in nodes:
2719 main.ONOSbench.handle.sendline( "scp sdn@" + node.ip_address +
2720 ":" + logFolder + f + " " +
2721 teststationUser + "@" +
2722 teststationIP + ":" +
2723 dstDir + str( testname ) +
2724 "-" + node.name + "-" + f )
2725 main.ONOSbench.handle.expect( "\$" )
2726 # sleep so scp can finish
2727 time.sleep( 10 )
2728 main.step( "Packing and rotating pcap archives" )
2729 os.system( "~/TestON/dependencies/rotate.sh " + str( testname ) )
2730
2731 main.step( "Stopping Mininet" )
2732 mnResult = main.Mininet1.stopNet()
2733 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2734 onpass="Mininet stopped",
2735 onfail="MN cleanup NOT successful" )
2736
2737 main.step( "Checking ONOS Logs for errors" )
2738 for node in nodes:
2739 print colors[ 'purple' ] + "Checking logs for errors on " + \
2740 node.name + ":" + colors[ 'end' ]
2741 print main.ONOSbench.checkLogs( node.ip_address, restart=True )
2742
2743 try:
2744 timerLog = open( main.logdir + "/Timers.csv", 'w')
2745 main.log.error( ", ".join( labels ) + "\n" + ", ".join( data ) )
2746 timerLog.write( ", ".join( labels ) + "\n" + ", ".join( data ) )
2747 timerLog.close()
2748        except NameError as e:
2749            main.log.exception( e )
2750
2751 def CASE14( self, main ):
2752 """
2753 start election app on all onos nodes
2754 """
2755 assert numControllers, "numControllers not defined"
2756 assert main, "main not defined"
2757 assert utilities.assert_equals, "utilities.assert_equals not defined"
2758 assert CLIs, "CLIs not defined"
2759 assert nodes, "nodes not defined"
2760
2761 main.case("Start Leadership Election app")
2762 main.step( "Install leadership election app" )
2763 appResult = main.ONOScli1.activateApp( "org.onosproject.election" )
2764 utilities.assert_equals(
2765 expect=main.TRUE,
2766 actual=appResult,
2767 onpass="Election app installed",
2768 onfail="Something went wrong with installing Leadership election" )
2769
2770 main.step( "Run for election on each node" )
2771 leaderResult = main.TRUE
2772 leaders = []
2773 for cli in CLIs:
2774 cli.electionTestRun()
2775 for cli in CLIs:
2776 leader = cli.electionTestLeader()
2777 if leader is None or leader == main.FALSE:
2778 main.log.error( cli.name + ": Leader for the election app " +
2779 "should be an ONOS node, instead got '" +
2780 str( leader ) + "'" )
2781 leaderResult = main.FALSE
2782 leaders.append( leader )
2783 utilities.assert_equals(
2784 expect=main.TRUE,
2785 actual=leaderResult,
2786 onpass="Successfully ran for leadership",
2787 onfail="Failed to run for leadership" )
2788
2789 main.step( "Check that each node shows the same leader" )
2790 sameLeader = main.TRUE
2791 if len( set( leaders ) ) != 1:
2792 sameLeader = main.FALSE
2793 main.log.error( "Results of electionTestLeader is order of CLIs:" +
2794 str( leaders ) )
2795 utilities.assert_equals(
2796 expect=main.TRUE,
2797 actual=sameLeader,
2798 onpass="Leadership is consistent for the election topic",
2799 onfail="Nodes have different leaders" )
2800
2801 def CASE15( self, main ):
2802 """
2803 Check that Leadership Election is still functional
2804 """
2805 import time
2806 assert numControllers, "numControllers not defined"
2807 assert main, "main not defined"
2808 assert utilities.assert_equals, "utilities.assert_equals not defined"
2809 assert CLIs, "CLIs not defined"
2810 assert nodes, "nodes not defined"
2811
2812 leaderResult = main.TRUE
2813 description = "Check that Leadership Election is still functional"
2814 main.case( description )
2815        # NOTE: Need to re-run since being a candidate is not persistent
2816 main.step( "Run for election on each node" )
2817 leaderResult = main.TRUE
2818 leaders = []
2819 for cli in CLIs:
2820 cli.electionTestRun()
2821 for cli in CLIs:
2822 leader = cli.electionTestLeader()
2823 if leader is None or leader == main.FALSE:
2824 main.log.error( cli.name + ": Leader for the election app " +
2825 "should be an ONOS node, instead got '" +
2826 str( leader ) + "'" )
2827 leaderResult = main.FALSE
2828 leaders.append( leader )
2829 utilities.assert_equals(
2830 expect=main.TRUE,
2831 actual=leaderResult,
2832 onpass="Successfully ran for leadership",
2833 onfail="Failed to run for leadership" )
2834
2835 main.step( "Check that each node shows the same leader" )
2836 sameLeader = main.TRUE
2837 if len( set( leaders ) ) != 1:
2838 sameLeader = main.FALSE
2839 main.log.error( "Results of electionTestLeader is order of CLIs:" +
2840 str( leaders ) )
2841 utilities.assert_equals(
2842 expect=main.TRUE,
2843 actual=sameLeader,
2844 onpass="Leadership is consistent for the election topic",
2845 onfail="Nodes have different leaders" )
2846
2847 main.step( "Find current leader and withdraw" )
2848 leader = main.ONOScli1.electionTestLeader()
2849 # do some sanity checking on leader before using it
2850 withdrawResult = main.FALSE
2851 if leader is None or leader == main.FALSE:
2852 main.log.error(
2853 "Leader for the election app should be an ONOS node," +
2854 "instead got '" + str( leader ) + "'" )
2855 leaderResult = main.FALSE
2856 oldLeader = None
2857 for i in range( len( CLIs ) ):
2858 if leader == nodes[ i ].ip_address:
2859 oldLeader = CLIs[ i ]
2860 break
2861 else: # FOR/ELSE statement
2862 main.log.error( "Leader election, could not find current leader" )
2863 if oldLeader:
2864 withdrawResult = oldLeader.electionTestWithdraw()
2865 utilities.assert_equals(
2866 expect=main.TRUE,
2867 actual=withdrawResult,
2868 onpass="Node was withdrawn from election",
2869 onfail="Node was not withdrawn from election" )
2870
2871 main.step( "Make sure new leader is elected" )
2872 # FIXME: use threads
2873 leaderList = []
2874 for cli in CLIs:
2875 leaderN = cli.electionTestLeader()
2876 leaderList.append( leaderN )
2877 if leaderN == leader:
2878 main.log.error( cli.name + " still sees " + str( leader ) +
2879 " as leader after they withdrew" )
2880 leaderResult = main.FALSE
2881 elif leaderN == main.FALSE:
2882 # error in response
2883 # TODO: add check for "Command not found:" in the driver, this
2884 # means the app isn't loaded
2885 main.log.error( "Something is wrong with " +
2886 "electionTestLeader function, " +
2887 "check the error logs" )
2888 leaderResult = main.FALSE
2889 elif leaderN is None:
2890                # node may not have received the event yet
2891 time.sleep(7)
2892 leaderN = cli.electionTestLeader()
2893 leaderList.pop()
2894 leaderList.append( leaderN )
2895 consistentLeader = main.FALSE
2896 if len( set( leaderList ) ) == 1:
2897 main.log.info( "Each Election-app sees '" +
2898 str( leaderList[ 0 ] ) +
2899 "' as the leader" )
2900 consistentLeader = main.TRUE
2901 else:
2902 main.log.error(
2903 "Inconsistent responses for leader of Election-app:" )
2904 for n in range( len( leaderList ) ):
2905 main.log.error( "ONOS" + str( n + 1 ) + " response: " +
2906 str( leaderList[ n ] ) )
2907 leaderResult = leaderResult and consistentLeader
2908 utilities.assert_equals(
2909 expect=main.TRUE,
2910 actual=leaderResult,
2911 onpass="Leadership election passed",
2912 onfail="Something went wrong with Leadership election" )
2913
2914 main.step( "Run for election on old leader( just so everyone " +
2915 "is in the hat )" )
2916 if oldLeader:
2917 runResult = oldLeader.electionTestRun()
2918 else:
2919 runResult = main.FALSE
2920 utilities.assert_equals(
2921 expect=main.TRUE,
2922 actual=runResult,
2923 onpass="App re-ran for election",
2924 onfail="App failed to run for election" )
2925
2926 main.step( "Leader did not change when old leader re-ran" )
2927 afterRun = main.ONOScli1.electionTestLeader()
2928 # verify leader didn't just change
2929 if afterRun == leaderList[ 0 ]:
2930 afterResult = main.TRUE
2931 else:
2932 afterResult = main.FALSE
2933
2934 utilities.assert_equals(
2935 expect=main.TRUE,
2936 actual=afterResult,
2937 onpass="Old leader successfully re-ran for election",
2938 onfail="Something went wrong with Leadership election after " +
2939 "the old leader re-ran for election" )
2940
2941 def CASE16( self, main ):
2942 """
2943 Install Distributed Primitives app
2944 """
2945 import time
2946 assert numControllers, "numControllers not defined"
2947 assert main, "main not defined"
2948 assert utilities.assert_equals, "utilities.assert_equals not defined"
2949 assert CLIs, "CLIs not defined"
2950 assert nodes, "nodes not defined"
2951
2952 # Variables for the distributed primitives tests
2953 global pCounterName
2954 global iCounterName
2955 global pCounterValue
2956 global iCounterValue
2957 global onosSet
2958 global onosSetName
2959 pCounterName = "TestON-Partitions"
2960 iCounterName = "TestON-inMemory"
2961 pCounterValue = 0
2962 iCounterValue = 0
2963 onosSet = set([])
2964 onosSetName = "TestON-set"
2965
2966 description = "Install Primitives app"
2967 main.case( description )
2968 main.step( "Install Primitives app" )
2969 appName = "org.onosproject.distributedprimitives"
2970 appResults = CLIs[0].activateApp( appName )
2971 utilities.assert_equals( expect=main.TRUE,
2972 actual=appResults,
2973 onpass="Primitives app activated",
2974 onfail="Primitives app not activated" )
2975 time.sleep( 5 ) # To allow all nodes to activate
2976
2977 def CASE17( self, main ):
2978 """
2979 Check for basic functionality with distributed primitives
2980 """
2981 import json
2982 # Make sure variables are defined/set
2983 assert numControllers, "numControllers not defined"
2984 assert main, "main not defined"
2985 assert utilities.assert_equals, "utilities.assert_equals not defined"
2986 assert CLIs, "CLIs not defined"
2987 assert nodes, "nodes not defined"
2988 assert pCounterName, "pCounterName not defined"
2989 assert iCounterName, "iCounterName not defined"
2990 assert onosSetName, "onosSetName not defined"
2991 # NOTE: assert fails if value is 0/None/Empty/False
2992 try:
2993 pCounterValue
2994 except NameError:
2995 main.log.error( "pCounterValue not defined, setting to 0" )
2996 pCounterValue = 0
2997 try:
2998 iCounterValue
2999 except NameError:
3000 main.log.error( "iCounterValue not defined, setting to 0" )
3001 iCounterValue = 0
3002 try:
3003 onosSet
3004 except NameError:
3005 main.log.error( "onosSet not defined, setting to empty Set" )
3006 onosSet = set([])
3007 # Variables for the distributed primitives tests. These are local only
3008 addValue = "a"
3009 addAllValue = "a b c d e f"
3010 retainValue = "c d e f"
3011
3012 description = "Check for basic functionality with distributed " +\
3013 "primitives"
3014 main.case( description )
3015        main.caseExplaination = "Test the methods of the distributed " +\
                                    "primitives (counters and sets) through the CLI"
3016 # DISTRIBUTED ATOMIC COUNTERS
3017 main.step( "Increment and get a default counter on each node" )
3018 pCounters = []
3019 threads = []
3020 addedPValues = []
3021 for i in range( numControllers ):
3022 t = main.Thread( target=CLIs[i].counterTestIncrement,
3023 name="counterIncrement-" + str( i ),
3024 args=[ pCounterName ] )
3025 pCounterValue += 1
3026 addedPValues.append( pCounterValue )
3027 threads.append( t )
3028 t.start()
3029
3030 for t in threads:
3031 t.join()
3032 pCounters.append( t.result )
3033        # Check that the counter incremented numControllers times
3034 pCounterResults = True
3035 for i in addedPValues:
3036 tmpResult = i in pCounters
3037 pCounterResults = pCounterResults and tmpResult
3038 if not tmpResult:
3039 main.log.error( str( i ) + " is not in partitioned "
3040 "counter incremented results" )
3041 utilities.assert_equals( expect=True,
3042 actual=pCounterResults,
3043 onpass="Default counter incremented",
3044 onfail="Error incrementing default" +
3045 " counter" )
3046
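        # Same increment/verify flow as above, but for the in-memory counter; the
        # only difference is the inMemory=True flag passed to the CLI driver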
3047 main.step( "Increment and get an in memory counter on each node" )
3048 iCounters = []
3049 addedIValues = []
3050 threads = []
3051 for i in range( numControllers ):
3052 t = main.Thread( target=CLIs[i].counterTestIncrement,
3053 name="icounterIncrement-" + str( i ),
3054 args=[ iCounterName ],
3055 kwargs={ "inMemory": True } )
3056 iCounterValue += 1
3057 addedIValues.append( iCounterValue )
3058 threads.append( t )
3059 t.start()
3060
3061 for t in threads:
3062 t.join()
3063 iCounters.append( t.result )
3064        # Check that the counter incremented numControllers times
3065 iCounterResults = True
3066 for i in addedIValues:
3067 tmpResult = i in iCounters
3068 iCounterResults = iCounterResults and tmpResult
3069 if not tmpResult:
3070 main.log.error( str( i ) + " is not in the in-memory "
3071 "counter incremented results" )
3072 utilities.assert_equals( expect=True,
3073 actual=iCounterResults,
3074 onpass="In memory counter incremented",
3075 onfail="Error incrementing in memory" +
3076 " counter" )
3077
3078 main.step( "Check counters are consistant across nodes" )
3079 onosCounters = []
3080 threads = []
3081 for i in range( numControllers ):
3082 t = main.Thread( target=CLIs[i].counters,
3083 name="counters-" + str( i ) )
3084 threads.append( t )
3085 t.start()
3086 for t in threads:
3087 t.join()
3088 onosCounters.append( t.result )
3089 tmp = [ i == onosCounters[ 0 ] for i in onosCounters ]
3090 if all( tmp ):
3091 main.log.info( "Counters are consistent across all nodes" )
3092 consistentCounterResults = main.TRUE
3093 else:
3094 main.log.error( "Counters are not consistent across all nodes" )
3095 consistentCounterResults = main.FALSE
3096 utilities.assert_equals( expect=main.TRUE,
3097 actual=consistentCounterResults,
3098 onpass="ONOS counters are consistent " +
3099 "across nodes",
3100 onfail="ONOS Counters are inconsistent " +
3101 "across nodes" )
3102
3103 main.step( "Counters we added have the correct values" )
3104 correctResults = main.TRUE
3105 for i in range( numControllers ):
3106 try:
3107 current = json.loads( onosCounters[i] )
3108 except ( ValueError, TypeError ):
3109 main.log.error( "Could not parse counters response from ONOS" +
3110 str( i + 1 ) )
3111 main.log.warn( repr( onosCounters[ i ] ) )
3112 pValue = None
3113 iValue = None
3114 try:
3115 for database in current:
3116 partitioned = database.get( 'partitionedDatabaseCounters' )
3117 if partitioned:
3118 for value in partitioned:
3119 if value.get( 'name' ) == pCounterName:
3120 pValue = value.get( 'value' )
3121 break
3122 inMemory = database.get( 'inMemoryDatabaseCounters' )
3123 if inMemory:
3124 for value in inMemory:
3125 if value.get( 'name' ) == iCounterName:
3126 iValue = value.get( 'value' )
3127 break
3128            except AttributeError as e:
3129 main.log.error( "ONOS" + str( i + 1 ) + " counters result " +
3130 "is not as expected" )
3131 correctResults = main.FALSE
3132 if pValue == pCounterValue:
3133 main.log.info( "Partitioned counter value is correct" )
3134 else:
3135 main.log.error( "Partitioned counter value is incorrect," +
3136 " expected value: " + str( pCounterValue )
3137 + " current value: " + str( pValue ) )
3138 correctResults = main.FALSE
3139 if iValue == iCounterValue:
3140 main.log.info( "In memory counter value is correct" )
3141 else:
3142 main.log.error( "In memory counter value is incorrect, " +
3143 "expected value: " + str( iCounterValue ) +
3144 " current value: " + str( iValue ) )
3145 correctResults = main.FALSE
3146 utilities.assert_equals( expect=main.TRUE,
3147 actual=correctResults,
3148 onpass="Added counters are correct",
3149 onfail="Added counters are incorrect" )
3150 # DISTRIBUTED SETS
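        # Each set step below follows the same pattern: run the CLI call on every
        # node in parallel, then re-read the set contents and size from every node
        # and compare them against the locally tracked onosSet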
3151 main.step( "Distributed Set get" )
3152 size = len( onosSet )
3153 getResponses = []
3154 threads = []
3155 for i in range( numControllers ):
3156 t = main.Thread( target=CLIs[i].setTestGet,
3157 name="setTestGet-" + str( i ),
3158 args=[ onosSetName ] )
3159 threads.append( t )
3160 t.start()
3161 for t in threads:
3162 t.join()
3163 getResponses.append( t.result )
3164
3165 getResults = main.TRUE
3166 for i in range( numControllers ):
3167 if isinstance( getResponses[ i ], list):
3168 current = set( getResponses[ i ] )
3169 if len( current ) == len( getResponses[ i ] ):
3170 # no repeats
3171 if onosSet != current:
3172 main.log.error( "ONOS" + str( i + 1 ) +
3173 " has incorrect view" +
3174 " of set " + onosSetName + ":\n" +
3175 str( getResponses[ i ] ) )
3176 main.log.debug( "Expected: " + str( onosSet ) )
3177 main.log.debug( "Actual: " + str( current ) )
3178 getResults = main.FALSE
3179 else:
3180 # error, set is not a set
3181 main.log.error( "ONOS" + str( i + 1 ) +
3182 " has repeat elements in" +
3183 " set " + onosSetName + ":\n" +
3184 str( getResponses[ i ] ) )
3185 getResults = main.FALSE
3186 elif getResponses[ i ] == main.ERROR:
3187 getResults = main.FALSE
3188 utilities.assert_equals( expect=main.TRUE,
3189 actual=getResults,
3190 onpass="Set elements are correct",
3191 onfail="Set elements are incorrect" )
3192
3193 main.step( "Distributed Set size" )
3194 sizeResponses = []
3195 threads = []
3196 for i in range( numControllers ):
3197 t = main.Thread( target=CLIs[i].setTestSize,
3198 name="setTestSize-" + str( i ),
3199 args=[ onosSetName ] )
3200 threads.append( t )
3201 t.start()
3202 for t in threads:
3203 t.join()
3204 sizeResponses.append( t.result )
3205
3206 sizeResults = main.TRUE
3207 for i in range( numControllers ):
3208 if size != sizeResponses[ i ]:
3209 sizeResults = main.FALSE
3210 main.log.error( "ONOS" + str( i + 1 ) +
3211 " expected a size of " + str( size ) +
3212 " for set " + onosSetName +
3213 " but got " + str( sizeResponses[ i ] ) )
3214 utilities.assert_equals( expect=main.TRUE,
3215 actual=sizeResults,
3216 onpass="Set sizes are correct",
3217 onfail="Set sizes are incorrect" )
3218
3219 main.step( "Distributed Set add()" )
3220 onosSet.add( addValue )
3221 addResponses = []
3222 threads = []
3223 for i in range( numControllers ):
3224 t = main.Thread( target=CLIs[i].setTestAdd,
3225 name="setTestAdd-" + str( i ),
3226 args=[ onosSetName, addValue ] )
3227 threads.append( t )
3228 t.start()
3229 for t in threads:
3230 t.join()
3231 addResponses.append( t.result )
3232
3233 # main.TRUE = successfully changed the set
3234 # main.FALSE = action resulted in no change in set
3235 # main.ERROR - Some error in executing the function
3236 addResults = main.TRUE
3237 for i in range( numControllers ):
3238 if addResponses[ i ] == main.TRUE:
3239 # All is well
3240 pass
3241 elif addResponses[ i ] == main.FALSE:
3242 # Already in set, probably fine
3243 pass
3244 elif addResponses[ i ] == main.ERROR:
3245 # Error in execution
3246 addResults = main.FALSE
3247 else:
3248 # unexpected result
3249 addResults = main.FALSE
3250 if addResults != main.TRUE:
3251 main.log.error( "Error executing set add" )
3252
3253 # Check if set is still correct
3254 size = len( onosSet )
3255 getResponses = []
3256 threads = []
3257 for i in range( numControllers ):
3258 t = main.Thread( target=CLIs[i].setTestGet,
3259 name="setTestGet-" + str( i ),
3260 args=[ onosSetName ] )
3261 threads.append( t )
3262 t.start()
3263 for t in threads:
3264 t.join()
3265 getResponses.append( t.result )
3266 getResults = main.TRUE
3267 for i in range( numControllers ):
3268 if isinstance( getResponses[ i ], list):
3269 current = set( getResponses[ i ] )
3270 if len( current ) == len( getResponses[ i ] ):
3271 # no repeats
3272 if onosSet != current:
3273 main.log.error( "ONOS" + str( i + 1 ) +
3274 " has incorrect view" +
3275 " of set " + onosSetName + ":\n" +
3276 str( getResponses[ i ] ) )
3277 main.log.debug( "Expected: " + str( onosSet ) )
3278 main.log.debug( "Actual: " + str( current ) )
3279 getResults = main.FALSE
3280 else:
3281 # error, set is not a set
3282 main.log.error( "ONOS" + str( i + 1 ) +
3283 " has repeat elements in" +
3284 " set " + onosSetName + ":\n" +
3285 str( getResponses[ i ] ) )
3286 getResults = main.FALSE
3287 elif getResponses[ i ] == main.ERROR:
3288 getResults = main.FALSE
3289 sizeResponses = []
3290 threads = []
3291 for i in range( numControllers ):
3292 t = main.Thread( target=CLIs[i].setTestSize,
3293 name="setTestSize-" + str( i ),
3294 args=[ onosSetName ] )
3295 threads.append( t )
3296 t.start()
3297 for t in threads:
3298 t.join()
3299 sizeResponses.append( t.result )
3300 sizeResults = main.TRUE
3301 for i in range( numControllers ):
3302 if size != sizeResponses[ i ]:
3303 sizeResults = main.FALSE
3304 main.log.error( "ONOS" + str( i + 1 ) +
3305 " expected a size of " + str( size ) +
3306 " for set " + onosSetName +
3307 " but got " + str( sizeResponses[ i ] ) )
3308 addResults = addResults and getResults and sizeResults
3309 utilities.assert_equals( expect=main.TRUE,
3310 actual=addResults,
3311 onpass="Set add correct",
3312 onfail="Set add was incorrect" )
3313
3314 main.step( "Distributed Set addAll()" )
3315 onosSet.update( addAllValue.split() )
3316 addResponses = []
3317 threads = []
3318 for i in range( numControllers ):
3319 t = main.Thread( target=CLIs[i].setTestAdd,
3320 name="setTestAddAll-" + str( i ),
3321 args=[ onosSetName, addAllValue ] )
3322 threads.append( t )
3323 t.start()
3324 for t in threads:
3325 t.join()
3326 addResponses.append( t.result )
3327
3328 # main.TRUE = successfully changed the set
3329 # main.FALSE = action resulted in no change in set
3330 # main.ERROR - Some error in executing the function
3331 addAllResults = main.TRUE
3332 for i in range( numControllers ):
3333 if addResponses[ i ] == main.TRUE:
3334 # All is well
3335 pass
3336 elif addResponses[ i ] == main.FALSE:
3337 # Already in set, probably fine
3338 pass
3339 elif addResponses[ i ] == main.ERROR:
3340 # Error in execution
3341 addAllResults = main.FALSE
3342 else:
3343 # unexpected result
3344 addAllResults = main.FALSE
3345 if addAllResults != main.TRUE:
3346 main.log.error( "Error executing set addAll" )
3347
3348 # Check if set is still correct
3349 size = len( onosSet )
3350 getResponses = []
3351 threads = []
3352 for i in range( numControllers ):
3353 t = main.Thread( target=CLIs[i].setTestGet,
3354 name="setTestGet-" + str( i ),
3355 args=[ onosSetName ] )
3356 threads.append( t )
3357 t.start()
3358 for t in threads:
3359 t.join()
3360 getResponses.append( t.result )
3361 getResults = main.TRUE
3362 for i in range( numControllers ):
3363 if isinstance( getResponses[ i ], list):
3364 current = set( getResponses[ i ] )
3365 if len( current ) == len( getResponses[ i ] ):
3366 # no repeats
3367 if onosSet != current:
3368 main.log.error( "ONOS" + str( i + 1 ) +
3369 " has incorrect view" +
3370 " of set " + onosSetName + ":\n" +
3371 str( getResponses[ i ] ) )
3372 main.log.debug( "Expected: " + str( onosSet ) )
3373 main.log.debug( "Actual: " + str( current ) )
3374 getResults = main.FALSE
3375 else:
3376 # error, set is not a set
3377 main.log.error( "ONOS" + str( i + 1 ) +
3378 " has repeat elements in" +
3379 " set " + onosSetName + ":\n" +
3380 str( getResponses[ i ] ) )
3381 getResults = main.FALSE
3382 elif getResponses[ i ] == main.ERROR:
3383 getResults = main.FALSE
3384 sizeResponses = []
3385 threads = []
3386 for i in range( numControllers ):
3387 t = main.Thread( target=CLIs[i].setTestSize,
3388 name="setTestSize-" + str( i ),
3389 args=[ onosSetName ] )
3390 threads.append( t )
3391 t.start()
3392 for t in threads:
3393 t.join()
3394 sizeResponses.append( t.result )
3395 sizeResults = main.TRUE
3396 for i in range( numControllers ):
3397 if size != sizeResponses[ i ]:
3398 sizeResults = main.FALSE
3399 main.log.error( "ONOS" + str( i + 1 ) +
3400 " expected a size of " + str( size ) +
3401 " for set " + onosSetName +
3402 " but got " + str( sizeResponses[ i ] ) )
3403 addAllResults = addAllResults and getResults and sizeResults
3404 utilities.assert_equals( expect=main.TRUE,
3405 actual=addAllResults,
3406 onpass="Set addAll correct",
3407 onfail="Set addAll was incorrect" )
3408
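# contains: every node checks whether the single value added earlier is still in the set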
3409 main.step( "Distributed Set contains()" )
3410 containsResponses = []
3411 threads = []
3412 for i in range( numControllers ):
3413 t = main.Thread( target=CLIs[i].setTestGet,
3414 name="setContains-" + str( i ),
3415 args=[ onosSetName ],
3416 kwargs={ "values": addValue } )
3417 threads.append( t )
3418 t.start()
3419 for t in threads:
3420 t.join()
3421 # NOTE: the result is a tuple; index 1 holds the contains() boolean
3422 containsResponses.append( t.result )
3423
3424 containsResults = main.TRUE
3425 for i in range( numControllers ):
3426 if containsResponses[ i ] == main.ERROR:
3427 containsResults = main.FALSE
3428 else:
3429 containsResults = containsResults and\
3430 containsResponses[ i ][ 1 ]
3431 utilities.assert_equals( expect=main.TRUE,
3432 actual=containsResults,
3433 onpass="Set contains is functional",
3434 onfail="Set contains failed" )
3435
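# containsAll: every node checks that all of the addAllValue elements are in the set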
3436 main.step( "Distributed Set containsAll()" )
3437 containsAllResponses = []
3438 threads = []
3439 for i in range( numControllers ):
3440 t = main.Thread( target=CLIs[i].setTestGet,
3441 name="setContainsAll-" + str( i ),
3442 args=[ onosSetName ],
3443 kwargs={ "values": addAllValue } )
3444 threads.append( t )
3445 t.start()
3446 for t in threads:
3447 t.join()
3448 # NOTE: the result is a tuple; index 1 holds the containsAll() boolean
3449 containsAllResponses.append( t.result )
3450
3451 containsAllResults = main.TRUE
3452 for i in range( numControllers ):
3453 if containsAllResponses[ i ] == main.ERROR:
3454 containsAllResults = main.FALSE
3455 else:
3456 containsAllResults = containsAllResults and\
3457 containsAllResponses[ i ][ 1 ]
3458 utilities.assert_equals( expect=main.TRUE,
3459 actual=containsAllResults,
3460 onpass="Set containsAll is functional",
3461 onfail="Set containsAll failed" )
3462
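# remove: take the single added value back out of the set from every node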
3463 main.step( "Distributed Set remove()" )
3464 onosSet.remove( addValue )
3465 removeResponses = []
3466 threads = []
3467 for i in range( numControllers ):
3468 t = main.Thread( target=CLIs[i].setTestRemove,
3469 name="setTestRemove-" + str( i ),
3470 args=[ onosSetName, addValue ] )
3471 threads.append( t )
3472 t.start()
3473 for t in threads:
3474 t.join()
3475 removeResponses.append( t.result )
3476
3477 # main.TRUE = successfully changed the set
3478 # main.FALSE = action resulted in no change in set
3479 # main.ERROR - Some error in executing the function
3480 removeResults = main.TRUE
3481 for i in range( numControllers ):
3482 if removeResponses[ i ] == main.TRUE:
3483 # All is well
3484 pass
3485 elif removeResponses[ i ] == main.FALSE:
3486 # not in set, probably fine
3487 pass
3488 elif removeResponses[ i ] == main.ERROR:
3489 # Error in execution
3490 removeResults = main.FALSE
3491 else:
3492 # unexpected result
3493 removeResults = main.FALSE
3494 if removeResults != main.TRUE:
3495 main.log.error( "Error executing set remove" )
3496
3497 # Check if set is still correct
3498 size = len( onosSet )
3499 getResponses = []
3500 threads = []
3501 for i in range( numControllers ):
3502 t = main.Thread( target=CLIs[i].setTestGet,
3503 name="setTestGet-" + str( i ),
3504 args=[ onosSetName ] )
3505 threads.append( t )
3506 t.start()
3507 for t in threads:
3508 t.join()
3509 getResponses.append( t.result )
3510 getResults = main.TRUE
3511 for i in range( numControllers ):
3512 if isinstance( getResponses[ i ], list):
3513 current = set( getResponses[ i ] )
3514 if len( current ) == len( getResponses[ i ] ):
3515 # no repeats
3516 if onosSet != current:
3517 main.log.error( "ONOS" + str( i + 1 ) +
3518 " has incorrect view" +
3519 " of set " + onosSetName + ":\n" +
3520 str( getResponses[ i ] ) )
3521 main.log.debug( "Expected: " + str( onosSet ) )
3522 main.log.debug( "Actual: " + str( current ) )
3523 getResults = main.FALSE
3524 else:
3525 # error, set is not a set
3526 main.log.error( "ONOS" + str( i + 1 ) +
3527 " has repeat elements in" +
3528 " set " + onosSetName + ":\n" +
3529 str( getResponses[ i ] ) )
3530 getResults = main.FALSE
3531 elif getResponses[ i ] == main.ERROR:
3532 getResults = main.FALSE
3533 sizeResponses = []
3534 threads = []
3535 for i in range( numControllers ):
3536 t = main.Thread( target=CLIs[i].setTestSize,
3537 name="setTestSize-" + str( i ),
3538 args=[ onosSetName ] )
3539 threads.append( t )
3540 t.start()
3541 for t in threads:
3542 t.join()
3543 sizeResponses.append( t.result )
3544 sizeResults = main.TRUE
3545 for i in range( numControllers ):
3546 if size != sizeResponses[ i ]:
3547 sizeResults = main.FALSE
3548 main.log.error( "ONOS" + str( i + 1 ) +
3549 " expected a size of " + str( size ) +
3550 " for set " + onosSetName +
3551 " but got " + str( sizeResponses[ i ] ) )
3552 removeResults = removeResults and getResults and sizeResults
3553 utilities.assert_equals( expect=main.TRUE,
3554 actual=removeResults,
3555 onpass="Set remove correct",
3556 onfail="Set remove was incorrect" )
3557
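# removeAll: remove all of the addAllValue elements from the set in one call per node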
3558 main.step( "Distributed Set removeAll()" )
3559 onosSet.difference_update( addAllValue.split() )
3560 removeAllResponses = []
3561 threads = []
3562 try:
3563 for i in range( numControllers ):
3564 t = main.Thread( target=CLIs[i].setTestRemove,
3565 name="setTestRemoveAll-" + str( i ),
3566 args=[ onosSetName, addAllValue ] )
3567 threads.append( t )
3568 t.start()
3569 for t in threads:
3570 t.join()
3571 removeAllResponses.append( t.result )
3572 except Exception as e:
3573 main.log.exception( e )
3574
3575 # main.TRUE = successfully changed the set
3576 # main.FALSE = action resulted in no change in set
3577 # main.ERROR - Some error in executing the function
3578 removeAllResults = main.TRUE
3579 for i in range( numControllers ):
3580 if removeAllResponses[ i ] == main.TRUE:
3581 # All is well
3582 pass
3583 elif removeAllResponses[ i ] == main.FALSE:
3584 # not in set, probably fine
3585 pass
3586 elif removeAllResponses[ i ] == main.ERROR:
3587 # Error in execution
3588 removeAllResults = main.FALSE
3589 else:
3590 # unexpected result
3591 removeAllResults = main.FALSE
3592 if removeAllResults != main.TRUE:
3593 main.log.error( "Error executing set removeAll" )
3594
3595 # Check if set is still correct
3596 size = len( onosSet )
3597 getResponses = []
3598 threads = []
3599 for i in range( numControllers ):
3600 t = main.Thread( target=CLIs[i].setTestGet,
3601 name="setTestGet-" + str( i ),
3602 args=[ onosSetName ] )
3603 threads.append( t )
3604 t.start()
3605 for t in threads:
3606 t.join()
3607 getResponses.append( t.result )
3608 getResults = main.TRUE
3609 for i in range( numControllers ):
3610 if isinstance( getResponses[ i ], list):
3611 current = set( getResponses[ i ] )
3612 if len( current ) == len( getResponses[ i ] ):
3613 # no repeats
3614 if onosSet != current:
3615 main.log.error( "ONOS" + str( i + 1 ) +
3616 " has incorrect view" +
3617 " of set " + onosSetName + ":\n" +
3618 str( getResponses[ i ] ) )
3619 main.log.debug( "Expected: " + str( onosSet ) )
3620 main.log.debug( "Actual: " + str( current ) )
3621 getResults = main.FALSE
3622 else:
3623 # error, set is not a set
3624 main.log.error( "ONOS" + str( i + 1 ) +
3625 " has repeat elements in" +
3626 " set " + onosSetName + ":\n" +
3627 str( getResponses[ i ] ) )
3628 getResults = main.FALSE
3629 elif getResponses[ i ] == main.ERROR:
3630 getResults = main.FALSE
3631 sizeResponses = []
3632 threads = []
3633 for i in range( numControllers ):
3634 t = main.Thread( target=CLIs[i].setTestSize,
3635 name="setTestSize-" + str( i ),
3636 args=[ onosSetName ] )
3637 threads.append( t )
3638 t.start()
3639 for t in threads:
3640 t.join()
3641 sizeResponses.append( t.result )
3642 sizeResults = main.TRUE
3643 for i in range( numControllers ):
3644 if size != sizeResponses[ i ]:
3645 sizeResults = main.FALSE
3646 main.log.error( "ONOS" + str( i + 1 ) +
3647 " expected a size of " + str( size ) +
3648 " for set " + onosSetName +
3649 " but got " + str( sizeResponses[ i ] ) )
3650 removeAllResults = removeAllResults and getResults and sizeResults
3651 utilities.assert_equals( expect=main.TRUE,
3652 actual=removeAllResults,
3653 onpass="Set removeAll correct",
3654 onfail="Set removeAll was incorrect" )
3655
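# Re-populate the set after removeAll so the following clear() step has elements to remove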
3656 main.step( "Distributed Set addAll()" )
3657 onosSet.update( addAllValue.split() )
3658 addResponses = []
3659 threads = []
3660 for i in range( numControllers ):
3661 t = main.Thread( target=CLIs[i].setTestAdd,
3662 name="setTestAddAll-" + str( i ),
3663 args=[ onosSetName, addAllValue ] )
3664 threads.append( t )
3665 t.start()
3666 for t in threads:
3667 t.join()
3668 addResponses.append( t.result )
3669
3670 # main.TRUE = successfully changed the set
3671 # main.FALSE = action resulted in no change in set
3672 # main.ERROR - Some error in executing the function
3673 addAllResults = main.TRUE
3674 for i in range( numControllers ):
3675 if addResponses[ i ] == main.TRUE:
3676 # All is well
3677 pass
3678 elif addResponses[ i ] == main.FALSE:
3679 # Already in set, probably fine
3680 pass
3681 elif addResponses[ i ] == main.ERROR:
3682 # Error in execution
3683 addAllResults = main.FALSE
3684 else:
3685 # unexpected result
3686 addAllResults = main.FALSE
3687 if addAllResults != main.TRUE:
3688 main.log.error( "Error executing set addAll" )
3689
3690 # Check if set is still correct
3691 size = len( onosSet )
3692 getResponses = []
3693 threads = []
3694 for i in range( numControllers ):
3695 t = main.Thread( target=CLIs[i].setTestGet,
3696 name="setTestGet-" + str( i ),
3697 args=[ onosSetName ] )
3698 threads.append( t )
3699 t.start()
3700 for t in threads:
3701 t.join()
3702 getResponses.append( t.result )
3703 getResults = main.TRUE
3704 for i in range( numControllers ):
3705 if isinstance( getResponses[ i ], list):
3706 current = set( getResponses[ i ] )
3707 if len( current ) == len( getResponses[ i ] ):
3708 # no repeats
3709 if onosSet != current:
3710 main.log.error( "ONOS" + str( i + 1 ) +
3711 " has incorrect view" +
3712 " of set " + onosSetName + ":\n" +
3713 str( getResponses[ i ] ) )
3714 main.log.debug( "Expected: " + str( onosSet ) )
3715 main.log.debug( "Actual: " + str( current ) )
3716 getResults = main.FALSE
3717 else:
3718 # error, set is not a set
3719 main.log.error( "ONOS" + str( i + 1 ) +
3720 " has repeat elements in" +
3721 " set " + onosSetName + ":\n" +
3722 str( getResponses[ i ] ) )
3723 getResults = main.FALSE
3724 elif getResponses[ i ] == main.ERROR:
3725 getResults = main.FALSE
3726 sizeResponses = []
3727 threads = []
3728 for i in range( numControllers ):
3729 t = main.Thread( target=CLIs[i].setTestSize,
3730 name="setTestSize-" + str( i ),
3731 args=[ onosSetName ] )
3732 threads.append( t )
3733 t.start()
3734 for t in threads:
3735 t.join()
3736 sizeResponses.append( t.result )
3737 sizeResults = main.TRUE
3738 for i in range( numControllers ):
3739 if size != sizeResponses[ i ]:
3740 sizeResults = main.FALSE
3741 main.log.error( "ONOS" + str( i + 1 ) +
3742 " expected a size of " + str( size ) +
3743 " for set " + onosSetName +
3744 " but got " + str( sizeResponses[ i ] ) )
3745 addAllResults = addAllResults and getResults and sizeResults
3746 utilities.assert_equals( expect=main.TRUE,
3747 actual=addAllResults,
3748 onpass="Set addAll correct",
3749 onfail="Set addAll was incorrect" )
3750
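# clear: empty the set from every node; the value passed to setTestRemove is not used when clear=True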
3751 main.step( "Distributed Set clear()" )
3752 onosSet.clear()
3753 clearResponses = []
3754 threads = []
3755 for i in range( numControllers ):
3756 t = main.Thread( target=CLIs[i].setTestRemove,
3757 name="setTestClear-" + str( i ),
3758 args=[ onosSetName, " " ], # Value doesn't matter when clearing
3759 kwargs={ "clear": True } )
3760 threads.append( t )
3761 t.start()
3762 for t in threads:
3763 t.join()
3764 clearResponses.append( t.result )
3765
3766 # main.TRUE = successfully changed the set
3767 # main.FALSE = action resulted in no change in set
3768 # main.ERROR - Some error in executing the function
3769 clearResults = main.TRUE
3770 for i in range( numControllers ):
3771 if clearResponses[ i ] == main.TRUE:
3772 # All is well
3773 pass
3774 elif clearResponses[ i ] == main.FALSE:
3775 # Nothing set, probably fine
3776 pass
3777 elif clearResponses[ i ] == main.ERROR:
3778 # Error in execution
3779 clearResults = main.FALSE
3780 else:
3781 # unexpected result
3782 clearResults = main.FALSE
3783 if clearResults != main.TRUE:
3784 main.log.error( "Error executing set clear" )
3785
3786 # Check if set is still correct
3787 size = len( onosSet )
3788 getResponses = []
3789 threads = []
3790 for i in range( numControllers ):
3791 t = main.Thread( target=CLIs[i].setTestGet,
3792 name="setTestGet-" + str( i ),
3793 args=[ onosSetName ] )
3794 threads.append( t )
3795 t.start()
3796 for t in threads:
3797 t.join()
3798 getResponses.append( t.result )
3799 getResults = main.TRUE
3800 for i in range( numControllers ):
3801 if isinstance( getResponses[ i ], list):
3802 current = set( getResponses[ i ] )
3803 if len( current ) == len( getResponses[ i ] ):
3804 # no repeats
3805 if onosSet != current:
3806 main.log.error( "ONOS" + str( i + 1 ) +
3807 " has incorrect view" +
3808 " of set " + onosSetName + ":\n" +
3809 str( getResponses[ i ] ) )
3810 main.log.debug( "Expected: " + str( onosSet ) )
3811 main.log.debug( "Actual: " + str( current ) )
3812 getResults = main.FALSE
3813 else:
3814 # error, set is not a set
3815 main.log.error( "ONOS" + str( i + 1 ) +
3816 " has repeat elements in" +
3817 " set " + onosSetName + ":\n" +
3818 str( getResponses[ i ] ) )
3819 getResults = main.FALSE
3820 elif getResponses[ i ] == main.ERROR:
3821 getResults = main.FALSE
3822 sizeResponses = []
3823 threads = []
3824 for i in range( numControllers ):
3825 t = main.Thread( target=CLIs[i].setTestSize,
3826 name="setTestSize-" + str( i ),
3827 args=[ onosSetName ] )
3828 threads.append( t )
3829 t.start()
3830 for t in threads:
3831 t.join()
3832 sizeResponses.append( t.result )
3833 sizeResults = main.TRUE
3834 for i in range( numControllers ):
3835 if size != sizeResponses[ i ]:
3836 sizeResults = main.FALSE
3837 main.log.error( "ONOS" + str( i + 1 ) +
3838 " expected a size of " + str( size ) +
3839 " for set " + onosSetName +
3840 " but got " + str( sizeResponses[ i ] ) )
3841 clearResults = clearResults and getResults and sizeResults
3842 utilities.assert_equals( expect=main.TRUE,
3843 actual=clearResults,
3844 onpass="Set clear correct",
3845 onfail="Set clear was incorrect" )
3846
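# Re-populate the set again so the retain() step below has elements to intersect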
3847 main.step( "Distributed Set addAll()" )
3848 onosSet.update( addAllValue.split() )
3849 addResponses = []
3850 threads = []
3851 for i in range( numControllers ):
3852 t = main.Thread( target=CLIs[i].setTestAdd,
3853 name="setTestAddAll-" + str( i ),
3854 args=[ onosSetName, addAllValue ] )
3855 threads.append( t )
3856 t.start()
3857 for t in threads:
3858 t.join()
3859 addResponses.append( t.result )
3860
3861 # main.TRUE = successfully changed the set
3862 # main.FALSE = action resulted in no change in set
3863 # main.ERROR - Some error in executing the function
3864 addAllResults = main.TRUE
3865 for i in range( numControllers ):
3866 if addResponses[ i ] == main.TRUE:
3867 # All is well
3868 pass
3869 elif addResponses[ i ] == main.FALSE:
3870 # Already in set, probably fine
3871 pass
3872 elif addResponses[ i ] == main.ERROR:
3873 # Error in execution
3874 addAllResults = main.FALSE
3875 else:
3876 # unexpected result
3877 addAllResults = main.FALSE
3878 if addAllResults != main.TRUE:
3879 main.log.error( "Error executing set addAll" )
3880
3881 # Check if set is still correct
3882 size = len( onosSet )
3883 getResponses = []
3884 threads = []
3885 for i in range( numControllers ):
3886 t = main.Thread( target=CLIs[i].setTestGet,
3887 name="setTestGet-" + str( i ),
3888 args=[ onosSetName ] )
3889 threads.append( t )
3890 t.start()
3891 for t in threads:
3892 t.join()
3893 getResponses.append( t.result )
3894 getResults = main.TRUE
3895 for i in range( numControllers ):
3896 if isinstance( getResponses[ i ], list):
3897 current = set( getResponses[ i ] )
3898 if len( current ) == len( getResponses[ i ] ):
3899 # no repeats
3900 if onosSet != current:
3901 main.log.error( "ONOS" + str( i + 1 ) +
3902 " has incorrect view" +
3903 " of set " + onosSetName + ":\n" +
3904 str( getResponses[ i ] ) )
3905 main.log.debug( "Expected: " + str( onosSet ) )
3906 main.log.debug( "Actual: " + str( current ) )
3907 getResults = main.FALSE
3908 else:
3909 # error, set is not a set
3910 main.log.error( "ONOS" + str( i + 1 ) +
3911 " has repeat elements in" +
3912 " set " + onosSetName + ":\n" +
3913 str( getResponses[ i ] ) )
3914 getResults = main.FALSE
3915 elif getResponses[ i ] == main.ERROR:
3916 getResults = main.FALSE
3917 sizeResponses = []
3918 threads = []
3919 for i in range( numControllers ):
3920 t = main.Thread( target=CLIs[i].setTestSize,
3921 name="setTestSize-" + str( i ),
3922 args=[ onosSetName ] )
3923 threads.append( t )
3924 t.start()
3925 for t in threads:
3926 t.join()
3927 sizeResponses.append( t.result )
3928 sizeResults = main.TRUE
3929 for i in range( numControllers ):
3930 if size != sizeResponses[ i ]:
3931 sizeResults = main.FALSE
3932 main.log.error( "ONOS" + str( i + 1 ) +
3933 " expected a size of " + str( size ) +
3934 " for set " + onosSetName +
3935 " but got " + str( sizeResponses[ i ] ) )
3936 addAllResults = addAllResults and getResults and sizeResults
3937 utilities.assert_equals( expect=main.TRUE,
3938 actual=addAllResults,
3939 onpass="Set addAll correct",
3940 onfail="Set addAll was incorrect" )
3941
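# retain: keep only the elements listed in retainValue, i.e. a set intersection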
3942 main.step( "Distributed Set retain()" )
3943 onosSet.intersection_update( retainValue.split() )
3944 retainResponses = []
3945 threads = []
3946 for i in range( numControllers ):
3947 t = main.Thread( target=CLIs[i].setTestRemove,
3948 name="setTestRetain-" + str( i ),
3949 args=[ onosSetName, retainValue ],
3950 kwargs={ "retain": True } )
3951 threads.append( t )
3952 t.start()
3953 for t in threads:
3954 t.join()
3955 retainResponses.append( t.result )
3956
3957 # main.TRUE = successfully changed the set
3958 # main.FALSE = action resulted in no change in set
3959 # main.ERROR - Some error in executing the function
3960 retainResults = main.TRUE
3961 for i in range( numControllers ):
3962 if retainResponses[ i ] == main.TRUE:
3963 # All is well
3964 pass
3965 elif retainResponses[ i ] == main.FALSE:
3966 # Already in set, probably fine
3967 pass
3968 elif retainResponses[ i ] == main.ERROR:
3969 # Error in execution
3970 retainResults = main.FALSE
3971 else:
3972 # unexpected result
3973 retainResults = main.FALSE
3974 if retainResults != main.TRUE:
3975 main.log.error( "Error executing set retain" )
3976
3977 # Check if set is still correct
3978 size = len( onosSet )
3979 getResponses = []
3980 threads = []
3981 for i in range( numControllers ):
3982 t = main.Thread( target=CLIs[i].setTestGet,
3983 name="setTestGet-" + str( i ),
3984 args=[ onosSetName ] )
3985 threads.append( t )
3986 t.start()
3987 for t in threads:
3988 t.join()
3989 getResponses.append( t.result )
3990 getResults = main.TRUE
3991 for i in range( numControllers ):
3992 if isinstance( getResponses[ i ], list):
3993 current = set( getResponses[ i ] )
3994 if len( current ) == len( getResponses[ i ] ):
3995 # no repeats
3996 if onosSet != current:
3997 main.log.error( "ONOS" + str( i + 1 ) +
3998 " has incorrect view" +
3999 " of set " + onosSetName + ":\n" +
4000 str( getResponses[ i ] ) )
4001 main.log.debug( "Expected: " + str( onosSet ) )
4002 main.log.debug( "Actual: " + str( current ) )
4003 getResults = main.FALSE
4004 else:
4005 # error, set is not a set
4006 main.log.error( "ONOS" + str( i + 1 ) +
4007 " has repeat elements in" +
4008 " set " + onosSetName + ":\n" +
4009 str( getResponses[ i ] ) )
4010 getResults = main.FALSE
4011 elif getResponses[ i ] == main.ERROR:
4012 getResults = main.FALSE
4013 sizeResponses = []
4014 threads = []
4015 for i in range( numControllers ):
4016 t = main.Thread( target=CLIs[i].setTestSize,
4017 name="setTestSize-" + str( i ),
4018 args=[ onosSetName ] )
4019 threads.append( t )
4020 t.start()
4021 for t in threads:
4022 t.join()
4023 sizeResponses.append( t.result )
4024 sizeResults = main.TRUE
4025 for i in range( numControllers ):
4026 if size != sizeResponses[ i ]:
4027 sizeResults = main.FALSE
4028 main.log.error( "ONOS" + str( i + 1 ) +
4029 " expected a size of " +
4030 str( size ) + " for set " + onosSetName +
4031 " but got " + str( sizeResponses[ i ] ) )
4032 retainResults = retainResults and getResults and sizeResults
4033 utilities.assert_equals( expect=main.TRUE,
4034 actual=retainResults,
4035 onpass="Set retain correct",
4036 onfail="Set retain was incorrect" )
4037