"""
Copyright 2015 Open Networking Foundation (ONF)

Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>

    TestON is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    (at your option) any later version.

    TestON is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with TestON.  If not, see <http://www.gnu.org/licenses/>.
"""

import json
import time
import pexpect


class HA():

    def __init__( self ):
        self.default = ''

    def customizeOnosGenPartitions( self ):
        self.startingMininet()
        # copy gen-partitions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )

    def cleanUpGenPartition( self ):
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( "Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()

    def startingMininet( self ):
        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

    def scalingMetadata( self ):
        import re
        main.scaling = main.params[ 'scaling' ].split( "," )
        main.log.debug( main.scaling )
        scale = main.scaling.pop( 0 )
        main.log.debug( scale )
        if "e" in scale:
            equal = True
        else:
            equal = False
        main.log.debug( equal )
        main.numCtrls = int( re.search( "\d+", scale ).group( 0 ) )
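        # Illustrative parse ( values not from the source ): a scaling entry
        # of "3e" yields equal=True and numCtrls=3, while "5" yields
        # equal=False and numCtrls=5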
        genResult = main.Server.generateFile( main.numCtrls, equal=equal )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failed to generate new metadata file" )

    def swapNodeMetadata( self ):
        if main.numCtrls >= 5:
            main.numCtrls -= 2
        else:
            main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
        genResult = main.Server.generateFile( main.numCtrls )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failed to generate new metadata file" )

    def customizeOnosService( self, metadataMethod ):
        import os
        main.step( "Setup server for cluster metadata file" )
        main.serverPort = main.params[ 'server' ][ 'port' ]
        rootDir = os.path.dirname( main.testFile ) + "/dependencies"
        main.log.debug( "Root dir: {}".format( rootDir ) )
        status = main.Server.start( main.ONOSbench,
                                    rootDir,
                                    port=main.serverPort,
                                    logDir=main.logdir + "/server.log" )
        utilities.assert_equals( expect=main.TRUE, actual=status,
                                 onpass="Server started",
                                 onfail="Failed to start SimpleHTTPServer" )

        main.step( "Generate initial metadata file" )
        metadataMethod()

        self.startingMininet()

        main.step( "Copying backup config files" )
        main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 main.onosServicepath,
                                 main.onosServicepath + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, main.onosServicepath )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )
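        # After the sed edit above, onos-service should contain a line roughly
        # like the following ( IP and port are illustrative, not from the
        # source ):
        #   export JAVA_OPTS=${JAVA_OPTS:--Donos.cluster.metadata.uri=http://10.0.0.1:8000/cluster.json}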

    def cleanUpOnosService( self ):
        # Cleanup custom onos-service file
        main.ONOSbench.scp( main.ONOSbench,
                            main.onosServicepath + ".backup",
                            main.onosServicepath,
                            direction="to" )

    def consistentCheck( self ):
        """
        Checks that TestON counters are consistent across all nodes.

        Returns the tuple ( onosCounters, consistent )
        - onosCounters is the parsed json output of the counters command on
          all nodes
        - consistent is main.TRUE if all "TestON" counters are consistent
          across all nodes, main.FALSE otherwise
        """
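        # Illustrative return value ( shape only, values are not from the
        # source ):
        #   ( [ { "TestON-Partitions": 56, ... }, ... ], main.TRUE )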
        try:
            # Get onos counters results
            onosCountersRaw = []
            threads = []
            for i in main.activeNodes:
                t = main.Thread( target=utilities.retry,
                                 name="counters-" + str( i ),
                                 args=[ main.CLIs[ i ].counters, [ None ] ],
                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                          'randomTime': True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                onosCountersRaw.append( t.result )
            onosCounters = []
            for i in range( len( main.activeNodes ) ):
                try:
                    onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
                except ( ValueError, TypeError ):
                    main.log.error( "Could not parse counters response from ONOS" +
                                    str( main.activeNodes[ i ] + 1 ) )
                    main.log.warn( repr( onosCountersRaw[ i ] ) )
                    onosCounters.append( [] )

            testCounters = {}
            # make a list of all the "TestON-*" counters in ONOS
            # looks like a dict whose keys are the name of the ONOS node and
            # values are a list of the counters. I.E.
            # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
            # }
            # NOTE: There is an assumption that all nodes are active
            #       based on the above for loops
            for controller in enumerate( onosCounters ):
                for key, value in controller[ 1 ].iteritems():
                    if 'TestON' in key:
                        node = 'ONOS' + str( controller[ 0 ] + 1 )
                        try:
                            testCounters[ node ].append( { key: value } )
                        except KeyError:
                            testCounters[ node ] = [ { key: value } ]
            # compare the counters on each node
            firstV = testCounters.values()[ 0 ]
            tmp = [ v == firstV for k, v in testCounters.iteritems() ]
            if all( tmp ):
                consistent = main.TRUE
            else:
                consistent = main.FALSE
                main.log.error( "ONOS nodes have different values for counters:\n" +
                                str( testCounters ) )
            return ( onosCounters, consistent )
        except Exception:
            main.log.exception( "" )
            main.cleanup()
            main.exit()

    def counterCheck( self, counterName, counterValue ):
        """
        Checks that TestON counters are consistent across all nodes and that
        the specified counter is in ONOS with the given value
        """
        try:
            correctResults = main.TRUE
            # Get onos counters results and consistentCheck
            onosCounters, consistent = self.consistentCheck()
            # Check for correct values
            for i in range( len( main.activeNodes ) ):
                current = onosCounters[ i ]
                onosValue = None
                try:
                    onosValue = current.get( counterName )
                except AttributeError:
                    node = str( main.activeNodes[ i ] + 1 )
                    main.log.exception( "ONOS" + node + " counters result " +
                                        "is not as expected" )
                    correctResults = main.FALSE
                if onosValue == counterValue:
                    main.log.info( counterName + " counter value is correct" )
                else:
                    main.log.error( counterName +
                                    " counter value is incorrect," +
                                    " expected value: " + str( counterValue ) +
                                    " current value: " + str( onosValue ) )
                    correctResults = main.FALSE
            return consistent and correctResults
        except Exception:
            main.log.exception( "" )
            main.cleanup()
            main.exit()

    def consistentLeaderboards( self, nodes ):
        TOPIC = 'org.onosproject.election'
        # FIXME: use threads
        # FIXME: should we retry outside the function?
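        # Returns ( result, leaderList ): result is True only when every node
        # reports the same candidate list for the election topic; up to five
        # retries are made while elections settle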
        for n in range( 5 ):  # Retry in case election is still happening
            leaderList = []
            # Get all leaderboards
            for cli in nodes:
                leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
            # Compare leaderboards
            result = all( i == leaderList[ 0 ] for i in leaderList ) and\
                     leaderList is not None
            main.log.debug( leaderList )
            main.log.warn( result )
            if result:
                return ( result, leaderList )
            time.sleep( 5 )  # TODO: parameterize
        main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
        return ( result, leaderList )

    def nodesCheck( self, nodes ):
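        # The "nodes" command returns JSON; each entry looks roughly like
        # ( illustrative ): { "id": "...", "ip": "10.0.0.1", "state": "READY" }
        # The check passes only when every expected node IP reports READY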
        nodesOutput = []
        results = True
        threads = []
        for i in nodes:
            t = main.Thread( target=main.CLIs[ i ].nodes,
                             name="nodes-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            nodesOutput.append( t.result )
        ips = sorted( [ main.nodes[ node ].ip_address for node in nodes ] )
        for i in nodesOutput:
            try:
                current = json.loads( i )
                activeIps = []
                currentResult = False
                for node in current:
                    if node[ 'state' ] == 'READY':
                        activeIps.append( node[ 'ip' ] )
                activeIps.sort()
                if ips == activeIps:
                    currentResult = True
            except ( ValueError, TypeError ):
                main.log.error( "Error parsing nodes output" )
                main.log.warn( repr( i ) )
                currentResult = False
            results = results and currentResult
        return results

    def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA; only needed when there are multiple plots
        #       index = the number of the graph under the plot name
        job = testName
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + str( index ) +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

    def initialSetUp( self, serviceClean=False ):
        """
        Rest of the initial setup for an HA test
        """

        # Create a list of active nodes for use when some nodes are stopped
        main.activeNodes = [ i for i in range( 0, main.numCtrls ) ]
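        # e.g. with main.numCtrls = 7 this gives [ 0, 1, 2, 3, 4, 5, 6 ]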

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( self.nodesCheck,
                                       False,
                                       args=[ main.activeNodes ],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[ i ]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.warn( apps )
            activateResult = True
            for app in apps:
                main.CLIs[ 0 ].app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.CLIs[ 0 ].appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.CLIs[ 0 ].setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "App Ids check" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            node = main.activeNodes[ 0 ]
            main.log.warn( main.CLIs[ node ].apps() )
            main.log.warn( main.CLIs[ node ].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

    def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
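        # Compares each node's view of a distributed work queue against the
        # expected completed / inProgress / pending totals. Illustrative call
        # ( queue name and numbers are not from the source ):
        #   self.workQueueStatsCheck( "TestON-Queue", 2, 0, 1 )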
        # Completed
        threads = []
        completedValues = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].workQueueTotalCompleted,
                             name="WorkQueueCompleted-" + str( i ),
                             args=[ workQueueName ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            completedValues.append( int( t.result ) )
        # Check the results
        completedResults = [ x == completed for x in completedValues ]
        completedResult = all( completedResults )
        if not completedResult:
            main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
                workQueueName, completed, completedValues ) )

        # In Progress
        threads = []
        inProgressValues = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].workQueueTotalInProgress,
                             name="WorkQueueInProgress-" + str( i ),
                             args=[ workQueueName ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            inProgressValues.append( int( t.result ) )
        # Check the results
        inProgressResults = [ x == inProgress for x in inProgressValues ]
        inProgressResult = all( inProgressResults )
        if not inProgressResult:
            main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
                workQueueName, inProgress, inProgressValues ) )

        # Pending
        threads = []
        pendingValues = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].workQueueTotalPending,
                             name="WorkQueuePending-" + str( i ),
                             args=[ workQueueName ] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            pendingValues.append( int( t.result ) )
        # Check the results
        pendingResults = [ x == pending for x in pendingValues ]
        pendingResult = all( pendingResults )
        if not pendingResult:
            main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
                workQueueName, pending, pendingValues ) )
        return completedResult and inProgressResult and pendingResult

    def assignDevices( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
                               "and check that an ONOS node becomes the " + \
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = []
        for i in range( main.ONOSbench.maxNodes ):
            ipList.append( main.nodes[ i ].ip_address )
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
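        # assignSwController points every switch at the full controller list
        # via ovs-vsctl set-controller; the resulting OVS config is roughly
        # ( IPs and port are illustrative, not from the source ):
        #   tcp:10.0.0.1:6653 tcp:10.0.0.2:6653 ...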
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for node in main.nodes:
                if re.search( "tcp:" + node.ip_address, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + node.ip_address + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def assignIntents( self, main ):
        """
        Assign intents
        """
        import time
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            main.HAlabels
        except ( NameError, AttributeError ):
            main.log.error( "main.HAlabels not defined, setting to []" )
            main.HAlabels = []
        try:
            main.HAdata
        except ( NameError, AttributeError ):
            main.log.error( "main.HAdata not defined, setting to []" )
            main.HAdata = []
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        installResults = onosCli.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck = appCheck and t.result
        if appCheck != main.TRUE:
            main.log.warn( onosCli.apps() )
            main.log.warn( onosCli.appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts ( via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %.2f seconds" %
                       ( time2 - time1 ) )
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        node = main.activeNodes[ 0 ]
        uninstallResult = main.CLIs[ node ].deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        threads = []
        appCheck2 = main.TRUE
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].appToIDCheck,
                             name="appToIDCheck-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            appCheck2 = appCheck2 and t.result
        if appCheck2 != main.TRUE:
            node = main.activeNodes[ 0 ]
            main.log.warn( main.CLIs[ node ].apps() )
            main.log.warn( main.CLIs[ node ].appIDs() )
        utilities.assert_equals( expect=main.TRUE, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                    str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                    str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
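            # Illustrative expansion ( not from the source ): for i = 10,
            # hex( 10 )[ 2: ] is "a", so host1 is "00:00:00:00:00:0A" and
            # host2 ( i + 10 = 20 ) is "00:00:00:00:00:14"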
            # NOTE: getHost can return None
            host1Dict = onosCli.getHost( host1 )
            host2Dict = onosCli.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = ( i % len( main.activeNodes ) )
                node = main.activeNodes[ nodeNum ]
                tmpId = main.CLIs[ node ].addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                node = main.activeNodes[ 0 ]
                hosts = main.CLIs[ node ].hosts()
                main.log.warn( "Hosts output: " )
                try:
                    main.log.warn( json.dumps( json.loads( hosts ),
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( hosts ) )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        leaders = onosCli.leaders()
        try:
            missing = False
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                main.log.debug( topics )
                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # Check all nodes
        if missing:
            for i in main.activeNodes:
                response = main.CLIs[ i ].leaders( jsonFormat=False )
                main.log.warn( str( main.CLIs[ i ].name ) + " leaders output: \n" +
                               str( response ) )

        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        for j in range( 100 ):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for i in main.activeNodes:
                onosIds = []
                ids = main.CLIs[ i ].getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + main.CLIs[ i ].name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( main.CLIs[ i ].intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep( 1 )
        if not intentStop:
            intentStop = time.time()
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        append = False
        title = "Gossip Intents"
        count = 1
        while append is False:
            curTitle = title + str( count )
            if curTitle not in main.HAlabels:
                main.HAlabels.append( curTitle )
                main.HAdata.append( str( gossipTime ) )
                append = True
            else:
                count += 1
        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
        maxGossipTime = gossipPeriod * len( main.activeNodes )
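        # e.g. a 5 second gossip period with 7 active nodes allows up to
        # 35 seconds for anti-entropy to converge ( illustrative numbers )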
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            # Check all nodes
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[ i ]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )

    def pingAcrossHostIntent( self, main, multiIntentCheck, activateNode ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents() if multiIntentCheck else main.ONOScli1.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not multiIntentCheck:
                break
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        leaders = onosCli.leaders()
        topicCheck = main.TRUE
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                main.log.warn( json.dumps( parsedLeaders,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # check for all intent partitions
                # check for election
                # TODO: Look at Devices as topics now that it uses this system
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                # FIXME: this should only be after we start the app
                # FIXME: topics.append( "org.onosproject.election" )
                # Print leaders output
                main.log.debug( topics )
                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        topicCheck = main.FALSE
            else:
                main.log.error( "leaders() returned None" )
                topicCheck = main.FALSE
        except ( ValueError, TypeError ):
            topicCheck = main.FALSE
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        # TODO: Check for a leader of these topics
        # Check all nodes
        if topicCheck:
            for i in main.activeNodes:
                node = main.CLIs[ i ]
                response = node.leaders( jsonFormat=False )
                main.log.warn( str( node.name ) + " leaders output: \n" +
                               str( response ) )

        utilities.assert_equals( expect=main.TRUE, actual=topicCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost" )
        # Print partitions
        partitions = onosCli.partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                main.log.warn( json.dumps( parsedPartitions,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )
        # Print Pending Map
        pendingMap = onosCli.pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                main.log.warn( json.dumps( parsedPending,
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            leaders = onosCli.leaders()
            try:
                missing = False
                if leaders:
                    parsedLeaders = json.loads( leaders )
                    main.log.warn( json.dumps( parsedLeaders,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # check for all intent partitions
                    # check for election
                    topics = []
                    for i in range( 14 ):
                        topics.append( "work-partition-" + str( i ) )
                    # FIXME: this should only be after we start the app
                    topics.append( "org.onosproject.election" )
                    main.log.debug( topics )
                    ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                    for topic in topics:
                        if topic not in ONOStopics:
                            main.log.error( "Error: " + topic +
                                            " not in leaders" )
                            missing = True
                else:
                    main.log.error( "leaders() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing leaders" )
                main.log.error( repr( leaders ) )
            if missing:
                for i in main.activeNodes:
                    node = main.CLIs[ i ]
                    response = node.leaders( jsonFormat=False )
                    main.log.warn( str( node.name ) + " leaders output: \n" +
                                   str( response ) )

            partitions = onosCli.partitions()
            try:
                if partitions:
                    parsedPartitions = json.loads( partitions )
                    main.log.warn( json.dumps( parsedPartitions,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check for a leader in all partitions
                    # TODO check for consistency among nodes
                else:
                    main.log.error( "partitions() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing partitions" )
                main.log.error( repr( partitions ) )
            pendingMap = onosCli.pendingMap()
            try:
                if pendingMap:
                    parsedPending = json.loads( pendingMap )
                    main.log.warn( json.dumps( parsedPending,
                                               sort_keys=True,
                                               indent=4,
                                               separators=( ',', ': ' ) ) )
                    # TODO check something here?
                else:
                    main.log.error( "pendingMap() returned None" )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing pending map" )
                main.log.error( repr( pendingMap ) )
        # Print flowrules
        main.log.debug( main.CLIs[ main.activeNodes[ 0 ] ].flows( jsonFormat=False )
                        if activateNode else onosCli.flows( jsonFormat=False ) )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( "ONOS1 intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

    def readingState( self, main ):
        """
        Reading state of ONOS
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            from tests.dependencies.topology import Topology
        except ImportError:
            main.log.error( "Topology not found, exiting the test" )
            main.exit()
        try:
            main.topoRelated
        except ( NameError, AttributeError ):
            main.topoRelated = Topology()
        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents, flows, topology, ... ) from each ONOS node
        # We can then compare them with each other and also with past states

        main.step( "Check that each switch has a master" )
        global mastershipState
        mastershipState = '[]'

        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Get the Mastership of each switch from each controller" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[ i ] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[ i ] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            mastershipState = ONOSMastership[ 0 ]

        main.step( "Get the intents from each controller" )
        global intentState
        intentState = []
        ONOSIntents = []
        consistentIntents = True  # Are Intents consistent across nodes?
        intentsResults = True  # Could we read Intents from ONOS?
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            #  0x01     INSTALLED  INSTALLING
            #  ...        ...         ...
            #  ...        ...         ...
            title = "   Id"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            n = str( main.activeNodes[ -1 ] + 1 )
            main.log.debug( "ONOS" + n + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.activeNodes[ i ] + 1 )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( "ONOS" + node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( "ONOS" + node + " intents match ONOS" +
                                    n + " intents" )
        elif intentsResults and consistentIntents:
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = []
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].flows,
                             name="flows-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        # NOTE: Flows command can take some time to run
        time.sleep( 30 )
        for t in threads:
            t.join()
            result = t.result
            ONOSFlows.append( result )

        for i in range( len( ONOSFlows ) ):
            num = str( main.activeNodes[ i ] + 1 )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting ONOS" + num + " flows" )
                main.log.warn( "ONOS" + num + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing ONOS" + num +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.activeNodes[ i ] + 1 )
                try:
                    main.log.warn(
                        "ONOS" + node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( "ONOS" + node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        global flows
        flows = []
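        # getFlowTable dumps each switch's OpenFlow 1.3 flow table, roughly
        # equivalent to "ovs-ofctl -O OpenFlow13 dump-flows s<i>" ( assumed
        # driver behavior, noted here for context )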
        for i in range( 1, 29 ):
            flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        # Ten background ping pairs, driven by the source1/target1 through
        # source10/target10 entries in the params file
        for i in range( 1, 11 ):
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source' + str( i ) ],
                target=main.params[ 'PING' ][ 'target' + str( i ) ],
                pingTime=500 )
1564
1565 main.step( "Collecting topology information from ONOS" )
1566 devices = main.topoRelated.getAllDevices( main.activeNodes, False )
1567 hosts = main.topoRelated.getAllHosts( main.activeNodes, False, inJson=True )
1568 ports = main.topoRelated.getAllPorts( main.activeNodes, False )
1569 links = main.topoRelated.getAllLinks( main.activeNodes, False )
1570 clusters = main.topoRelated.getAllClusters( main.activeNodes, False )
1571 # Compare json objects for hosts and dataplane clusters
1572
1573 # hosts
1574 main.step( "Host view is consistent across ONOS nodes" )
1575 consistentHostsResult = main.TRUE
1576 for controller in range( len( hosts ) ):
1577 controllerStr = str( main.activeNodes[ controller ] + 1 )
1578 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1579 if hosts[ controller ] == hosts[ 0 ]:
1580 continue
1581 else: # hosts not consistent
1582 main.log.error( "hosts from ONOS" +
1583 controllerStr +
1584 " is inconsistent with ONOS1" )
1585 main.log.warn( repr( hosts[ controller ] ) )
1586 consistentHostsResult = main.FALSE
1587
1588 else:
1589 main.log.error( "Error in getting ONOS hosts from ONOS" +
1590 controllerStr )
1591 consistentHostsResult = main.FALSE
1592 main.log.warn( "ONOS" + controllerStr +
1593 " hosts response: " +
1594 repr( hosts[ controller ] ) )
1595 utilities.assert_equals(
1596 expect=main.TRUE,
1597 actual=consistentHostsResult,
1598 onpass="Hosts view is consistent across all ONOS nodes",
1599 onfail="ONOS nodes have different views of hosts" )
1600
1601 main.step( "Each host has an IP address" )
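# A host with an empty ipAddresses list was likely discovered from a
# packet-in without any ARP/IP information, so we count that as a failure.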
1602 ipResult = main.TRUE
1603 for controller in range( 0, len( hosts ) ):
1604 controllerStr = str( main.activeNodes[ controller ] + 1 )
1605 if hosts[ controller ]:
1606 for host in hosts[ controller ]:
1607 if not host.get( 'ipAddresses', [] ):
1608 main.log.error( "Error with host ips on controller" +
1609 controllerStr + ": " + str( host ) )
1610 ipResult = main.FALSE
1611 utilities.assert_equals(
1612 expect=main.TRUE,
1613 actual=ipResult,
1614 onpass="The IPs of the hosts aren't empty",
1615 onfail="The ip of at least one host is missing" )
1616
1617 # Strongly connected clusters of devices
1618 main.step( "Cluster view is consistent across ONOS nodes" )
1619 consistentClustersResult = main.TRUE
1620 for controller in range( len( clusters ) ):
1621 controllerStr = str( main.activeNodes[ controller ] + 1 )
1622 if "Error" not in clusters[ controller ]:
1623 if clusters[ controller ] == clusters[ 0 ]:
1624 continue
1625 else: # clusters not consistent
1626 main.log.error( "clusters from ONOS" + controllerStr +
1627 " is inconsistent with ONOS1" )
1628 consistentClustersResult = main.FALSE
1629
1630 else:
1631 main.log.error( "Error in getting dataplane clusters " +
1632 "from ONOS" + controllerStr )
1633 consistentClustersResult = main.FALSE
1634 main.log.warn( "ONOS" + controllerStr +
1635 " clusters response: " +
1636 repr( clusters[ controller ] ) )
1637 utilities.assert_equals(
1638 expect=main.TRUE,
1639 actual=consistentClustersResult,
1640 onpass="Clusters view is consistent across all ONOS nodes",
1641 onfail="ONOS nodes have different views of clusters" )
1642 if not consistentClustersResult:
1643 main.log.debug( clusters )
1644
1645 # A healthy dataplane forms one strongly connected component, so there should always be exactly one cluster
1646 main.step( "Cluster view correct across ONOS nodes" )
1647 try:
1648 numClusters = len( json.loads( clusters[ 0 ] ) )
1649 except ( ValueError, TypeError ):
1650 main.log.exception( "Error parsing clusters[0]: " +
1651 repr( clusters[ 0 ] ) )
1652 numClusters = "ERROR"
1653 utilities.assert_equals(
1654 expect=1,
1655 actual=numClusters,
1656 onpass="ONOS shows 1 SCC",
1657 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1658
1659 main.step( "Comparing ONOS topology to MN" )
1660 devicesResults = main.TRUE
1661 linksResults = main.TRUE
1662 hostsResults = main.TRUE
1663 mnSwitches = main.Mininet1.getSwitches()
1664 mnLinks = main.Mininet1.getLinks()
1665 mnHosts = main.Mininet1.getHosts()
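# Compare each active node's device/port, link, and host views against
# the ground truth just pulled from Mininet.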
# Iterate by position so indexing into the devices/hosts/ports lists lines up
1666 for controller in range( len( main.activeNodes ) ):
1667 controllerStr = str( main.activeNodes[ controller ] + 1 )
1668 currentDevicesResult = main.topoRelated.compareDevicePort(
1669 main.Mininet1, controller,
1670 mnSwitches, devices, ports )
1671 utilities.assert_equals( expect=main.TRUE,
1672 actual=currentDevicesResult,
1673 onpass="ONOS" + controllerStr +
1674 " Switches view is correct",
1675 onfail="ONOS" + controllerStr +
1676 " Switches view is incorrect" )
1677
1678 currentLinksResult = main.topoRelated.compareBase( links, controller,
1679 main.Mininet1.compareLinks,
1680 [ mnSwitches, mnLinks ] )
1681 utilities.assert_equals( expect=main.TRUE,
1682 actual=currentLinksResult,
1683 onpass="ONOS" + controllerStr +
1684 " links view is correct",
1685 onfail="ONOS" + controllerStr +
1686 " links view is incorrect" )
1687
1688 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1689 currentHostsResult = main.Mininet1.compareHosts(
1690 mnHosts,
1691 hosts[ controller ] )
1692 else:
1693 currentHostsResult = main.FALSE
1694 utilities.assert_equals( expect=main.TRUE,
1695 actual=currentHostsResult,
1696 onpass="ONOS" + controllerStr +
1697 " hosts exist in Mininet",
1698 onfail="ONOS" + controllerStr +
1699 " hosts don't match Mininet" )
1700
1701 devicesResults = devicesResults and currentDevicesResult
1702 linksResults = linksResults and currentLinksResult
1703 hostsResults = hostsResults and currentHostsResult
1704
1705 main.step( "Device information is correct" )
1706 utilities.assert_equals(
1707 expect=main.TRUE,
1708 actual=devicesResults,
1709 onpass="Device information is correct",
1710 onfail="Device information is incorrect" )
1711
1712 main.step( "Links are correct" )
1713 utilities.assert_equals(
1714 expect=main.TRUE,
1715 actual=linksResults,
1716 onpass="Link are correct",
1717 onfail="Links are incorrect" )
1718
1719 main.step( "Hosts are correct" )
1720 utilities.assert_equals(
1721 expect=main.TRUE,
1722 actual=hostsResults,
1723 onpass="Hosts are correct",
1724 onfail="Hosts are incorrect" )
1725
1726 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001727 """
1728 Check for basic functionality with distributed primitives
1729 """
Jon Halle0f0b342017-04-18 11:43:47 -07001730 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001731 try:
1732 # Make sure variables are defined/set
1733 assert main.numCtrls, "main.numCtrls not defined"
1734 assert utilities.assert_equals, "utilities.assert_equals not defined"
1735 assert main.CLIs, "main.CLIs not defined"
1736 assert main.nodes, "main.nodes not defined"
1737 assert main.pCounterName, "main.pCounterName not defined"
1738 assert main.onosSetName, "main.onosSetName not defined"
1739 # NOTE: assert fails if value is 0/None/Empty/False
1740 try:
1741 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001742 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001743 main.log.error( "main.pCounterValue not defined, setting to 0" )
1744 main.pCounterValue = 0
1745 try:
1746 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001747 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001748 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001749 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001750 # Variables for the distributed primitives tests. These are local only
1751 addValue = "a"
1752 addAllValue = "a b c d e f"
1753 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001754 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001755 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001756 workQueueName = "TestON-Queue"
1757 workQueueCompleted = 0
1758 workQueueInProgress = 0
1759 workQueuePending = 0
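# These three counters track the *expected* Work Queue stats; they are
# updated alongside every queue operation below and then compared against
# the actual stats via self.workQueueStatsCheck().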
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001760
1761 description = "Check for basic functionality with distributed " +\
1762 "primitives"
1763 main.case( description )
1764 main.caseExplanation = "Test the methods of the distributed " +\
1765 "primitives (counters and sets) throught the cli"
1766 # DISTRIBUTED ATOMIC COUNTERS
1767 # Partitioned counters
1768 main.step( "Increment then get a default counter on each node" )
1769 pCounters = []
1770 threads = []
1771 addedPValues = []
1772 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001773 t = main.Thread( target=main.CLIs[ i ].counterTestAddAndGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001774 name="counterAddAndGet-" + str( i ),
1775 args=[ main.pCounterName ] )
1776 main.pCounterValue += 1
1777 addedPValues.append( main.pCounterValue )
1778 threads.append( t )
1779 t.start()
1780
1781 for t in threads:
1782 t.join()
1783 pCounters.append( t.result )
1784 # Check that counter incremented numController times
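# NOTE: since counterTestAddAndGet is atomic, every expected intermediate
# value recorded in addedPValues must appear somewhere in the returned
# values, though not necessarily in thread order.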
1785 pCounterResults = True
1786 for i in addedPValues:
1787 tmpResult = i in pCounters
1788 pCounterResults = pCounterResults and tmpResult
1789 if not tmpResult:
1790 main.log.error( str( i ) + " is not in partitioned "
1791 "counter incremented results" )
1792 utilities.assert_equals( expect=True,
1793 actual=pCounterResults,
1794 onpass="Default counter incremented",
1795 onfail="Error incrementing default" +
1796 " counter" )
1797
1798 main.step( "Get then Increment a default counter on each node" )
1799 pCounters = []
1800 threads = []
1801 addedPValues = []
1802 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001803 t = main.Thread( target=main.CLIs[ i ].counterTestGetAndAdd,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001804 name="counterGetAndAdd-" + str( i ),
1805 args=[ main.pCounterName ] )
1806 addedPValues.append( main.pCounterValue )
1807 main.pCounterValue += 1
1808 threads.append( t )
1809 t.start()
1810
1811 for t in threads:
1812 t.join()
1813 pCounters.append( t.result )
1814 # Check that counter incremented numController times
1815 pCounterResults = True
1816 for i in addedPValues:
1817 tmpResult = i in pCounters
1818 pCounterResults = pCounterResults and tmpResult
1819 if not tmpResult:
1820 main.log.error( str( i ) + " is not in partitioned "
1821 "counter incremented results" )
1822 utilities.assert_equals( expect=True,
1823 actual=pCounterResults,
1824 onpass="Default counter incremented",
1825 onfail="Error incrementing default" +
1826 " counter" )
1827
1828 main.step( "Counters we added have the correct values" )
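# counterCheck() presumably reads the counter back from every active node
# and compares it against the locally tracked expected value.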
Devin Lim58046fa2017-07-05 16:55:00 -07001829 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001830 utilities.assert_equals( expect=main.TRUE,
1831 actual=incrementCheck,
1832 onpass="Added counters are correct",
1833 onfail="Added counters are incorrect" )
1834
1835 main.step( "Add -8 to then get a default counter on each node" )
1836 pCounters = []
1837 threads = []
1838 addedPValues = []
1839 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001840 t = main.Thread( target=main.CLIs[ i ].counterTestAddAndGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001841 name="counterIncrement-" + str( i ),
1842 args=[ main.pCounterName ],
1843 kwargs={ "delta": -8 } )
1844 main.pCounterValue += -8
1845 addedPValues.append( main.pCounterValue )
1846 threads.append( t )
1847 t.start()
1848
1849 for t in threads:
1850 t.join()
1851 pCounters.append( t.result )
1852 # Check that counter incremented numController times
1853 pCounterResults = True
1854 for i in addedPValues:
1855 tmpResult = i in pCounters
1856 pCounterResults = pCounterResults and tmpResult
1857 if not tmpResult:
1858 main.log.error( str( i ) + " is not in partitioned "
1859 "counter incremented results" )
1860 utilities.assert_equals( expect=True,
1861 actual=pCounterResults,
1862 onpass="Default counter incremented",
1863 onfail="Error incrementing default" +
1864 " counter" )
1865
1866 main.step( "Add 5 to then get a default counter on each node" )
1867 pCounters = []
1868 threads = []
1869 addedPValues = []
1870 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001871 t = main.Thread( target=main.CLIs[ i ].counterTestAddAndGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001872 name="counterIncrement-" + str( i ),
1873 args=[ main.pCounterName ],
1874 kwargs={ "delta": 5 } )
1875 main.pCounterValue += 5
1876 addedPValues.append( main.pCounterValue )
1877 threads.append( t )
1878 t.start()
1879
1880 for t in threads:
1881 t.join()
1882 pCounters.append( t.result )
1883 # Check that counter incremented numController times
1884 pCounterResults = True
1885 for i in addedPValues:
1886 tmpResult = i in pCounters
1887 pCounterResults = pCounterResults and tmpResult
1888 if not tmpResult:
1889 main.log.error( str( i ) + " is not in partitioned "
1890 "counter incremented results" )
1891 utilities.assert_equals( expect=True,
1892 actual=pCounterResults,
1893 onpass="Default counter incremented",
1894 onfail="Error incrementing default" +
1895 " counter" )
1896
1897 main.step( "Get then add 5 to a default counter on each node" )
1898 pCounters = []
1899 threads = []
1900 addedPValues = []
1901 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001902 t = main.Thread( target=main.CLIs[ i ].counterTestGetAndAdd,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001903 name="counterIncrement-" + str( i ),
1904 args=[ main.pCounterName ],
1905 kwargs={ "delta": 5 } )
1906 addedPValues.append( main.pCounterValue )
1907 main.pCounterValue += 5
1908 threads.append( t )
1909 t.start()
1910
1911 for t in threads:
1912 t.join()
1913 pCounters.append( t.result )
1914 # Check that counter incremented numController times
1915 pCounterResults = True
1916 for i in addedPValues:
1917 tmpResult = i in pCounters
1918 pCounterResults = pCounterResults and tmpResult
1919 if not tmpResult:
1920 main.log.error( str( i ) + " is not in partitioned "
1921 "counter incremented results" )
1922 utilities.assert_equals( expect=True,
1923 actual=pCounterResults,
1924 onpass="Default counter incremented",
1925 onfail="Error incrementing default" +
1926 " counter" )
1927
1928 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001929 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001930 utilities.assert_equals( expect=main.TRUE,
1931 actual=incrementCheck,
1932 onpass="Added counters are correct",
1933 onfail="Added counters are incorrect" )
1934
1935 # DISTRIBUTED SETS
1936 main.step( "Distributed Set get" )
1937 size = len( main.onosSet )
1938 getResponses = []
1939 threads = []
1940 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001941 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001942 name="setTestGet-" + str( i ),
1943 args=[ main.onosSetName ] )
1944 threads.append( t )
1945 t.start()
1946 for t in threads:
1947 t.join()
1948 getResponses.append( t.result )
1949
1950 getResults = main.TRUE
1951 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001952 node = str( main.activeNodes[ i ] + 1 )
1953 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001954 current = set( getResponses[ i ] )
1955 if len( current ) == len( getResponses[ i ] ):
1956 # no repeats
1957 if main.onosSet != current:
1958 main.log.error( "ONOS" + node +
1959 " has incorrect view" +
1960 " of set " + main.onosSetName + ":\n" +
1961 str( getResponses[ i ] ) )
1962 main.log.debug( "Expected: " + str( main.onosSet ) )
1963 main.log.debug( "Actual: " + str( current ) )
1964 getResults = main.FALSE
1965 else:
1966 # error, set is not a set
1967 main.log.error( "ONOS" + node +
1968 " has repeat elements in" +
1969 " set " + main.onosSetName + ":\n" +
1970 str( getResponses[ i ] ) )
1971 getResults = main.FALSE
1972 elif getResponses[ i ] == main.ERROR:
1973 getResults = main.FALSE
1974 utilities.assert_equals( expect=main.TRUE,
1975 actual=getResults,
1976 onpass="Set elements are correct",
1977 onfail="Set elements are incorrect" )
1978
1979 main.step( "Distributed Set size" )
1980 sizeResponses = []
1981 threads = []
1982 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07001983 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001984 name="setTestSize-" + str( i ),
1985 args=[ main.onosSetName ] )
1986 threads.append( t )
1987 t.start()
1988 for t in threads:
1989 t.join()
1990 sizeResponses.append( t.result )
1991
1992 sizeResults = main.TRUE
1993 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07001994 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001995 if size != sizeResponses[ i ]:
1996 sizeResults = main.FALSE
1997 main.log.error( "ONOS" + node +
1998 " expected a size of " + str( size ) +
1999 " for set " + main.onosSetName +
2000 " but got " + str( sizeResponses[ i ] ) )
2001 utilities.assert_equals( expect=main.TRUE,
2002 actual=sizeResults,
2003 onpass="Set sizes are correct",
2004 onfail="Set sizes are incorrect" )
2005
2006 main.step( "Distributed Set add()" )
2007 main.onosSet.add( addValue )
2008 addResponses = []
2009 threads = []
2010 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002011 t = main.Thread( target=main.CLIs[ i ].setTestAdd,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002012 name="setTestAdd-" + str( i ),
2013 args=[ main.onosSetName, addValue ] )
2014 threads.append( t )
2015 t.start()
2016 for t in threads:
2017 t.join()
2018 addResponses.append( t.result )
2019
2020 # main.TRUE = successfully changed the set
2021 # main.FALSE = action resulted in no change in set
2022 # main.ERROR - Some error in executing the function
2023 addResults = main.TRUE
2024 for i in range( len( main.activeNodes ) ):
2025 if addResponses[ i ] == main.TRUE:
2026 # All is well
2027 pass
2028 elif addResponses[ i ] == main.FALSE:
2029 # Already in set, probably fine
2030 pass
2031 elif addResponses[ i ] == main.ERROR:
2032 # Error in execution
2033 addResults = main.FALSE
2034 else:
2035 # unexpected result
2036 addResults = main.FALSE
2037 if addResults != main.TRUE:
2038 main.log.error( "Error executing set add" )
2039
2040 # Check if set is still correct
2041 size = len( main.onosSet )
2042 getResponses = []
2043 threads = []
2044 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002045 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002046 name="setTestGet-" + str( i ),
2047 args=[ main.onosSetName ] )
2048 threads.append( t )
2049 t.start()
2050 for t in threads:
2051 t.join()
2052 getResponses.append( t.result )
2053 getResults = main.TRUE
2054 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002055 node = str( main.activeNodes[ i ] + 1 )
2056 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002057 current = set( getResponses[ i ] )
2058 if len( current ) == len( getResponses[ i ] ):
2059 # no repeats
2060 if main.onosSet != current:
2061 main.log.error( "ONOS" + node + " has incorrect view" +
2062 " of set " + main.onosSetName + ":\n" +
2063 str( getResponses[ i ] ) )
2064 main.log.debug( "Expected: " + str( main.onosSet ) )
2065 main.log.debug( "Actual: " + str( current ) )
2066 getResults = main.FALSE
2067 else:
2068 # error, set is not a set
2069 main.log.error( "ONOS" + node + " has repeat elements in" +
2070 " set " + main.onosSetName + ":\n" +
2071 str( getResponses[ i ] ) )
2072 getResults = main.FALSE
2073 elif getResponses[ i ] == main.ERROR:
2074 getResults = main.FALSE
2075 sizeResponses = []
2076 threads = []
2077 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002078 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002079 name="setTestSize-" + str( i ),
2080 args=[ main.onosSetName ] )
2081 threads.append( t )
2082 t.start()
2083 for t in threads:
2084 t.join()
2085 sizeResponses.append( t.result )
2086 sizeResults = main.TRUE
2087 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002088 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002089 if size != sizeResponses[ i ]:
2090 sizeResults = main.FALSE
2091 main.log.error( "ONOS" + node +
2092 " expected a size of " + str( size ) +
2093 " for set " + main.onosSetName +
2094 " but got " + str( sizeResponses[ i ] ) )
2095 addResults = addResults and getResults and sizeResults
2096 utilities.assert_equals( expect=main.TRUE,
2097 actual=addResults,
2098 onpass="Set add correct",
2099 onfail="Set add was incorrect" )
2100
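# NOTE: the get + size verification below is repeated after every set
# mutation; it could be factored into a helper, e.g. ( hypothetical
# sketch, not used in this test yet ):
#
# def checkOnosSet( expectedSet ):
#     responses = [ main.CLIs[ i ].setTestGet( main.onosSetName )
#                   for i in main.activeNodes ]
#     ok = all( isinstance( r, list ) and set( r ) == expectedSet and
#               len( set( r ) ) == len( r ) for r in responses )
#     return main.TRUE if ok else main.FALSE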
2101 main.step( "Distributed Set addAll()" )
2102 main.onosSet.update( addAllValue.split() )
2103 addResponses = []
2104 threads = []
2105 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002106 t = main.Thread( target=main.CLIs[ i ].setTestAdd,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002107 name="setTestAddAll-" + str( i ),
2108 args=[ main.onosSetName, addAllValue ] )
2109 threads.append( t )
2110 t.start()
2111 for t in threads:
2112 t.join()
2113 addResponses.append( t.result )
2114
2115 # main.TRUE = successfully changed the set
2116 # main.FALSE = action resulted in no change in set
2117 # main.ERROR - Some error in executing the function
2118 addAllResults = main.TRUE
2119 for i in range( len( main.activeNodes ) ):
2120 if addResponses[ i ] == main.TRUE:
2121 # All is well
2122 pass
2123 elif addResponses[ i ] == main.FALSE:
2124 # Already in set, probably fine
2125 pass
2126 elif addResponses[ i ] == main.ERROR:
2127 # Error in execution
2128 addAllResults = main.FALSE
2129 else:
2130 # unexpected result
2131 addAllResults = main.FALSE
2132 if addAllResults != main.TRUE:
2133 main.log.error( "Error executing set addAll" )
2134
2135 # Check if set is still correct
2136 size = len( main.onosSet )
2137 getResponses = []
2138 threads = []
2139 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002140 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002141 name="setTestGet-" + str( i ),
2142 args=[ main.onosSetName ] )
2143 threads.append( t )
2144 t.start()
2145 for t in threads:
2146 t.join()
2147 getResponses.append( t.result )
2148 getResults = main.TRUE
2149 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002150 node = str( main.activeNodes[ i ] + 1 )
2151 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002152 current = set( getResponses[ i ] )
2153 if len( current ) == len( getResponses[ i ] ):
2154 # no repeats
2155 if main.onosSet != current:
2156 main.log.error( "ONOS" + node +
2157 " has incorrect view" +
2158 " of set " + main.onosSetName + ":\n" +
2159 str( getResponses[ i ] ) )
2160 main.log.debug( "Expected: " + str( main.onosSet ) )
2161 main.log.debug( "Actual: " + str( current ) )
2162 getResults = main.FALSE
2163 else:
2164 # error, set is not a set
2165 main.log.error( "ONOS" + node +
2166 " has repeat elements in" +
2167 " set " + main.onosSetName + ":\n" +
2168 str( getResponses[ i ] ) )
2169 getResults = main.FALSE
2170 elif getResponses[ i ] == main.ERROR:
2171 getResults = main.FALSE
2172 sizeResponses = []
2173 threads = []
2174 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002175 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002176 name="setTestSize-" + str( i ),
2177 args=[ main.onosSetName ] )
2178 threads.append( t )
2179 t.start()
2180 for t in threads:
2181 t.join()
2182 sizeResponses.append( t.result )
2183 sizeResults = main.TRUE
2184 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002185 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002186 if size != sizeResponses[ i ]:
2187 sizeResults = main.FALSE
2188 main.log.error( "ONOS" + node +
2189 " expected a size of " + str( size ) +
2190 " for set " + main.onosSetName +
2191 " but got " + str( sizeResponses[ i ] ) )
2192 addAllResults = addAllResults and getResults and sizeResults
2193 utilities.assert_equals( expect=main.TRUE,
2194 actual=addAllResults,
2195 onpass="Set addAll correct",
2196 onfail="Set addAll was incorrect" )
2197
2198 main.step( "Distributed Set contains()" )
2199 containsResponses = []
2200 threads = []
2201 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002202 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002203 name="setContains-" + str( i ),
2204 args=[ main.onosSetName ],
2205 kwargs={ "values": addValue } )
2206 threads.append( t )
2207 t.start()
2208 for t in threads:
2209 t.join()
2210 # NOTE: the result is a tuple; element [ 1 ] holds the contains boolean
2211 containsResponses.append( t.result )
2212
2213 containsResults = main.TRUE
2214 for i in range( len( main.activeNodes ) ):
2215 if containsResponses[ i ] == main.ERROR:
2216 containsResults = main.FALSE
2217 else:
2218 containsResults = containsResults and\
2219 containsResponses[ i ][ 1 ]
2220 utilities.assert_equals( expect=main.TRUE,
2221 actual=containsResults,
2222 onpass="Set contains is functional",
2223 onfail="Set contains failed" )
2224
2225 main.step( "Distributed Set containsAll()" )
2226 containsAllResponses = []
2227 threads = []
2228 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002229 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002230 name="setContainsAll-" + str( i ),
2231 args=[ main.onosSetName ],
2232 kwargs={ "values": addAllValue } )
2233 threads.append( t )
2234 t.start()
2235 for t in threads:
2236 t.join()
2237 # NOTE: the result is a tuple; element [ 1 ] holds the containsAll boolean
2238 containsAllResponses.append( t.result )
2239
2240 containsAllResults = main.TRUE
2241 for i in range( len( main.activeNodes ) ):
2242 if containsAllResponses[ i ] == main.ERROR:
2243 containsAllResults = main.FALSE
2244 else:
2245 containsAllResults = containsAllResults and\
2246 containsAllResponses[ i ][ 1 ]
2247 utilities.assert_equals( expect=main.TRUE,
2248 actual=containsAllResults,
2249 onpass="Set containsAll is functional",
2250 onfail="Set containsAll failed" )
2251
2252 main.step( "Distributed Set remove()" )
2253 main.onosSet.remove( addValue )
2254 removeResponses = []
2255 threads = []
2256 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002257 t = main.Thread( target=main.CLIs[ i ].setTestRemove,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002258 name="setTestRemove-" + str( i ),
2259 args=[ main.onosSetName, addValue ] )
2260 threads.append( t )
2261 t.start()
2262 for t in threads:
2263 t.join()
2264 removeResponses.append( t.result )
2265
2266 # main.TRUE = successfully changed the set
2267 # main.FALSE = action resulted in no change in set
2268 # main.ERROR - Some error in executing the function
2269 removeResults = main.TRUE
2270 for i in range( len( main.activeNodes ) ):
2271 if removeResponses[ i ] == main.TRUE:
2272 # All is well
2273 pass
2274 elif removeResponses[ i ] == main.FALSE:
2275 # not in set, probably fine
2276 pass
2277 elif removeResponses[ i ] == main.ERROR:
2278 # Error in execution
2279 removeResults = main.FALSE
2280 else:
2281 # unexpected result
2282 removeResults = main.FALSE
2283 if removeResults != main.TRUE:
2284 main.log.error( "Error executing set remove" )
2285
2286 # Check if set is still correct
2287 size = len( main.onosSet )
2288 getResponses = []
2289 threads = []
2290 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002291 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002292 name="setTestGet-" + str( i ),
2293 args=[ main.onosSetName ] )
2294 threads.append( t )
2295 t.start()
2296 for t in threads:
2297 t.join()
2298 getResponses.append( t.result )
2299 getResults = main.TRUE
2300 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002301 node = str( main.activeNodes[ i ] + 1 )
2302 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002303 current = set( getResponses[ i ] )
2304 if len( current ) == len( getResponses[ i ] ):
2305 # no repeats
2306 if main.onosSet != current:
2307 main.log.error( "ONOS" + node +
2308 " has incorrect view" +
2309 " of set " + main.onosSetName + ":\n" +
2310 str( getResponses[ i ] ) )
2311 main.log.debug( "Expected: " + str( main.onosSet ) )
2312 main.log.debug( "Actual: " + str( current ) )
2313 getResults = main.FALSE
2314 else:
2315 # error, set is not a set
2316 main.log.error( "ONOS" + node +
2317 " has repeat elements in" +
2318 " set " + main.onosSetName + ":\n" +
2319 str( getResponses[ i ] ) )
2320 getResults = main.FALSE
2321 elif getResponses[ i ] == main.ERROR:
2322 getResults = main.FALSE
2323 sizeResponses = []
2324 threads = []
2325 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002326 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002327 name="setTestSize-" + str( i ),
2328 args=[ main.onosSetName ] )
2329 threads.append( t )
2330 t.start()
2331 for t in threads:
2332 t.join()
2333 sizeResponses.append( t.result )
2334 sizeResults = main.TRUE
2335 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002336 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002337 if size != sizeResponses[ i ]:
2338 sizeResults = main.FALSE
2339 main.log.error( "ONOS" + node +
2340 " expected a size of " + str( size ) +
2341 " for set " + main.onosSetName +
2342 " but got " + str( sizeResponses[ i ] ) )
2343 removeResults = removeResults and getResults and sizeResults
2344 utilities.assert_equals( expect=main.TRUE,
2345 actual=removeResults,
2346 onpass="Set remove correct",
2347 onfail="Set remove was incorrect" )
2348
2349 main.step( "Distributed Set removeAll()" )
2350 main.onosSet.difference_update( addAllValue.split() )
2351 removeAllResponses = []
2352 threads = []
2353 try:
2354 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002355 t = main.Thread( target=main.CLIs[ i ].setTestRemove,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002356 name="setTestRemoveAll-" + str( i ),
2357 args=[ main.onosSetName, addAllValue ] )
2358 threads.append( t )
2359 t.start()
2360 for t in threads:
2361 t.join()
2362 removeAllResponses.append( t.result )
Jon Hallf37d44d2017-05-24 10:37:30 -07002363 except Exception as e:
2364 main.log.exception( e )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002365
2366 # main.TRUE = successfully changed the set
2367 # main.FALSE = action resulted in no change in set
2368 # main.ERROR - Some error in executing the function
2369 removeAllResults = main.TRUE
2370 for i in range( len( main.activeNodes ) ):
2371 if removeAllResponses[ i ] == main.TRUE:
2372 # All is well
2373 pass
2374 elif removeAllResponses[ i ] == main.FALSE:
2375 # not in set, probably fine
2376 pass
2377 elif removeAllResponses[ i ] == main.ERROR:
2378 # Error in execution
2379 removeAllResults = main.FALSE
2380 else:
2381 # unexpected result
2382 removeAllResults = main.FALSE
2383 if removeAllResults != main.TRUE:
2384 main.log.error( "Error executing set removeAll" )
2385
2386 # Check if set is still correct
2387 size = len( main.onosSet )
2388 getResponses = []
2389 threads = []
2390 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002391 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002392 name="setTestGet-" + str( i ),
2393 args=[ main.onosSetName ] )
2394 threads.append( t )
2395 t.start()
2396 for t in threads:
2397 t.join()
2398 getResponses.append( t.result )
2399 getResults = main.TRUE
2400 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002401 node = str( main.activeNodes[ i ] + 1 )
2402 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002403 current = set( getResponses[ i ] )
2404 if len( current ) == len( getResponses[ i ] ):
2405 # no repeats
2406 if main.onosSet != current:
2407 main.log.error( "ONOS" + node +
2408 " has incorrect view" +
2409 " of set " + main.onosSetName + ":\n" +
2410 str( getResponses[ i ] ) )
2411 main.log.debug( "Expected: " + str( main.onosSet ) )
2412 main.log.debug( "Actual: " + str( current ) )
2413 getResults = main.FALSE
2414 else:
2415 # error, set is not a set
2416 main.log.error( "ONOS" + node +
2417 " has repeat elements in" +
2418 " set " + main.onosSetName + ":\n" +
2419 str( getResponses[ i ] ) )
2420 getResults = main.FALSE
2421 elif getResponses[ i ] == main.ERROR:
2422 getResults = main.FALSE
2423 sizeResponses = []
2424 threads = []
2425 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002426 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002427 name="setTestSize-" + str( i ),
2428 args=[ main.onosSetName ] )
2429 threads.append( t )
2430 t.start()
2431 for t in threads:
2432 t.join()
2433 sizeResponses.append( t.result )
2434 sizeResults = main.TRUE
2435 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002436 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002437 if size != sizeResponses[ i ]:
2438 sizeResults = main.FALSE
2439 main.log.error( "ONOS" + node +
2440 " expected a size of " + str( size ) +
2441 " for set " + main.onosSetName +
2442 " but got " + str( sizeResponses[ i ] ) )
2443 removeAllResults = removeAllResults and getResults and sizeResults
2444 utilities.assert_equals( expect=main.TRUE,
2445 actual=removeAllResults,
2446 onpass="Set removeAll correct",
2447 onfail="Set removeAll was incorrect" )
2448
2449 main.step( "Distributed Set addAll()" )
2450 main.onosSet.update( addAllValue.split() )
2451 addResponses = []
2452 threads = []
2453 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002454 t = main.Thread( target=main.CLIs[ i ].setTestAdd,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002455 name="setTestAddAll-" + str( i ),
2456 args=[ main.onosSetName, addAllValue ] )
2457 threads.append( t )
2458 t.start()
2459 for t in threads:
2460 t.join()
2461 addResponses.append( t.result )
2462
2463 # main.TRUE = successfully changed the set
2464 # main.FALSE = action resulted in no change in set
2465 # main.ERROR - Some error in executing the function
2466 addAllResults = main.TRUE
2467 for i in range( len( main.activeNodes ) ):
2468 if addResponses[ i ] == main.TRUE:
2469 # All is well
2470 pass
2471 elif addResponses[ i ] == main.FALSE:
2472 # Already in set, probably fine
2473 pass
2474 elif addResponses[ i ] == main.ERROR:
2475 # Error in execution
2476 addAllResults = main.FALSE
2477 else:
2478 # unexpected result
2479 addAllResults = main.FALSE
2480 if addAllResults != main.TRUE:
2481 main.log.error( "Error executing set addAll" )
2482
2483 # Check if set is still correct
2484 size = len( main.onosSet )
2485 getResponses = []
2486 threads = []
2487 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002488 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002489 name="setTestGet-" + str( i ),
2490 args=[ main.onosSetName ] )
2491 threads.append( t )
2492 t.start()
2493 for t in threads:
2494 t.join()
2495 getResponses.append( t.result )
2496 getResults = main.TRUE
2497 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002498 node = str( main.activeNodes[ i ] + 1 )
2499 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002500 current = set( getResponses[ i ] )
2501 if len( current ) == len( getResponses[ i ] ):
2502 # no repeats
2503 if main.onosSet != current:
2504 main.log.error( "ONOS" + node +
2505 " has incorrect view" +
2506 " of set " + main.onosSetName + ":\n" +
2507 str( getResponses[ i ] ) )
2508 main.log.debug( "Expected: " + str( main.onosSet ) )
2509 main.log.debug( "Actual: " + str( current ) )
2510 getResults = main.FALSE
2511 else:
2512 # error, set is not a set
2513 main.log.error( "ONOS" + node +
2514 " has repeat elements in" +
2515 " set " + main.onosSetName + ":\n" +
2516 str( getResponses[ i ] ) )
2517 getResults = main.FALSE
2518 elif getResponses[ i ] == main.ERROR:
2519 getResults = main.FALSE
2520 sizeResponses = []
2521 threads = []
2522 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002523 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002524 name="setTestSize-" + str( i ),
2525 args=[ main.onosSetName ] )
2526 threads.append( t )
2527 t.start()
2528 for t in threads:
2529 t.join()
2530 sizeResponses.append( t.result )
2531 sizeResults = main.TRUE
2532 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002533 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002534 if size != sizeResponses[ i ]:
2535 sizeResults = main.FALSE
2536 main.log.error( "ONOS" + node +
2537 " expected a size of " + str( size ) +
2538 " for set " + main.onosSetName +
2539 " but got " + str( sizeResponses[ i ] ) )
2540 addAllResults = addAllResults and getResults and sizeResults
2541 utilities.assert_equals( expect=main.TRUE,
2542 actual=addAllResults,
2543 onpass="Set addAll correct",
2544 onfail="Set addAll was incorrect" )
2545
2546 main.step( "Distributed Set clear()" )
2547 main.onosSet.clear()
2548 clearResponses = []
2549 threads = []
2550 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002551 t = main.Thread( target=main.CLIs[ i ].setTestRemove,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002552 name="setTestClear-" + str( i ),
Jon Hallf37d44d2017-05-24 10:37:30 -07002553 args=[ main.onosSetName, " " ],  # value doesn't matter when clearing
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002554 kwargs={ "clear": True } )
2555 threads.append( t )
2556 t.start()
2557 for t in threads:
2558 t.join()
2559 clearResponses.append( t.result )
2560
2561 # main.TRUE = successfully changed the set
2562 # main.FALSE = action resulted in no change in set
2563 # main.ERROR - Some error in executing the function
2564 clearResults = main.TRUE
2565 for i in range( len( main.activeNodes ) ):
2566 if clearResponses[ i ] == main.TRUE:
2567 # All is well
2568 pass
2569 elif clearResponses[ i ] == main.FALSE:
2570 # Nothing set, probably fine
2571 pass
2572 elif clearResponses[ i ] == main.ERROR:
2573 # Error in execution
2574 clearResults = main.FALSE
2575 else:
2576 # unexpected result
2577 clearResults = main.FALSE
2578 if clearResults != main.TRUE:
2579 main.log.error( "Error executing set clear" )
2580
2581 # Check if set is still correct
2582 size = len( main.onosSet )
2583 getResponses = []
2584 threads = []
2585 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002586 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002587 name="setTestGet-" + str( i ),
2588 args=[ main.onosSetName ] )
2589 threads.append( t )
2590 t.start()
2591 for t in threads:
2592 t.join()
2593 getResponses.append( t.result )
2594 getResults = main.TRUE
2595 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002596 node = str( main.activeNodes[ i ] + 1 )
2597 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002598 current = set( getResponses[ i ] )
2599 if len( current ) == len( getResponses[ i ] ):
2600 # no repeats
2601 if main.onosSet != current:
2602 main.log.error( "ONOS" + node +
2603 " has incorrect view" +
2604 " of set " + main.onosSetName + ":\n" +
2605 str( getResponses[ i ] ) )
2606 main.log.debug( "Expected: " + str( main.onosSet ) )
2607 main.log.debug( "Actual: " + str( current ) )
2608 getResults = main.FALSE
2609 else:
2610 # error, set is not a set
2611 main.log.error( "ONOS" + node +
2612 " has repeat elements in" +
2613 " set " + main.onosSetName + ":\n" +
2614 str( getResponses[ i ] ) )
2615 getResults = main.FALSE
2616 elif getResponses[ i ] == main.ERROR:
2617 getResults = main.FALSE
2618 sizeResponses = []
2619 threads = []
2620 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002621 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002622 name="setTestSize-" + str( i ),
2623 args=[ main.onosSetName ] )
2624 threads.append( t )
2625 t.start()
2626 for t in threads:
2627 t.join()
2628 sizeResponses.append( t.result )
2629 sizeResults = main.TRUE
2630 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002631 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002632 if size != sizeResponses[ i ]:
2633 sizeResults = main.FALSE
2634 main.log.error( "ONOS" + node +
2635 " expected a size of " + str( size ) +
2636 " for set " + main.onosSetName +
2637 " but got " + str( sizeResponses[ i ] ) )
2638 clearResults = clearResults and getResults and sizeResults
2639 utilities.assert_equals( expect=main.TRUE,
2640 actual=clearResults,
2641 onpass="Set clear correct",
2642 onfail="Set clear was incorrect" )
2643
2644 main.step( "Distributed Set addAll()" )
2645 main.onosSet.update( addAllValue.split() )
2646 addResponses = []
2647 threads = []
2648 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002649 t = main.Thread( target=main.CLIs[ i ].setTestAdd,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002650 name="setTestAddAll-" + str( i ),
2651 args=[ main.onosSetName, addAllValue ] )
2652 threads.append( t )
2653 t.start()
2654 for t in threads:
2655 t.join()
2656 addResponses.append( t.result )
2657
2658 # main.TRUE = successfully changed the set
2659 # main.FALSE = action resulted in no change in set
2660 # main.ERROR - Some error in executing the function
2661 addAllResults = main.TRUE
2662 for i in range( len( main.activeNodes ) ):
2663 if addResponses[ i ] == main.TRUE:
2664 # All is well
2665 pass
2666 elif addResponses[ i ] == main.FALSE:
2667 # Already in set, probably fine
2668 pass
2669 elif addResponses[ i ] == main.ERROR:
2670 # Error in execution
2671 addAllResults = main.FALSE
2672 else:
2673 # unexpected result
2674 addAllResults = main.FALSE
2675 if addAllResults != main.TRUE:
2676 main.log.error( "Error executing set addAll" )
2677
2678 # Check if set is still correct
2679 size = len( main.onosSet )
2680 getResponses = []
2681 threads = []
2682 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002683 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002684 name="setTestGet-" + str( i ),
2685 args=[ main.onosSetName ] )
2686 threads.append( t )
2687 t.start()
2688 for t in threads:
2689 t.join()
2690 getResponses.append( t.result )
2691 getResults = main.TRUE
2692 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002693 node = str( main.activeNodes[ i ] + 1 )
2694 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002695 current = set( getResponses[ i ] )
2696 if len( current ) == len( getResponses[ i ] ):
2697 # no repeats
2698 if main.onosSet != current:
2699 main.log.error( "ONOS" + node +
2700 " has incorrect view" +
2701 " of set " + main.onosSetName + ":\n" +
2702 str( getResponses[ i ] ) )
2703 main.log.debug( "Expected: " + str( main.onosSet ) )
2704 main.log.debug( "Actual: " + str( current ) )
2705 getResults = main.FALSE
2706 else:
2707 # error, set is not a set
2708 main.log.error( "ONOS" + node +
2709 " has repeat elements in" +
2710 " set " + main.onosSetName + ":\n" +
2711 str( getResponses[ i ] ) )
2712 getResults = main.FALSE
2713 elif getResponses[ i ] == main.ERROR:
2714 getResults = main.FALSE
2715 sizeResponses = []
2716 threads = []
2717 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002718 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002719 name="setTestSize-" + str( i ),
2720 args=[ main.onosSetName ] )
2721 threads.append( t )
2722 t.start()
2723 for t in threads:
2724 t.join()
2725 sizeResponses.append( t.result )
2726 sizeResults = main.TRUE
2727 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002728 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002729 if size != sizeResponses[ i ]:
2730 sizeResults = main.FALSE
2731 main.log.error( "ONOS" + node +
2732 " expected a size of " + str( size ) +
2733 " for set " + main.onosSetName +
2734 " but got " + str( sizeResponses[ i ] ) )
2735 addAllResults = addAllResults and getResults and sizeResults
2736 utilities.assert_equals( expect=main.TRUE,
2737 actual=addAllResults,
2738 onpass="Set addAll correct",
2739 onfail="Set addAll was incorrect" )
2740
2741 main.step( "Distributed Set retain()" )
2742 main.onosSet.intersection_update( retainValue.split() )
2743 retainResponses = []
2744 threads = []
2745 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002746 t = main.Thread( target=main.CLIs[ i ].setTestRemove,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002747 name="setTestRetain-" + str( i ),
2748 args=[ main.onosSetName, retainValue ],
2749 kwargs={ "retain": True } )
2750 threads.append( t )
2751 t.start()
2752 for t in threads:
2753 t.join()
2754 retainResponses.append( t.result )
2755
2756 # main.TRUE = successfully changed the set
2757 # main.FALSE = action resulted in no change in set
2758 # main.ERROR - Some error in executing the function
2759 retainResults = main.TRUE
2760 for i in range( len( main.activeNodes ) ):
2761 if retainResponses[ i ] == main.TRUE:
2762 # All is well
2763 pass
2764 elif retainResponses[ i ] == main.FALSE:
2765 # Already in set, probably fine
2766 pass
2767 elif retainResponses[ i ] == main.ERROR:
2768 # Error in execution
2769 retainResults = main.FALSE
2770 else:
2771 # unexpected result
2772 retainResults = main.FALSE
2773 if retainResults != main.TRUE:
2774 main.log.error( "Error executing set retain" )
2775
2776 # Check if set is still correct
2777 size = len( main.onosSet )
2778 getResponses = []
2779 threads = []
2780 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002781 t = main.Thread( target=main.CLIs[ i ].setTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002782 name="setTestGet-" + str( i ),
2783 args=[ main.onosSetName ] )
2784 threads.append( t )
2785 t.start()
2786 for t in threads:
2787 t.join()
2788 getResponses.append( t.result )
2789 getResults = main.TRUE
2790 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002791 node = str( main.activeNodes[ i ] + 1 )
2792 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002793 current = set( getResponses[ i ] )
2794 if len( current ) == len( getResponses[ i ] ):
2795 # no repeats
2796 if main.onosSet != current:
2797 main.log.error( "ONOS" + node +
2798 " has incorrect view" +
2799 " of set " + main.onosSetName + ":\n" +
2800 str( getResponses[ i ] ) )
2801 main.log.debug( "Expected: " + str( main.onosSet ) )
2802 main.log.debug( "Actual: " + str( current ) )
2803 getResults = main.FALSE
2804 else:
2805 # error, set is not a set
2806 main.log.error( "ONOS" + node +
2807 " has repeat elements in" +
2808 " set " + main.onosSetName + ":\n" +
2809 str( getResponses[ i ] ) )
2810 getResults = main.FALSE
2811 elif getResponses[ i ] == main.ERROR:
2812 getResults = main.FALSE
2813 sizeResponses = []
2814 threads = []
2815 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002816 t = main.Thread( target=main.CLIs[ i ].setTestSize,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002817 name="setTestSize-" + str( i ),
2818 args=[ main.onosSetName ] )
2819 threads.append( t )
2820 t.start()
2821 for t in threads:
2822 t.join()
2823 sizeResponses.append( t.result )
2824 sizeResults = main.TRUE
2825 for i in range( len( main.activeNodes ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -07002826 node = str( main.activeNodes[ i ] + 1 )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002827 if size != sizeResponses[ i ]:
2828 sizeResults = main.FALSE
2829 main.log.error( "ONOS" + node + " expected a size of " +
2830 str( size ) + " for set " + main.onosSetName +
2831 " but got " + str( sizeResponses[ i ] ) )
2832 retainResults = retainResults and getResults and sizeResults
2833 utilities.assert_equals( expect=main.TRUE,
2834 actual=retainResults,
2835 onpass="Set retain correct",
2836 onfail="Set retain was incorrect" )
2837
2838 # Transactional maps
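# transactionalMapPut( n, value ) presumably writes keys "Key1".."Keyn"
# with the given value in a single transaction and returns the per-key
# responses, which we validate below.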
2839 main.step( "Partitioned Transactional maps put" )
2840 tMapValue = "Testing"
2841 numKeys = 100
2842 putResult = True
Jon Hallf37d44d2017-05-24 10:37:30 -07002843 node = main.activeNodes[ 0 ]
2844 putResponses = main.CLIs[ node ].transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002845 if putResponses and len( putResponses ) == numKeys:
2846 for i in putResponses:
2847 if putResponses[ i ][ 'value' ] != tMapValue:
2848 putResult = False
2849 else:
2850 putResult = False
2851 if not putResult:
2852 main.log.debug( "Put response values: " + str( putResponses ) )
2853 utilities.assert_equals( expect=True,
2854 actual=putResult,
2855 onpass="Partitioned Transactional Map put successful",
2856 onfail="Partitioned Transactional Map put values are incorrect" )
2857
2858 main.step( "Partitioned Transactional maps get" )
2859 # FIXME: is this sleep needed?
2860 time.sleep( 5 )
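# The sleep presumably gives the committed transaction time to propagate
# to every node before the reads below; it may be removable.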
2861
2862 getCheck = True
2863 for n in range( 1, numKeys + 1 ):
2864 getResponses = []
2865 threads = []
2866 valueCheck = True
2867 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002868 t = main.Thread( target=main.CLIs[ i ].transactionalMapGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002869 name="TMap-get-" + str( i ),
2870 args=[ "Key" + str( n ) ] )
2871 threads.append( t )
2872 t.start()
2873 for t in threads:
2874 t.join()
2875 getResponses.append( t.result )
2876 for node in getResponses:
2877 if node != tMapValue:
2878 valueCheck = False
2879 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002880 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002881 main.log.warn( getResponses )
2882 getCheck = getCheck and valueCheck
2883 utilities.assert_equals( expect=True,
2884 actual=getCheck,
2885 onpass="Partitioned Transactional Map get values were correct",
2886 onfail="Partitioned Transactional Map values incorrect" )
2887
2888 # DISTRIBUTED ATOMIC VALUE
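# The atomic Value primitive stores a single string; reading a value that
# has never been set ( or was destroyed ) is expected to return "null".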
2889 main.step( "Get the value of a new atomic Value" )
2890 threads = []
2891 getValues = []
2892 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002893 t = main.Thread( target=main.CLIs[ i ].valueTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002894 name="ValueGet-" + str( i ),
2895 args=[ valueName ] )
2896 threads.append( t )
2897 t.start()
2898
2899 for t in threads:
2900 t.join()
2901 getValues.append( t.result )
2902 main.log.debug( getValues )
2903 # Check the results
2904 atomicValueGetResult = True
2905 expected = valueValue if valueValue is not None else "null"
2906 main.log.debug( "Checking for value of " + expected )
2907 for i in getValues:
2908 if i != expected:
2909 atomicValueGetResult = False
2910 utilities.assert_equals( expect=True,
2911 actual=atomicValueGetResult,
2912 onpass="Atomic Value get successful",
2913 onfail="Error getting atomic Value " +
2914 str( valueValue ) + ", found: " +
2915 str( getValues ) )
2916
2917 main.step( "Atomic Value set()" )
2918 valueValue = "foo"
2919 threads = []
2920 setValues = []
2921 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002922 t = main.Thread( target=main.CLIs[ i ].valueTestSet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002923 name="ValueSet-" + str( i ),
2924 args=[ valueName, valueValue ] )
2925 threads.append( t )
2926 t.start()
2927
2928 for t in threads:
2929 t.join()
2930 setValues.append( t.result )
2931 main.log.debug( setValues )
2932 # Check the results
2933 atomicValueSetResults = True
2934 for i in setValues:
2935 if i != main.TRUE:
2936 atomicValueSetResults = False
2937 utilities.assert_equals( expect=True,
2938 actual=atomicValueSetResults,
2939 onpass="Atomic Value set successful",
2940 onfail="Error setting atomic Value: " +
2941 str( setValues ) )
2942
2943 main.step( "Get the value after set()" )
2944 threads = []
2945 getValues = []
2946 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002947 t = main.Thread( target=main.CLIs[ i ].valueTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002948 name="ValueGet-" + str( i ),
2949 args=[ valueName ] )
2950 threads.append( t )
2951 t.start()
2952
2953 for t in threads:
2954 t.join()
2955 getValues.append( t.result )
2956 main.log.debug( getValues )
2957 # Check the results
2958 atomicValueGetResult = True
2959 expected = valueValue if valueValue is not None else "null"
2960 main.log.debug( "Checking for value of " + expected )
2961 for i in getValues:
2962 if i != expected:
2963 atomicValueGetResult = False
2964 utilities.assert_equals( expect=True,
2965 actual=atomicValueGetResult,
2966 onpass="Atomic Value get successful",
2967 onfail="Error getting atomic Value " +
2968 str( valueValue ) + ", found: " +
2969 str( getValues ) )
2970
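# compareAndSet( name, old, new ) only swaps in the new value when the
# current value equals old, returning main.TRUE on success.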
2971 main.step( "Atomic Value compareAndSet()" )
2972 oldValue = valueValue
2973 valueValue = "bar"
Jon Hallf37d44d2017-05-24 10:37:30 -07002974 i = main.activeNodes[ 0 ]
2975 CASValue = main.CLIs[ i ].valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002976 main.log.debug( CASValue )
2977 utilities.assert_equals( expect=main.TRUE,
2978 actual=CASValue,
2979 onpass="Atomic Value compareAndSet successful",
2980 onfail="Error setting atomic Value:" +
2981 str( CASValue ) )
2982
2983 main.step( "Get the value after compareAndSet()" )
2984 threads = []
2985 getValues = []
2986 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07002987 t = main.Thread( target=main.CLIs[ i ].valueTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002988 name="ValueGet-" + str( i ),
2989 args=[ valueName ] )
2990 threads.append( t )
2991 t.start()
2992
2993 for t in threads:
2994 t.join()
2995 getValues.append( t.result )
2996 main.log.debug( getValues )
2997 # Check the results
2998 atomicValueGetResult = True
2999 expected = valueValue if valueValue is not None else "null"
3000 main.log.debug( "Checking for value of " + expected )
3001 for i in getValues:
3002 if i != expected:
3003 atomicValueGetResult = False
3004 utilities.assert_equals( expect=True,
3005 actual=atomicValueGetResult,
3006 onpass="Atomic Value get successful",
3007 onfail="Error getting atomic Value " +
3008 str( valueValue ) + ", found: " +
3009 str( getValues ) )
3010
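# getAndSet( name, value ) atomically stores the new value and returns
# the previous one, hence the comparison against oldValue below.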
3011 main.step( "Atomic Value getAndSet()" )
3012 oldValue = valueValue
3013 valueValue = "baz"
Jon Hallf37d44d2017-05-24 10:37:30 -07003014 i = main.activeNodes[ 0 ]
3015 GASValue = main.CLIs[ i ].valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003016 main.log.debug( GASValue )
3017 expected = oldValue if oldValue is not None else "null"
3018 utilities.assert_equals( expect=expected,
3019 actual=GASValue,
3020 onpass="Atomic Value GAS successful",
3021 onfail="Error with GetAndSet atomic Value: expected " +
3022 str( expected ) + ", found: " +
3023 str( GASValue ) )
3024
3025 main.step( "Get the value after getAndSet()" )
3026 threads = []
3027 getValues = []
3028 for i in main.activeNodes:
Jon Hallf37d44d2017-05-24 10:37:30 -07003029 t = main.Thread( target=main.CLIs[ i ].valueTestGet,
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003030 name="ValueGet-" + str( i ),
3031 args=[ valueName ] )
3032 threads.append( t )
3033 t.start()
3034
3035 for t in threads:
3036 t.join()
3037 getValues.append( t.result )
3038 main.log.debug( getValues )
3039 # Check the results
3040 atomicValueGetResult = True
3041 expected = valueValue if valueValue is not None else "null"
3042 main.log.debug( "Checking for value of " + expected )
3043 for i in getValues:
3044 if i != expected:
3045 atomicValueGetResult = False
3046 utilities.assert_equals( expect=True,
3047 actual=atomicValueGetResult,
3048 onpass="Atomic Value get successful",
3049 onfail="Error getting atomic Value: expected " +
3050 str( valueValue ) + ", found: " +
3051 str( getValues ) )
3052
3053 main.step( "Atomic Value destroy()" )
3054 valueValue = None
3055 threads = []
Jon Hallf37d44d2017-05-24 10:37:30 -07003056 i = main.activeNodes[ 0 ]
3057 destroyResult = main.CLIs[ i ].valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07003058 main.log.debug( destroyResult )
3059 # Check the results
3060 utilities.assert_equals( expect=main.TRUE,
3061 actual=destroyResult,
3062 onpass="Atomic Value destroy successful",
3063 onfail="Error destroying atomic Value" )

            main.step( "Get the value after destroy()" )
            threads = []
            getValues = []
            for i in main.activeNodes:
                t = main.Thread( target=main.CLIs[ i ].valueTestGet,
                                 name="ValueGet-" + str( i ),
                                 args=[ valueName ] )
                threads.append( t )
                t.start()

            for t in threads:
                t.join()
                getValues.append( t.result )
            main.log.debug( getValues )
            # Check the results
            atomicValueGetResult = True
            expected = valueValue if valueValue is not None else "null"
            main.log.debug( "Checking for value of " + expected )
            for i in getValues:
                if i != expected:
                    atomicValueGetResult = False
            utilities.assert_equals( expect=True,
                                     actual=atomicValueGetResult,
                                     onpass="Atomic Value get successful",
                                     onfail="Error getting atomic Value " +
                                            str( valueValue ) + ", found: " +
                                            str( getValues ) )

            # WORK QUEUES
            main.step( "Work Queue add()" )
            threads = []
            i = main.activeNodes[ 0 ]
            addResult = main.CLIs[ i ].workQueueAdd( workQueueName, 'foo' )
            workQueuePending += 1
            main.log.debug( addResult )
            # Check the results
            utilities.assert_equals( expect=main.TRUE,
                                     actual=addResult,
                                     onpass="Work Queue add successful",
                                     onfail="Error adding to Work Queue" )

            main.step( "Check the work queue stats" )
            statsResults = self.workQueueStatsCheck( workQueueName,
                                                     workQueueCompleted,
                                                     workQueueInProgress,
                                                     workQueuePending )
            utilities.assert_equals( expect=True,
                                     actual=statsResults,
                                     onpass="Work Queue stats correct",
                                     onfail="Work Queue stats incorrect" )
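            # NOTE: workQueueCompleted/InProgress/Pending are local counters
            # that mirror the expected server-side stats: add()/addMultiple()
            # grow pending, while takeAndComplete( n ) moves n items from
            # pending to completed.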

            main.step( "Work Queue addMultiple()" )
            threads = []
            i = main.activeNodes[ 0 ]
            addMultipleResult = main.CLIs[ i ].workQueueAddMultiple( workQueueName, 'bar', 'baz' )
            workQueuePending += 2
            main.log.debug( addMultipleResult )
            # Check the results
            utilities.assert_equals( expect=main.TRUE,
                                     actual=addMultipleResult,
                                     onpass="Work Queue add multiple successful",
                                     onfail="Error adding multiple items to Work Queue" )

            main.step( "Check the work queue stats" )
            statsResults = self.workQueueStatsCheck( workQueueName,
                                                     workQueueCompleted,
                                                     workQueueInProgress,
                                                     workQueuePending )
            utilities.assert_equals( expect=True,
                                     actual=statsResults,
                                     onpass="Work Queue stats correct",
                                     onfail="Work Queue stats incorrect" )

            main.step( "Work Queue takeAndComplete() 1" )
            threads = []
            i = main.activeNodes[ 0 ]
            number = 1
            take1Result = main.CLIs[ i ].workQueueTakeAndComplete( workQueueName, number )
            workQueuePending -= number
            workQueueCompleted += number
            main.log.debug( take1Result )
            # Check the results
            utilities.assert_equals( expect=main.TRUE,
                                     actual=take1Result,
                                     onpass="Work Queue takeAndComplete 1 successful",
                                     onfail="Error taking 1 from Work Queue" )

            main.step( "Check the work queue stats" )
            statsResults = self.workQueueStatsCheck( workQueueName,
                                                     workQueueCompleted,
                                                     workQueueInProgress,
                                                     workQueuePending )
            utilities.assert_equals( expect=True,
                                     actual=statsResults,
                                     onpass="Work Queue stats correct",
                                     onfail="Work Queue stats incorrect" )

            main.step( "Work Queue takeAndComplete() 2" )
            threads = []
            i = main.activeNodes[ 0 ]
            number = 2
            take2Result = main.CLIs[ i ].workQueueTakeAndComplete( workQueueName, number )
            workQueuePending -= number
            workQueueCompleted += number
            main.log.debug( take2Result )
            # Check the results
            utilities.assert_equals( expect=main.TRUE,
                                     actual=take2Result,
                                     onpass="Work Queue takeAndComplete 2 successful",
                                     onfail="Error taking 2 from Work Queue" )

            main.step( "Check the work queue stats" )
            statsResults = self.workQueueStatsCheck( workQueueName,
                                                     workQueueCompleted,
                                                     workQueueInProgress,
                                                     workQueuePending )
            utilities.assert_equals( expect=True,
                                     actual=statsResults,
                                     onpass="Work Queue stats correct",
                                     onfail="Work Queue stats incorrect" )
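            # Expected stats at this point ( from the bookkeeping above ):
            # three items were added ( 'foo', 'bar', 'baz' ) and all three have
            # been taken and completed, so completed=3, inProgress=0, pending=0.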

            main.step( "Work Queue destroy()" )
            valueValue = None
            threads = []
            i = main.activeNodes[ 0 ]
            destroyResult = main.CLIs[ i ].workQueueDestroy( workQueueName )
            workQueueCompleted = 0
            workQueueInProgress = 0
            workQueuePending = 0
            main.log.debug( destroyResult )
            # Check the results
            utilities.assert_equals( expect=main.TRUE,
                                     actual=destroyResult,
                                     onpass="Work Queue destroy successful",
                                     onfail="Error destroying Work Queue" )

            main.step( "Check the work queue stats" )
            statsResults = self.workQueueStatsCheck( workQueueName,
                                                     workQueueCompleted,
                                                     workQueueInProgress,
                                                     workQueuePending )
            utilities.assert_equals( expect=True,
                                     actual=statsResults,
                                     onpass="Work Queue stats correct",
                                     onfail="Work Queue stats incorrect" )
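            # NOTE: destroy() is expected to clear the queue entirely; the
            # local counters were zeroed above to match that in the final check.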
        except Exception as e:
            main.log.error( "Exception: " + str( e ) )

    def cleanUp( self, main ):
        """
        Clean up
        """
        import os
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        # printing colors to terminal
        colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
                   'blue': '\033[94m', 'green': '\033[92m',
                   'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
        main.case( "Test Cleanup" )
        main.step( "Killing tcpdumps" )
        main.Mininet2.stopTcpdump()

        testname = main.TEST
        if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
            main.step( "Copying MN pcap and ONOS log files to test station" )
            teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
            teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
            # NOTE: MN pcap file is being saved to logdir.
            # We scp this file as MN and TestON aren't necessarily the same vm

            # FIXME: To be replaced with a Jenkins post-build script
            # TODO: Load these from params
            # NOTE: must end in /
            logFolder = "/opt/onos/log/"
            logFiles = [ "karaf.log", "karaf.log.1" ]
            # NOTE: must end in /
            for f in logFiles:
                for node in main.nodes:
                    dstName = main.logdir + "/" + node.name + "-" + f
                    main.ONOSbench.secureCopy( node.user_name, node.ip_address,
                                               logFolder + f, dstName )
            # std*.log's
            # NOTE: must end in /
            logFolder = "/opt/onos/var/"
            logFiles = [ "stderr.log", "stdout.log" ]
            # NOTE: must end in /
            for f in logFiles:
                for node in main.nodes:
                    dstName = main.logdir + "/" + node.name + "-" + f
                    main.ONOSbench.secureCopy( node.user_name, node.ip_address,
                                               logFolder + f, dstName )
        else:
            main.log.debug( "skipping saving log files" )

        main.step( "Stopping Mininet" )
        mnResult = main.Mininet1.stopNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet stopped",
                                 onfail="MN cleanup NOT successful" )

        main.step( "Checking ONOS Logs for errors" )
        for node in main.nodes:
            main.log.debug( "Checking logs for errors on " + node.name + ":" )
            main.log.warn( main.ONOSbench.checkLogs( node.ip_address ) )

        try:
            timerLog = open( main.logdir + "/Timers.csv", 'w' )
            main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
            timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
            timerLog.close()
        except NameError as e:
            main.log.exception( e )

    def assignMastership( self, main ):
        """
        Assign mastership to controllers
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE
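        # Intended mapping for the obelisk topology ( derived from the
        # branches below, applied modulo the actual cluster size ):
        #   ONOS1: s1, s28   ONOS2: s2, s3   ONOS3: s5, s6   ONOS4: s4
        #   ONOS5: s8-s17    ONOS6: s7       ONOS7: s18-s27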

        ipList = []
        deviceList = []
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                if i == 1:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS2
                    deviceId = onosCli.getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS4
                    deviceId = onosCli.getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS3
                    deviceId = onosCli.getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS6
                    deviceId = onosCli.getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    c = 4 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    c = 6 % main.numCtrls
                    ip = main.nodes[ c ].ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = onosCli.getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.nodes[ c ].ip_address  # ONOS1
                    deviceId = onosCli.getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( onosCli.devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[ i ]
            deviceId = deviceList[ i ]
            # Check assignment
            master = onosCli.getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )

    def bringUpStoppedNode( self, main ):
        """
        Bring up the stopped ONOS node(s)
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        assert main.kill, "main.kill not defined"
        main.case( "Restart minority of ONOS nodes" )

        main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
        startResults = main.TRUE
        restartTime = time.time()
        for i in main.kill:
            startResults = startResults and\
                           main.ONOSbench.onosStart( main.nodes[ i ].ip_address )
        utilities.assert_equals( expect=main.TRUE, actual=startResults,
                                 onpass="ONOS nodes started successfully",
                                 onfail="ONOS nodes NOT successfully started" )

        main.step( "Checking if ONOS is up yet" )
        count = 0
        onosIsupResult = main.FALSE
        while onosIsupResult == main.FALSE and count < 10:
            onosIsupResult = main.TRUE
            for i in main.kill:
                onosIsupResult = onosIsupResult and\
                                 main.ONOSbench.isup( main.nodes[ i ].ip_address )
            count = count + 1
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS restarted successfully",
                                 onfail="ONOS restart NOT successful" )

        main.step( "Restarting ONOS main.CLIs" )
        cliResults = main.TRUE
        for i in main.kill:
            cliResults = cliResults and\
                         main.CLIs[ i ].startOnosCli( main.nodes[ i ].ip_address )
            main.activeNodes.append( i )
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS cli restarted",
                                 onfail="ONOS cli did not restart" )
        main.activeNodes.sort()
        try:
            assert list( set( main.activeNodes ) ) == main.activeNodes,\
                "List of active nodes has duplicates, this likely indicates something was run out of order"
        except AssertionError:
            main.log.exception( "" )
            main.cleanup()
            main.exit()

        # Grab the time of restart so we can check how long the gossip
        # protocol has had time to work
        main.restartTime = time.time() - restartTime
        main.log.debug( "Restart time: " + str( main.restartTime ) )
        # TODO: Make this configurable. Also, we are breaking the above timer
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( self.nodesCheck,
                                       False,
                                       args=[ main.activeNodes ],
                                       sleep=15,
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for i in main.activeNodes:
                cli = main.CLIs[ i ]
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    cli.name,
                    cli.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        node = main.activeNodes[ 0 ]
        main.log.debug( main.CLIs[ node ].nodes( jsonFormat=False ) )
        main.log.debug( main.CLIs[ node ].leaders( jsonFormat=False ) )
        main.log.debug( main.CLIs[ node ].partitions( jsonFormat=False ) )
        main.log.debug( main.CLIs[ node ].apps( jsonFormat=False ) )

        main.step( "Rerun for election on the node(s) that were killed" )
        runResults = main.TRUE
        for i in main.kill:
            runResults = runResults and\
                         main.CLIs[ i ].electionTestRun()
        utilities.assert_equals( expect=main.TRUE, actual=runResults,
                                 onpass="ONOS nodes reran for election topic",
                                 onfail="Error rerunning for election" )

    def checkStateAfterONOS( self, main, afterWhich, compareSwitch=False, isRestart=False ):
        """
        Check state after ONOS failure/scaling

        afterWhich :
            0: failure
            1: scaling
        """
        import json
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        main.case( "Running ONOS Constant State Tests" )

        OnosAfterWhich = [ "failure", "scaling" ]

        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.TRUE
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].rolesNotNull,
                             name="rolesNotNull-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            rolesNotNull = rolesNotNull and t.result
        utilities.assert_equals(
            expect=main.TRUE,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

        main.step( "Read device roles from ONOS" )
        ONOSMastership = []
        consistentMastership = True
        rolesResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].roles,
                             name="roles-" + str( i ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSMastership.append( t.result )

        for i in range( len( ONOSMastership ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                main.log.error( "Error in getting ONOS" + node + " roles" )
                main.log.warn( "ONOS" + node + " mastership response: " +
                               repr( ONOSMastership[ i ] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )

        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        if compareSwitch:
            description2 = "Compare switch roles from before failure"
            main.step( description2 )
            try:
                currentJson = json.loads( ONOSMastership[ 0 ] )
                oldJson = json.loads( mastershipState )
            except ( ValueError, TypeError ):
                main.log.exception( "Something is wrong with parsing " +
                                    "ONOSMastership[0] or mastershipState" )
                main.log.error( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
                main.log.error( "mastershipState" + repr( mastershipState ) )
                main.cleanup()
                main.exit()
            mastershipCheck = main.TRUE
            for i in range( 1, 29 ):
                switchDPID = str(
                    main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
                current = [ switch[ 'master' ] for switch in currentJson
                            if switchDPID in switch[ 'id' ] ]
                old = [ switch[ 'master' ] for switch in oldJson
                        if switchDPID in switch[ 'id' ] ]
                if current == old:
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.warn( "Mastership of switch %s changed" % switchDPID )
                    mastershipCheck = main.FALSE
            utilities.assert_equals(
                expect=main.TRUE,
                actual=mastershipCheck,
                onpass="Mastership of Switches was not changed",
                onfail="Mastership of some switches changed" )

        # NOTE: we expect mastership to change on controller failure/scaling down
        main.step( "Get the intents and compare across all nodes" )
        ONOSIntents = []
        intentCheck = main.FALSE
        consistentIntents = True
        intentsResults = True
        threads = []
        for i in main.activeNodes:
            t = main.Thread( target=main.CLIs[ i ].intents,
                             name="intents-" + str( i ),
                             args=[],
                             kwargs={ 'jsonFormat': True } )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            ONOSIntents.append( t.result )

        for i in range( len( ONOSIntents ) ):
            node = str( main.activeNodes[ i ] + 1 )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting ONOS" + node + " intents" )
                main.log.warn( "ONOS" + node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )

        main.step( "Check for consistency in Intents from each controller" )
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2     ...
            #   0x01    INSTALLED  INSTALLING
            #   ...        ...        ...
            #   ...        ...        ...
            title = "   ID"
            for n in main.activeNodes:
                title += " " * 10 + "ONOS" + str( n + 1 )
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    keys.append( intent.get( 'id' ) )
            keys = set( keys )
            for key in keys:
                row = "%-13s" % key
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        if intent.get( 'id' ) == key:
                            row += "%-15s" % intent.get( 'state' )
                main.log.warn( row )
            # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )
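            # e.g. { 'INSTALLED': 28 } when every intent on this node is
            # installed; mixed states usually point at a convergence problem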

        if intentsResults and not consistentIntents:
            for i in range( len( main.activeNodes ) ):
                node = str( main.activeNodes[ i ] + 1 )
                main.log.warn( "ONOS" + node + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        if not isRestart:
            main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
            # NOTE: this requires case 5 to pass for intentState to be set.
            #       maybe we should stop the test if that fails?
            sameIntents = main.FALSE
            try:
                intentState
            except NameError:
                main.log.warn( "No previous intent state was saved" )
            else:
                if intentState and intentState == ONOSIntents[ 0 ]:
                    sameIntents = main.TRUE
                    main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
                # TODO: possibly the states have changed? we may need to figure out
                #       what the acceptable states are
                elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                    sameIntents = main.TRUE
                    try:
                        before = json.loads( intentState )
                        after = json.loads( ONOSIntents[ 0 ] )
                        for intent in before:
                            if intent not in after:
                                sameIntents = main.FALSE
                                main.log.debug( "Intent is not currently in ONOS " +
                                                "(at least in the same form):" )
                                main.log.debug( json.dumps( intent ) )
                    except ( ValueError, TypeError ):
                        main.log.exception( "Exception printing intents" )
                        main.log.debug( repr( ONOSIntents[ 0 ] ) )
                        main.log.debug( repr( intentState ) )
                if sameIntents == main.FALSE:
                    try:
                        main.log.debug( "ONOS intents before: " )
                        main.log.debug( json.dumps( json.loads( intentState ),
                                                    sort_keys=True, indent=4,
                                                    separators=( ',', ': ' ) ) )
                        main.log.debug( "Current ONOS intents: " )
                        main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                    sort_keys=True, indent=4,
                                                    separators=( ',', ': ' ) ) )
                    except ( ValueError, TypeError ):
                        main.log.exception( "Exception printing intents" )
                        main.log.debug( repr( ONOSIntents[ 0 ] ) )
                        main.log.debug( repr( intentState ) )
                utilities.assert_equals(
                    expect=main.TRUE,
                    actual=sameIntents,
                    onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
                    onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
                intentCheck = intentCheck and sameIntents

            main.step( "Get the OF Table entries and compare to before " +
                       "component " + OnosAfterWhich[ afterWhich ] )
            FlowTables = main.TRUE
            for i in range( 28 ):
                main.log.info( "Checking flow table on s" + str( i + 1 ) )
                tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
                curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
                FlowTables = FlowTables and curSwitch
                if curSwitch == main.FALSE:
                    main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
            utilities.assert_equals(
                expect=main.TRUE,
                actual=FlowTables,
                onpass="No changes were found in the flow tables",
                onfail="Changes were found in the flow tables" )

            main.Mininet2.pingLongKill()

        """
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        # NOTE: Since intents are not persisted with IntentStore,
        #       we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        """

    def compareTopo( self, main ):
        """
        Compare topo
        """
        import json
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        try:
            from tests.dependencies.topology import Topology
        except ImportError:
            main.log.error( "Topology not found exiting the test" )
            main.exit()
        try:
            main.topoRelated
        except ( NameError, AttributeError ):
            main.topoRelated = Topology()
        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                               " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology doesn't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            devices = main.topoRelated.getAllDevices( main.activeNodes, True,
                                                      kwargs={ 'sleep': 5, 'attempts': 5,
                                                               'randomTime': True } )
            ipResult = main.TRUE

            hosts = main.topoRelated.getAllHosts( main.activeNodes, True,
                                                  kwargs={ 'sleep': 5, 'attempts': 5,
                                                           'randomTime': True },
                                                  inJson=True )

            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.activeNodes[ controller ] + 1 )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            ports = main.topoRelated.getAllPorts( main.activeNodes, True,
                                                  kwargs={ 'sleep': 5, 'attempts': 5,
                                                           'randomTime': True } )
            links = main.topoRelated.getAllLinks( main.activeNodes, True,
                                                  kwargs={ 'sleep': 5, 'attempts': 5,
                                                           'randomTime': True } )
            clusters = main.topoRelated.getAllClusters( main.activeNodes, True,
                                                        kwargs={ 'sleep': 5, 'attempts': 5,
                                                                 'randomTime': True } )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            for controller in range( len( main.activeNodes ) ):
                controllerStr = str( main.activeNodes[ controller ] + 1 )
                currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1, controller,
                                                                           mnSwitches,
                                                                           devices, ports )
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass="ONOS" + controllerStr +
                                                " Switches view is correct",
                                         onfail="ONOS" + controllerStr +
                                                " Switches view is incorrect" )

                currentLinksResult = main.topoRelated.compareBase( links, controller,
                                                                   main.Mininet1.compareLinks,
                                                                   [ mnSwitches, mnLinks ] )
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass="ONOS" + controllerStr +
                                                " links view is correct",
                                         onfail="ONOS" + controllerStr +
                                                " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                        mnHosts,
                        hosts[ controller ] )
                elif hosts[ controller ] == []:
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass="ONOS" + controllerStr +
                                                " hosts exist in Mininet",
                                         onfail="ONOS" + controllerStr +
                                                " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
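                # e.g. host 3 maps to mac 00:00:00:00:00:03 and is expected on
                # device 0000000000003000 ( DPID zero-filled to 16 ), port 1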
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
                    if i == 1:
                        deviceId = "1000".zfill( 16 )
                    elif i == 2:
                        deviceId = "2000".zfill( 16 )
                    elif i == 3:
                        deviceId = "3000".zfill( 16 )
                    elif i == 4:
                        deviceId = "3004".zfill( 16 )
                    elif i == 5:
                        deviceId = "5000".zfill( 16 )
                    elif i == 6:
                        deviceId = "6000".zfill( 16 )
                    elif i == 7:
                        deviceId = "6007".zfill( 16 )
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill( 16 )
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill( 16 )
                    elif i == 28:
                        deviceId = "2800".zfill( 16 )
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'locations' )[ 0 ]
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port ) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    # TODO: Find a way to know if there should be hosts in a
                    #       given point of the test
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
        utilities.assert_equals( expect=True,
                                 actual=topoResult,
                                 onpass="ONOS topology matches Mininet",
                                 onfail=topoFailMsg )
        # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.activeNodes[ controller ] + 1 )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from ONOS" + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.warn( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from ONOS" +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " hosts response: " +
                               repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.activeNodes[ controller ] + 1 )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from ONOS" +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from ONOS" + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.warn( "ONOS" + controllerStr +
                               " clusters response: " +
                               repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )
            for x in links:
                main.log.warn( "{}: {}".format( len( x ), x ) )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        clusterResults = main.FALSE
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        topoResult = topoResult and int( count <= 2 )
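        # count is the number of passes the state-pulling while loop needed;
        # requiring count <= 2 fails the case if convergence took too long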
        note = "note it takes about " + str( int( cliTime ) ) + \
               " seconds for the test to make all the cli calls to fetch " +\
               "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Links are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( self.nodesCheck,
                                       False,
                                       args=[ main.activeNodes ],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            for i in main.activeNodes:
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    main.CLIs[ i ].name,
                    main.CLIs[ i ].sendline( "scr:list | grep -v ACTIVE" ) ) )

        if not topoResult:
            main.cleanup()
            main.exit()

    def linkDown( self, main, fromS="s3", toS="s28" ):
        """
        Link fromS-toS down
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # NOTE: You should probably run a topology check after this

        linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )

        description = "Turn off a link to ensure that Link Discovery " +\
                      "is working properly"
        main.case( description )

        main.step( "Kill Link between " + fromS + " and " + toS )
        LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
        main.log.info( "Waiting " + str( linkSleep ) +
                       " seconds for link down to be discovered" )
        time.sleep( linkSleep )
        utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
                                 onpass="Link down successful",
                                 onfail="Failed to bring link down" )
        # TODO do some sort of check here
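        # One possible check ( sketch only, not implemented here ): query the
        # ONOS link view from an active node and verify that the two directed
        # links for fromS-toS are gone, e.g.
        #     onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        #     linksJson = json.loads( onosCli.links() )
        # and compare len( linksJson ) against the expected Mininet link count.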

    def linkUp( self, main, fromS="s3", toS="s28" ):
        """
        Link fromS-toS up
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"
        # NOTE: You should probably run a topology check after this

        linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )

        description = "Restore a link to ensure that Link Discovery is " + \
                      "working properly"
        main.case( description )

        main.step( "Bring link between " + fromS + " and " + toS + " back up" )
        LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
        main.log.info( "Waiting " + str( linkSleep ) +
                       " seconds for link up to be discovered" )
        time.sleep( linkSleep )
        utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
                                 onpass="Link up successful",
                                 onfail="Failed to bring link up" )

    def switchDown( self, main ):
        """
        Switch Down
        """
        # NOTE: You should probably run a topology check after this
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )

        description = "Killing a switch to ensure it is discovered correctly"
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        main.case( description )
        switch = main.params[ 'kill' ][ 'switch' ]
        switchDPID = main.params[ 'kill' ][ 'dpid' ]

        # TODO: Make this switch parameterizable
        main.step( "Kill " + switch )
        main.log.info( "Deleting " + switch )
        main.Mininet1.delSwitch( switch )
        main.log.info( "Waiting " + str( switchSleep ) +
                       " seconds for switch down to be discovered" )
        time.sleep( switchSleep )
        device = onosCli.getDevice( dpid=switchDPID )
        # Peek at the deleted switch
        main.log.warn( str( device ) )
        result = main.FALSE
        if device and device[ 'available' ] is False:
            result = main.TRUE
        utilities.assert_equals( expect=main.TRUE, actual=result,
                                 onpass="Kill switch successful",
                                 onfail="Failed to kill switch?" )

    def switchUp( self, main ):
        """
        Switch Up
        """
        # NOTE: You should probably run a topology check after this
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
        switch = main.params[ 'kill' ][ 'switch' ]
        switchDPID = main.params[ 'kill' ][ 'dpid' ]
        links = main.params[ 'kill' ][ 'links' ].split()
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        description = "Adding a switch to ensure it is discovered correctly"
        main.case( description )

        main.step( "Add back " + switch )
        main.Mininet1.addSwitch( switch, dpid=switchDPID )
        for peer in links:
            main.Mininet1.addLink( switch, peer )
        ipList = [ node.ip_address for node in main.nodes ]
        main.Mininet1.assignSwController( sw=switch, ip=ipList )
        main.log.info( "Waiting " + str( switchSleep ) +
                       " seconds for switch up to be discovered" )
        time.sleep( switchSleep )
        device = onosCli.getDevice( dpid=switchDPID )
        # Peek at the re-added switch
        main.log.warn( str( device ) )
        result = main.FALSE
        if device and device[ 'available' ]:
            result = main.TRUE
        utilities.assert_equals( expect=main.TRUE, actual=result,
                                 onpass="add switch successful",
                                 onfail="Failed to add switch?" )

    def startElectionApp( self, main ):
        """
        Start election app on all ONOS nodes
        """
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        main.case( "Start Leadership Election app" )
        main.step( "Install leadership election app" )
        onosCli = main.CLIs[ main.activeNodes[ 0 ] ]
        appResult = onosCli.activateApp( "org.onosproject.election" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=appResult,
            onpass="Election app installed",
            onfail="Something went wrong with installing Leadership election" )

        main.step( "Run for election on each node" )
        for i in main.activeNodes:
            main.CLIs[ i ].electionTestRun()
        time.sleep( 5 )
        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
        sameResult, leaders = self.consistentLeaderboards( activeCLIs )
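        # consistentLeaderboards() returns ( allMatch, leaderboards ); each
        # leaderboard lists the leader first, so leaders[ 0 ][ 0 ] below is the
        # leader as seen by the first node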
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="All nodes see the same leaderboards",
            onfail="Inconsistent leaderboards" )

        if sameResult:
            leader = leaders[ 0 ][ 0 ]
            if main.nodes[ main.activeNodes[ 0 ] ].ip_address in leader:
                correctLeader = True
            else:
                correctLeader = False
            main.step( "First node was elected leader" )
            utilities.assert_equals(
                expect=True,
                actual=correctLeader,
                onpass="Correct leader was elected",
                onfail="Incorrect leader" )

    def isElectionFunctional( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that the new leader was the candidate of the old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal, and later before withdrawal vs after re-election
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        if main.numCtrls == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.TRUE

        for i in main.activeNodes:  # run test election on each node
            if main.CLIs[ i ].electionTestRun() == main.FALSE:
                electionResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = [ main.CLIs[ i ] for i in main.activeNodes ]
        sameResult, oldLeaders = self.consistentLeaderboards( activeCLIs )
        if sameResult:
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.warn( oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for i in main.activeNodes:
            if oldLeader == main.nodes[ i ].ip_address:
                oldLeaderCLI = main.CLIs[ i ]
                break
        else:  # FOR/ELSE statement
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that the new leader was the candidate of the old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[ 0 ] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader ( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: Parameterize
        positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[ 0 ]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )

    def installDistributedPrimitiveApp( self, main ):
        """
        Install Distributed Primitives app
        """
        import time
        assert main.numCtrls, "main.numCtrls not defined"
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.CLIs, "main.CLIs not defined"
        assert main.nodes, "main.nodes not defined"

        # Variables for the distributed primitives tests
        main.pCounterName = "TestON-Partitions"
        main.pCounterValue = 0
        main.onosSet = set( [] )
        main.onosSetName = "TestON-set"

        description = "Install Primitives app"
        main.case( description )
        main.step( "Install Primitives app" )
        appName = "org.onosproject.distributedprimitives"
        node = main.activeNodes[ 0 ]
        appResults = main.CLIs[ node ].activateApp( appName )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=appResults,
                                 onpass="Primitives app activated",
                                 onfail="Primitives app not activated" )
        # TODO check on all nodes instead of sleeping
        time.sleep( 5 )  # To allow all nodes to activate
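        # A retry-based check could replace the sleep above ( sketch only;
        # assumes an appStatus()-style helper on the CLI drivers, which may
        # not exist under that name ):
        #     utilities.retry(
        #         lambda: all( main.CLIs[ n ].appStatus( appName ) == "ACTIVE"
        #                      for n in main.activeNodes ),
        #         False, attempts=5, sleep=2 )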