blob: 68f19d6db73ad48d8f20ceef096f032b4e1d33b2 [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002Copyright 2015 Open Networking Foundation ( ONF )
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -070011 ( at your option ) any later version.
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -070012
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
Jon Halla440e872016-03-31 15:15:50 -070021import json
Jon Hall41d39f12016-04-11 22:54:35 -070022import time
Jon Halle1a3b752015-07-22 13:02:46 -070023
Jon Hallf37d44d2017-05-24 10:37:30 -070024
Jon Hall41d39f12016-04-11 22:54:35 -070025class HA():
Jon Hall57b50432015-10-22 10:20:10 -070026
Jon Halla440e872016-03-31 15:15:50 -070027 def __init__( self ):
28 self.default = ''
Jon Hall57b50432015-10-22 10:20:10 -070029
Devin Lim58046fa2017-07-05 16:55:00 -070030 def customizeOnosGenPartitions( self ):
Devin Lim58046fa2017-07-05 16:55:00 -070031 # copy gen-partions file to ONOS
32 # NOTE: this assumes TestON and ONOS are on the same machine
33 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
34 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
35 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
36 main.ONOSbench.ip_address,
37 srcFile,
38 dstDir,
39 pwd=main.ONOSbench.pwd,
40 direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070041
Devin Lim58046fa2017-07-05 16:55:00 -070042 def cleanUpGenPartition( self ):
43 # clean up gen-partitions file
44 try:
45 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
46 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
47 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
48 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
49 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
50 str( main.ONOSbench.handle.before ) )
51 except ( pexpect.TIMEOUT, pexpect.EOF ):
52 main.log.exception( "ONOSbench: pexpect exception found:" +
53 main.ONOSbench.handle.before )
Devin Lim44075962017-08-11 10:56:37 -070054 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070055
Devin Lim58046fa2017-07-05 16:55:00 -070056 def startingMininet( self ):
57 main.step( "Starting Mininet" )
58 # scp topo file to mininet
59 # TODO: move to params?
60 topoName = "obelisk.py"
61 filePath = main.ONOSbench.home + "/tools/test/topos/"
62 main.ONOSbench.scp( main.Mininet1,
63 filePath + topoName,
64 main.Mininet1.home,
65 direction="to" )
66 mnResult = main.Mininet1.startNet()
67 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
68 onpass="Mininet Started",
69 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070070
Devin Lim58046fa2017-07-05 16:55:00 -070071 def scalingMetadata( self ):
72 import re
Devin Lim142b5342017-07-20 15:22:39 -070073 main.step( "Generate initial metadata file" )
Devin Lim58046fa2017-07-05 16:55:00 -070074 main.scaling = main.params[ 'scaling' ].split( "," )
75 main.log.debug( main.scaling )
76 scale = main.scaling.pop( 0 )
77 main.log.debug( scale )
78 if "e" in scale:
79 equal = True
80 else:
81 equal = False
82 main.log.debug( equal )
Devin Lim142b5342017-07-20 15:22:39 -070083 main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
84 genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
Devin Lim58046fa2017-07-05 16:55:00 -070085 utilities.assert_equals( expect=main.TRUE, actual=genResult,
86 onpass="New cluster metadata file generated",
87 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070088
Devin Lim58046fa2017-07-05 16:55:00 -070089 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070090 main.step( "Generate initial metadata file" )
91 if main.Cluster.numCtrls >= 5:
92 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070093 else:
94 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim142b5342017-07-20 15:22:39 -070095 genResult = main.Server.generateFile( main.Cluster.numCtrls )
Devin Lim58046fa2017-07-05 16:55:00 -070096 utilities.assert_equals( expect=main.TRUE, actual=genResult,
97 onpass="New cluster metadata file generated",
98 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070099
Devin Lim142b5342017-07-20 15:22:39 -0700100 def setServerForCluster( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700101 import os
102 main.step( "Setup server for cluster metadata file" )
103 main.serverPort = main.params[ 'server' ][ 'port' ]
104 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
105 main.log.debug( "Root dir: {}".format( rootDir ) )
106 status = main.Server.start( main.ONOSbench,
107 rootDir,
108 port=main.serverPort,
109 logDir=main.logdir + "/server.log" )
110 utilities.assert_equals( expect=main.TRUE, actual=status,
111 onpass="Server started",
112 onfail="Failled to start SimpleHTTPServer" )
113
Jon Hall4f360bc2017-09-07 10:19:52 -0700114 def copyBackupConfig( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700115 main.step( "Copying backup config files" )
116 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
117 cp = main.ONOSbench.scp( main.ONOSbench,
118 main.onosServicepath,
119 main.onosServicepath + ".backup",
120 direction="to" )
121
122 utilities.assert_equals( expect=main.TRUE,
123 actual=cp,
124 onpass="Copy backup config file succeeded",
125 onfail="Copy backup config file failed" )
Jon Hall4f360bc2017-09-07 10:19:52 -0700126
127 def setMetadataUrl( self ):
128 # NOTE: You should probably backup the config before and reset the config after the test
Devin Lim58046fa2017-07-05 16:55:00 -0700129 # we need to modify the onos-service file to use remote metadata file
130 # url for cluster metadata file
131 iface = main.params[ 'server' ].get( 'interface' )
132 ip = main.ONOSbench.getIpAddr( iface=iface )
133 metaFile = "cluster.json"
134 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
135 main.log.warn( javaArgs )
136 main.log.warn( repr( javaArgs ) )
137 handle = main.ONOSbench.handle
Jon Hall4173b242017-09-12 17:04:38 -0700138 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
139 main.onosServicepath )
Devin Lim58046fa2017-07-05 16:55:00 -0700140 main.log.warn( sed )
141 main.log.warn( repr( sed ) )
142 handle.sendline( sed )
143 handle.expect( metaFile )
144 output = handle.before
145 handle.expect( "\$" )
146 output += handle.before
147 main.log.debug( repr( output ) )
148
149 def cleanUpOnosService( self ):
150 # Cleanup custom onos-service file
151 main.ONOSbench.scp( main.ONOSbench,
152 main.onosServicepath + ".backup",
153 main.onosServicepath,
154 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700155
Jon Halla440e872016-03-31 15:15:50 -0700156 def consistentCheck( self ):
157 """
158 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700159
Jon Hallf37d44d2017-05-24 10:37:30 -0700160 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700161 - onosCounters is the parsed json output of the counters command on
162 all nodes
163 - consistent is main.TRUE if all "TestON" counters are consitent across
164 all nodes or main.FALSE
Jon Halla440e872016-03-31 15:15:50 -0700165 """
Jon Halle1a3b752015-07-22 13:02:46 -0700166 try:
Jon Halla440e872016-03-31 15:15:50 -0700167 # Get onos counters results
168 onosCountersRaw = []
169 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700170 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700171 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700172 name="counters-" + str( ctrl ),
173 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700174 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700175 'randomTime': True } )
176 threads.append( t )
177 t.start()
178 for t in threads:
179 t.join()
180 onosCountersRaw.append( t.result )
181 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700182 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700183 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700184 onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700185 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700186 main.log.error( "Could not parse counters response from " +
Devin Lim142b5342017-07-20 15:22:39 -0700187 str( main.Cluster.active( i ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700188 main.log.warn( repr( onosCountersRaw[ i ] ) )
189 onosCounters.append( [] )
190
191 testCounters = {}
192 # make a list of all the "TestON-*" counters in ONOS
Jon Hall41d39f12016-04-11 22:54:35 -0700193 # lookes like a dict whose keys are the name of the ONOS node and
194 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700195 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700196 # }
197 # NOTE: There is an assumtion that all nodes are active
198 # based on the above for loops
199 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700200 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700201 if 'TestON' in key:
Devin Lim142b5342017-07-20 15:22:39 -0700202 node = str( main.Cluster.active( controller[ 0 ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700203 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700204 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700205 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700206 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700207 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700208 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700209 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
210 if all( tmp ):
211 consistent = main.TRUE
212 else:
213 consistent = main.FALSE
214 main.log.error( "ONOS nodes have different values for counters:\n" +
215 testCounters )
216 return ( onosCounters, consistent )
217 except Exception:
218 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700219 main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700220
221 def counterCheck( self, counterName, counterValue ):
222 """
223 Checks that TestON counters are consistent across all nodes and that
224 specified counter is in ONOS with the given value
225 """
226 try:
227 correctResults = main.TRUE
228 # Get onos counters results and consistentCheck
229 onosCounters, consistent = self.consistentCheck()
230 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700231 for i in range( len( main.Cluster.active() ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700232 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700233 onosValue = None
234 try:
235 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700236 except AttributeError:
Devin Lim142b5342017-07-20 15:22:39 -0700237 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -0700238 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700239 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700240 correctResults = main.FALSE
241 if onosValue == counterValue:
242 main.log.info( counterName + " counter value is correct" )
243 else:
Jon Hall41d39f12016-04-11 22:54:35 -0700244 main.log.error( counterName +
245 " counter value is incorrect," +
246 " expected value: " + str( counterValue ) +
247 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700248 correctResults = main.FALSE
249 return consistent and correctResults
250 except Exception:
251 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700252 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700253
254 def consistentLeaderboards( self, nodes ):
255 TOPIC = 'org.onosproject.election'
256 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700257 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700258 for n in range( 5 ): # Retry in case election is still happening
259 leaderList = []
260 # Get all leaderboards
261 for cli in nodes:
262 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
263 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700264 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700265 leaderList is not None
266 main.log.debug( leaderList )
267 main.log.warn( result )
268 if result:
269 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700270 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700271 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
272 return ( result, leaderList )
273
Devin Lim58046fa2017-07-05 16:55:00 -0700274 def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
275 # GRAPHS
276 # NOTE: important params here:
277 # job = name of Jenkins job
278 # Plot Name = Plot-HA, only can be used if multiple plots
279 # index = The number of the graph under plot name
280 job = testName
281 graphs = '<ac:structured-macro ac:name="html">\n'
282 graphs += '<ac:plain-text-body><![CDATA[\n'
283 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
284 '/plot/' + plotName + '/getPlot?index=' + str( index ) +\
285 '&width=500&height=300"' +\
286 'noborder="0" width="500" height="300" scrolling="yes" ' +\
287 'seamless="seamless"></iframe>\n'
288 graphs += ']]></ac:plain-text-body>\n'
289 graphs += '</ac:structured-macro>\n'
290 main.log.wiki( graphs )
Jon Hallca319892017-06-15 15:25:22 -0700291
    def initialSetUp( self, serviceClean=False ):
        """
        rest of initialSetup

        Optionally starts a packet capture and reverts ONOS service-file
        changes, then verifies the ONOS nodes are up, activates the apps
        listed in the params file, applies ONOS configuration settings, and
        checks app IDs. Exits the test if the node check fails.

        serviceClean - when True, git-checkout the onos.conf/onos.service
                       init files to undo earlier customization
        """
        # Start a packet capture on the Mininet side if requested in params
        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        # Retry the cluster node check; a failure here aborts the whole test
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump the inactive karaf components of each node for debugging
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            # Verify each app reached the ACTIVE state
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This should be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700377
    def commonChecks( self ):
        """
        Run the standard set of cluster sanity checks: election topics,
        partitions, the intent pending map, and app IDs.
        """
        # TODO: make this assertable or assert in here?
        self.topicsCheck()
        self.partitionsCheck()
        self.pendingMapCheck()
        self.appCheck()
384
385 def topicsCheck( self, extraTopics=[] ):
386 """
387 Check for work partition topics in leaders output
388 """
389 leaders = main.Cluster.next().leaders()
390 missing = False
391 try:
392 if leaders:
393 parsedLeaders = json.loads( leaders )
394 output = json.dumps( parsedLeaders,
395 sort_keys=True,
396 indent=4,
397 separators=( ',', ': ' ) )
398 main.log.debug( "Leaders: " + output )
399 # check for all intent partitions
400 topics = []
401 for i in range( 14 ):
402 topics.append( "work-partition-" + str( i ) )
403 topics += extraTopics
404 main.log.debug( topics )
405 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
406 for topic in topics:
407 if topic not in ONOStopics:
408 main.log.error( "Error: " + topic +
409 " not in leaders" )
410 missing = True
411 else:
412 main.log.error( "leaders() returned None" )
413 except ( ValueError, TypeError ):
414 main.log.exception( "Error parsing leaders" )
415 main.log.error( repr( leaders ) )
416 if missing:
Jon Hall4173b242017-09-12 17:04:38 -0700417 # NOTE Can we refactor this into the Cluster class?
418 # Maybe an option to print the output of a command from each node?
Jon Hallca319892017-06-15 15:25:22 -0700419 for ctrl in main.Cluster.active():
420 response = ctrl.CLI.leaders( jsonFormat=False )
421 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
422 str( response ) )
423 return missing
424
425 def partitionsCheck( self ):
426 # TODO: return something assertable
427 partitions = main.Cluster.next().partitions()
428 try:
429 if partitions:
430 parsedPartitions = json.loads( partitions )
431 output = json.dumps( parsedPartitions,
432 sort_keys=True,
433 indent=4,
434 separators=( ',', ': ' ) )
435 main.log.debug( "Partitions: " + output )
436 # TODO check for a leader in all paritions
437 # TODO check for consistency among nodes
438 else:
439 main.log.error( "partitions() returned None" )
440 except ( ValueError, TypeError ):
441 main.log.exception( "Error parsing partitions" )
442 main.log.error( repr( partitions ) )
443
444 def pendingMapCheck( self ):
445 pendingMap = main.Cluster.next().pendingMap()
446 try:
447 if pendingMap:
448 parsedPending = json.loads( pendingMap )
449 output = json.dumps( parsedPending,
450 sort_keys=True,
451 indent=4,
452 separators=( ',', ': ' ) )
453 main.log.debug( "Pending map: " + output )
454 # TODO check something here?
455 else:
456 main.log.error( "pendingMap() returned None" )
457 except ( ValueError, TypeError ):
458 main.log.exception( "Error parsing pending map" )
459 main.log.error( repr( pendingMap ) )
460
461 def appCheck( self ):
462 """
463 Check App IDs on all nodes
464 """
465 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
466 appResults = main.Cluster.command( "appToIDCheck" )
467 appCheck = all( i == main.TRUE for i in appResults )
468 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700469 ctrl = main.Cluster.active( 0 )
Jon Hallca319892017-06-15 15:25:22 -0700470 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
471 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
472 return appCheck
473
Jon Halle0f0b342017-04-18 11:43:47 -0700474 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
475 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700476 completedValues = main.Cluster.command( "workQueueTotalCompleted",
477 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700478 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700479 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700480 completedResult = all( completedResults )
481 if not completedResult:
482 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
483 workQueueName, completed, completedValues ) )
484
485 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700486 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
487 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700488 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700489 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700490 inProgressResult = all( inProgressResults )
491 if not inProgressResult:
492 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
493 workQueueName, inProgress, inProgressValues ) )
494
495 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700496 pendingValues = main.Cluster.command( "workQueueTotalPending",
497 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700498 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700499 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700500 pendingResult = all( pendingResults )
501 if not pendingResult:
502 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
503 workQueueName, pending, pendingValues ) )
504 return completedResult and inProgressResult and pendingResult
505
Devin Lim58046fa2017-07-05 16:55:00 -0700506 def assignDevices( self, main ):
507 """
508 Assign devices to controllers
509 """
510 import re
Devin Lim58046fa2017-07-05 16:55:00 -0700511 assert main, "main not defined"
512 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700513
514 main.case( "Assigning devices to controllers" )
515 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
516 "and check that an ONOS node becomes the " + \
517 "master of the device."
518 main.step( "Assign switches to controllers" )
519
Jon Hallca319892017-06-15 15:25:22 -0700520 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -0700521 swList = []
522 for i in range( 1, 29 ):
523 swList.append( "s" + str( i ) )
524 main.Mininet1.assignSwController( sw=swList, ip=ipList )
525
526 mastershipCheck = main.TRUE
527 for i in range( 1, 29 ):
528 response = main.Mininet1.getSwController( "s" + str( i ) )
529 try:
530 main.log.info( str( response ) )
531 except Exception:
532 main.log.info( repr( response ) )
Devin Lim142b5342017-07-20 15:22:39 -0700533 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -0700534 if re.search( "tcp:" + ctrl.ipAddress, response ):
Devin Lim58046fa2017-07-05 16:55:00 -0700535 mastershipCheck = mastershipCheck and main.TRUE
536 else:
Jon Hall4173b242017-09-12 17:04:38 -0700537 main.log.error( "Error, node " + repr( ctrl ) + " is " +
Devin Lim58046fa2017-07-05 16:55:00 -0700538 "not in the list of controllers s" +
539 str( i ) + " is connecting to." )
540 mastershipCheck = main.FALSE
541 utilities.assert_equals(
542 expect=main.TRUE,
543 actual=mastershipCheck,
544 onpass="Switch mastership assigned correctly",
545 onfail="Switches not assigned correctly to controllers" )
Jon Hallca319892017-06-15 15:25:22 -0700546
Devin Lim58046fa2017-07-05 16:55:00 -0700547 def assignIntents( self, main ):
548 """
549 Assign intents
550 """
551 import time
552 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700553 assert main, "main not defined"
554 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700555 try:
556 main.HAlabels
557 except ( NameError, AttributeError ):
558 main.log.error( "main.HAlabels not defined, setting to []" )
559 main.HAlabels = []
560 try:
561 main.HAdata
562 except ( NameError, AttributeError ):
563 main.log.error( "data not defined, setting to []" )
564 main.HAdata = []
565 main.case( "Adding host Intents" )
566 main.caseExplanation = "Discover hosts by using pingall then " +\
567 "assign predetermined host-to-host intents." +\
568 " After installation, check that the intent" +\
569 " is distributed to all nodes and the state" +\
570 " is INSTALLED"
571
572 # install onos-app-fwd
573 main.step( "Install reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700574 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -0700575 installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700576 utilities.assert_equals( expect=main.TRUE, actual=installResults,
577 onpass="Install fwd successful",
578 onfail="Install fwd failed" )
579
580 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700581 appCheck = self.appCheck()
582 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700583 onpass="App Ids seem to be correct",
584 onfail="Something is wrong with app Ids" )
585
586 main.step( "Discovering Hosts( Via pingall for now )" )
587 # FIXME: Once we have a host discovery mechanism, use that instead
588 # REACTIVE FWD test
589 pingResult = main.FALSE
590 passMsg = "Reactive Pingall test passed"
591 time1 = time.time()
592 pingResult = main.Mininet1.pingall()
593 time2 = time.time()
594 if not pingResult:
595 main.log.warn( "First pingall failed. Trying again..." )
596 pingResult = main.Mininet1.pingall()
597 passMsg += " on the second try"
598 utilities.assert_equals(
599 expect=main.TRUE,
600 actual=pingResult,
601 onpass=passMsg,
602 onfail="Reactive Pingall failed, " +
603 "one or more ping pairs failed" )
604 main.log.info( "Time for pingall: %2f seconds" %
605 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700606 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700607 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700608 # timeout for fwd flows
609 time.sleep( 11 )
610 # uninstall onos-app-fwd
611 main.step( "Uninstall reactive forwarding app" )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700612 uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700613 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
614 onpass="Uninstall fwd successful",
615 onfail="Uninstall fwd failed" )
616
617 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700618 appCheck2 = self.appCheck()
619 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700620 onpass="App Ids seem to be correct",
621 onfail="Something is wrong with app Ids" )
622
623 main.step( "Add host intents via cli" )
624 intentIds = []
625 # TODO: move the host numbers to params
626 # Maybe look at all the paths we ping?
627 intentAddResult = True
628 hostResult = main.TRUE
629 for i in range( 8, 18 ):
630 main.log.info( "Adding host intent between h" + str( i ) +
631 " and h" + str( i + 10 ) )
632 host1 = "00:00:00:00:00:" + \
633 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
634 host2 = "00:00:00:00:00:" + \
635 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
636 # NOTE: getHost can return None
Jon Hallca319892017-06-15 15:25:22 -0700637 host1Dict = onosCli.CLI.getHost( host1 )
638 host2Dict = onosCli.CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700639 host1Id = None
640 host2Id = None
641 if host1Dict and host2Dict:
642 host1Id = host1Dict.get( 'id', None )
643 host2Id = host2Dict.get( 'id', None )
644 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700645 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700646 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700647 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700648 if tmpId:
649 main.log.info( "Added intent with id: " + tmpId )
650 intentIds.append( tmpId )
651 else:
652 main.log.error( "addHostIntent returned: " +
653 repr( tmpId ) )
654 else:
655 main.log.error( "Error, getHost() failed for h" + str( i ) +
656 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700657 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700658 try:
Jon Hallca319892017-06-15 15:25:22 -0700659 output = json.dumps( json.loads( hosts ),
660 sort_keys=True,
661 indent=4,
662 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700663 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700664 output = repr( hosts )
665 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700666 hostResult = main.FALSE
667 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
668 onpass="Found a host id for each host",
669 onfail="Error looking up host ids" )
670
671 intentStart = time.time()
672 onosIds = onosCli.getAllIntentsId()
673 main.log.info( "Submitted intents: " + str( intentIds ) )
674 main.log.info( "Intents in ONOS: " + str( onosIds ) )
675 for intent in intentIds:
676 if intent in onosIds:
677 pass # intent submitted is in onos
678 else:
679 intentAddResult = False
680 if intentAddResult:
681 intentStop = time.time()
682 else:
683 intentStop = None
684 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700685 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700686 intentStates = []
687 installedCheck = True
688 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
689 count = 0
690 try:
691 for intent in json.loads( intents ):
692 state = intent.get( 'state', None )
693 if "INSTALLED" not in state:
694 installedCheck = False
695 intentId = intent.get( 'id', None )
696 intentStates.append( ( intentId, state ) )
697 except ( ValueError, TypeError ):
698 main.log.exception( "Error parsing intents" )
699 # add submitted intents not in the store
700 tmplist = [ i for i, s in intentStates ]
701 missingIntents = False
702 for i in intentIds:
703 if i not in tmplist:
704 intentStates.append( ( i, " - " ) )
705 missingIntents = True
706 intentStates.sort()
707 for i, s in intentStates:
708 count += 1
709 main.log.info( "%-6s%-15s%-15s" %
710 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700711 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700712
713 intentAddResult = bool( intentAddResult and not missingIntents and
714 installedCheck )
715 if not intentAddResult:
716 main.log.error( "Error in pushing host intents to ONOS" )
717
718 main.step( "Intent Anti-Entropy dispersion" )
719 for j in range( 100 ):
720 correct = True
721 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700722 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700723 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700724 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700725 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700726 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700727 str( sorted( onosIds ) ) )
728 if sorted( ids ) != sorted( intentIds ):
729 main.log.warn( "Set of intent IDs doesn't match" )
730 correct = False
731 break
732 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700733 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700734 for intent in intents:
735 if intent[ 'state' ] != "INSTALLED":
736 main.log.warn( "Intent " + intent[ 'id' ] +
737 " is " + intent[ 'state' ] )
738 correct = False
739 break
740 if correct:
741 break
742 else:
743 time.sleep( 1 )
744 if not intentStop:
745 intentStop = time.time()
746 global gossipTime
747 gossipTime = intentStop - intentStart
748 main.log.info( "It took about " + str( gossipTime ) +
749 " seconds for all intents to appear in each node" )
750 append = False
751 title = "Gossip Intents"
752 count = 1
753 while append is False:
754 curTitle = title + str( count )
755 if curTitle not in main.HAlabels:
756 main.HAlabels.append( curTitle )
757 main.HAdata.append( str( gossipTime ) )
758 append = True
759 else:
760 count += 1
761 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700762 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700763 utilities.assert_greater_equals(
764 expect=maxGossipTime, actual=gossipTime,
765 onpass="ECM anti-entropy for intents worked within " +
766 "expected time",
767 onfail="Intent ECM anti-entropy took too long. " +
768 "Expected time:{}, Actual time:{}".format( maxGossipTime,
769 gossipTime ) )
770 if gossipTime <= maxGossipTime:
771 intentAddResult = True
772
Jon Hallca319892017-06-15 15:25:22 -0700773 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700774 if not intentAddResult or "key" in pendingMap:
775 import time
776 installedCheck = True
777 main.log.info( "Sleeping 60 seconds to see if intents are found" )
778 time.sleep( 60 )
779 onosIds = onosCli.getAllIntentsId()
780 main.log.info( "Submitted intents: " + str( intentIds ) )
781 main.log.info( "Intents in ONOS: " + str( onosIds ) )
782 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700783 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700784 intentStates = []
785 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
786 count = 0
787 try:
788 for intent in json.loads( intents ):
789 # Iter through intents of a node
790 state = intent.get( 'state', None )
791 if "INSTALLED" not in state:
792 installedCheck = False
793 intentId = intent.get( 'id', None )
794 intentStates.append( ( intentId, state ) )
795 except ( ValueError, TypeError ):
796 main.log.exception( "Error parsing intents" )
797 # add submitted intents not in the store
798 tmplist = [ i for i, s in intentStates ]
799 for i in intentIds:
800 if i not in tmplist:
801 intentStates.append( ( i, " - " ) )
802 intentStates.sort()
803 for i, s in intentStates:
804 count += 1
805 main.log.info( "%-6s%-15s%-15s" %
806 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700807 self.topicsCheck( [ "org.onosproject.election" ] )
808 self.partitionsCheck()
809 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700810
    def pingAcrossHostIntent( self, main ):
        """
        Ping across added host intents

        Polls the intent store ( up to 40 one-second retries ) until every
        intent is INSTALLED, then pings between the Mininet host pairs
        h<i> <-> h<i+10> for i in 8..17. Afterwards checks topic leadership,
        partitions and the pending map. If the intents never all reached
        INSTALLED, waits 60 more seconds, re-dumps intent state and flow
        rules, and repeats the pings.
        """
        import json
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.Cluster.next()
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until all intents are INSTALLED or we exhaust the retries
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.CLI.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                        main.log.debug( "Failed intent: " + str( intent ) )
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                # intents was not valid JSON; leave intentStates as collected
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                # Back off briefly before re-reading the intent states
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host pairs h<i> and h<i+10> are the ones joined by host intents
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            try:
                tmpIntents = onosCli.CLI.intents()
                output = json.dumps( json.loads( tmpIntents ),
                                    sort_keys=True,
                                    indent=4,
                                    separators=( ',', ': ' ) )
            except ( ValueError, TypeError ):
                output = repr( tmpIntents )
            main.log.debug( "ONOS1 intents: " + output )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        # NOTE(review): expect=False implies topicsCheck() returns a falsy
        # value when no topics are missing leaders -- confirm against the
        # topicsCheck implementation
        topicsCheck = self.topicsCheck()
        utilities.assert_equals( expect=False, actual=topicsCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost" )
        self.partitionsCheck()
        self.pendingMapCheck()

        if not installedCheck:
            # Some intents never reached INSTALLED; give the cluster another
            # minute and dump the state again before re-testing connectivity
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.CLI.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            self.commonChecks()

        # Print flowrules
        main.log.debug( onosCli.CLI.flows() )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( str( onosCli.name ) + " intents: " )
            try:
                tmpIntents = onosCli.CLI.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )
958
Devin Lim142b5342017-07-20 15:22:39 -0700959 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700960 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700961 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700962 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700963 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700964 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -0700965 actual=rolesNotNull,
966 onpass="Each device has a master",
967 onfail="Some devices don't have a master assigned" )
968
Devin Lim142b5342017-07-20 15:22:39 -0700969 def checkTheRole( self ):
970 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -0700971 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -0700972 consistentMastership = True
973 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -0700974 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -0700975 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700976 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -0700977 main.log.error( "Error in getting " + node + " roles" )
978 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -0700979 repr( ONOSMastership[ i ] ) )
980 rolesResults = False
981 utilities.assert_equals(
982 expect=True,
983 actual=rolesResults,
984 onpass="No error in reading roles output",
985 onfail="Error in reading roles from ONOS" )
986
987 main.step( "Check for consistency in roles from each controller" )
988 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
989 main.log.info(
990 "Switch roles are consistent across all ONOS nodes" )
991 else:
992 consistentMastership = False
993 utilities.assert_equals(
994 expect=True,
995 actual=consistentMastership,
996 onpass="Switch roles are consistent across all ONOS nodes",
997 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -0700998 return ONOSMastership, rolesResults, consistentMastership
999
1000 def checkingIntents( self ):
1001 main.step( "Get the intents from each controller" )
1002 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
1003 intentsResults = True
1004 for i in range( len( ONOSIntents ) ):
1005 node = str( main.Cluster.active( i ) )
1006 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1007 main.log.error( "Error in getting " + node + " intents" )
1008 main.log.warn( node + " intents response: " +
1009 repr( ONOSIntents[ i ] ) )
1010 intentsResults = False
1011 utilities.assert_equals(
1012 expect=True,
1013 actual=intentsResults,
1014 onpass="No error in reading intents output",
1015 onfail="Error in reading intents from ONOS" )
1016 return ONOSIntents, intentsResults
1017
1018 def readingState( self, main ):
1019 """
1020 Reading state of ONOS
1021 """
1022 import json
1023 import time
1024 assert main, "main not defined"
1025 assert utilities.assert_equals, "utilities.assert_equals not defined"
1026 try:
1027 from tests.dependencies.topology import Topology
1028 except ImportError:
1029 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001030 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001031 try:
1032 main.topoRelated
1033 except ( NameError, AttributeError ):
1034 main.topoRelated = Topology()
1035 main.case( "Setting up and gathering data for current state" )
1036 # The general idea for this test case is to pull the state of
1037 # ( intents,flows, topology,... ) from each ONOS node
1038 # We can then compare them with each other and also with past states
1039
1040 global mastershipState
1041 mastershipState = '[]'
1042
1043 self.checkRoleNotNull()
1044
1045 main.step( "Get the Mastership of each switch from each controller" )
1046 mastershipCheck = main.FALSE
1047
1048 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001049
1050 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001051 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001052 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001053 try:
1054 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001055 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001056 json.dumps(
1057 json.loads( ONOSMastership[ i ] ),
1058 sort_keys=True,
1059 indent=4,
1060 separators=( ',', ': ' ) ) )
1061 except ( ValueError, TypeError ):
1062 main.log.warn( repr( ONOSMastership[ i ] ) )
1063 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001064 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001065 mastershipState = ONOSMastership[ 0 ]
1066
Devin Lim58046fa2017-07-05 16:55:00 -07001067 global intentState
1068 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001069 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001070 intentCheck = main.FALSE
1071 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001072
Devin Lim58046fa2017-07-05 16:55:00 -07001073 main.step( "Check for consistency in Intents from each controller" )
1074 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1075 main.log.info( "Intents are consistent across all ONOS " +
1076 "nodes" )
1077 else:
1078 consistentIntents = False
1079 main.log.error( "Intents not consistent" )
1080 utilities.assert_equals(
1081 expect=True,
1082 actual=consistentIntents,
1083 onpass="Intents are consistent across all ONOS nodes",
1084 onfail="ONOS nodes have different views of intents" )
1085
1086 if intentsResults:
1087 # Try to make it easy to figure out what is happening
1088 #
1089 # Intent ONOS1 ONOS2 ...
1090 # 0x01 INSTALLED INSTALLING
1091 # ... ... ...
1092 # ... ... ...
1093 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001094 for ctrl in main.Cluster.active():
1095 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001096 main.log.warn( title )
1097 # get all intent keys in the cluster
1098 keys = []
1099 try:
1100 # Get the set of all intent keys
1101 for nodeStr in ONOSIntents:
1102 node = json.loads( nodeStr )
1103 for intent in node:
1104 keys.append( intent.get( 'id' ) )
1105 keys = set( keys )
1106 # For each intent key, print the state on each node
1107 for key in keys:
1108 row = "%-13s" % key
1109 for nodeStr in ONOSIntents:
1110 node = json.loads( nodeStr )
1111 for intent in node:
1112 if intent.get( 'id', "Error" ) == key:
1113 row += "%-15s" % intent.get( 'state' )
1114 main.log.warn( row )
1115 # End of intent state table
1116 except ValueError as e:
1117 main.log.exception( e )
1118 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1119
1120 if intentsResults and not consistentIntents:
1121 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001122 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001123 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1124 sort_keys=True,
1125 indent=4,
1126 separators=( ',', ': ' ) ) )
1127 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001128 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001129 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001130 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001131 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1132 sort_keys=True,
1133 indent=4,
1134 separators=( ',', ': ' ) ) )
1135 else:
Jon Hallca319892017-06-15 15:25:22 -07001136 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001137 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001138 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001139 intentState = ONOSIntents[ 0 ]
1140
1141 main.step( "Get the flows from each controller" )
1142 global flowState
1143 flowState = []
Jon Hall4173b242017-09-12 17:04:38 -07001144 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001145 ONOSFlowsJson = []
1146 flowCheck = main.FALSE
1147 consistentFlows = True
1148 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001149 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001150 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001151 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001152 main.log.error( "Error in getting " + node + " flows" )
1153 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001154 repr( ONOSFlows[ i ] ) )
1155 flowsResults = False
1156 ONOSFlowsJson.append( None )
1157 else:
1158 try:
1159 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1160 except ( ValueError, TypeError ):
1161 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001162 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001163 " response as json." )
1164 main.log.error( repr( ONOSFlows[ i ] ) )
1165 ONOSFlowsJson.append( None )
1166 flowsResults = False
1167 utilities.assert_equals(
1168 expect=True,
1169 actual=flowsResults,
1170 onpass="No error in reading flows output",
1171 onfail="Error in reading flows from ONOS" )
1172
1173 main.step( "Check for consistency in Flows from each controller" )
1174 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1175 if all( tmp ):
1176 main.log.info( "Flow count is consistent across all ONOS nodes" )
1177 else:
1178 consistentFlows = False
1179 utilities.assert_equals(
1180 expect=True,
1181 actual=consistentFlows,
1182 onpass="The flow count is consistent across all ONOS nodes",
1183 onfail="ONOS nodes have different flow counts" )
1184
1185 if flowsResults and not consistentFlows:
1186 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001187 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001188 try:
1189 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001190 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001191 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1192 indent=4, separators=( ',', ': ' ) ) )
1193 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001194 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001195 repr( ONOSFlows[ i ] ) )
1196 elif flowsResults and consistentFlows:
1197 flowCheck = main.TRUE
1198 flowState = ONOSFlows[ 0 ]
1199
1200 main.step( "Get the OF Table entries" )
1201 global flows
1202 flows = []
1203 for i in range( 1, 29 ):
1204 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1205 if flowCheck == main.FALSE:
1206 for table in flows:
1207 main.log.warn( table )
1208 # TODO: Compare switch flow tables with ONOS flow tables
1209
1210 main.step( "Start continuous pings" )
1211 main.Mininet2.pingLong(
1212 src=main.params[ 'PING' ][ 'source1' ],
1213 target=main.params[ 'PING' ][ 'target1' ],
1214 pingTime=500 )
1215 main.Mininet2.pingLong(
1216 src=main.params[ 'PING' ][ 'source2' ],
1217 target=main.params[ 'PING' ][ 'target2' ],
1218 pingTime=500 )
1219 main.Mininet2.pingLong(
1220 src=main.params[ 'PING' ][ 'source3' ],
1221 target=main.params[ 'PING' ][ 'target3' ],
1222 pingTime=500 )
1223 main.Mininet2.pingLong(
1224 src=main.params[ 'PING' ][ 'source4' ],
1225 target=main.params[ 'PING' ][ 'target4' ],
1226 pingTime=500 )
1227 main.Mininet2.pingLong(
1228 src=main.params[ 'PING' ][ 'source5' ],
1229 target=main.params[ 'PING' ][ 'target5' ],
1230 pingTime=500 )
1231 main.Mininet2.pingLong(
1232 src=main.params[ 'PING' ][ 'source6' ],
1233 target=main.params[ 'PING' ][ 'target6' ],
1234 pingTime=500 )
1235 main.Mininet2.pingLong(
1236 src=main.params[ 'PING' ][ 'source7' ],
1237 target=main.params[ 'PING' ][ 'target7' ],
1238 pingTime=500 )
1239 main.Mininet2.pingLong(
1240 src=main.params[ 'PING' ][ 'source8' ],
1241 target=main.params[ 'PING' ][ 'target8' ],
1242 pingTime=500 )
1243 main.Mininet2.pingLong(
1244 src=main.params[ 'PING' ][ 'source9' ],
1245 target=main.params[ 'PING' ][ 'target9' ],
1246 pingTime=500 )
1247 main.Mininet2.pingLong(
1248 src=main.params[ 'PING' ][ 'source10' ],
1249 target=main.params[ 'PING' ][ 'target10' ],
1250 pingTime=500 )
1251
1252 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001253 devices = main.topoRelated.getAll( "devices" )
1254 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1255 ports = main.topoRelated.getAll( "ports" )
1256 links = main.topoRelated.getAll( "links" )
1257 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001258 # Compare json objects for hosts and dataplane clusters
1259
1260 # hosts
1261 main.step( "Host view is consistent across ONOS nodes" )
1262 consistentHostsResult = main.TRUE
1263 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001264 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001265 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1266 if hosts[ controller ] == hosts[ 0 ]:
1267 continue
1268 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001269 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001270 controllerStr +
1271 " is inconsistent with ONOS1" )
1272 main.log.warn( repr( hosts[ controller ] ) )
1273 consistentHostsResult = main.FALSE
1274
1275 else:
Jon Hallca319892017-06-15 15:25:22 -07001276 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001277 controllerStr )
1278 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001279 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001280 " hosts response: " +
1281 repr( hosts[ controller ] ) )
1282 utilities.assert_equals(
1283 expect=main.TRUE,
1284 actual=consistentHostsResult,
1285 onpass="Hosts view is consistent across all ONOS nodes",
1286 onfail="ONOS nodes have different views of hosts" )
1287
1288 main.step( "Each host has an IP address" )
1289 ipResult = main.TRUE
1290 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001291 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001292 if hosts[ controller ]:
1293 for host in hosts[ controller ]:
1294 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001295 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001296 controllerStr + ": " + str( host ) )
1297 ipResult = main.FALSE
1298 utilities.assert_equals(
1299 expect=main.TRUE,
1300 actual=ipResult,
1301 onpass="The ips of the hosts aren't empty",
1302 onfail="The ip of at least one host is missing" )
1303
1304 # Strongly connected clusters of devices
1305 main.step( "Cluster view is consistent across ONOS nodes" )
1306 consistentClustersResult = main.TRUE
1307 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001308 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001309 if "Error" not in clusters[ controller ]:
1310 if clusters[ controller ] == clusters[ 0 ]:
1311 continue
1312 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001313 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001314 " is inconsistent with ONOS1" )
1315 consistentClustersResult = main.FALSE
1316
1317 else:
1318 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001319 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001320 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001321 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001322 " clusters response: " +
1323 repr( clusters[ controller ] ) )
1324 utilities.assert_equals(
1325 expect=main.TRUE,
1326 actual=consistentClustersResult,
1327 onpass="Clusters view is consistent across all ONOS nodes",
1328 onfail="ONOS nodes have different views of clusters" )
1329 if not consistentClustersResult:
1330 main.log.debug( clusters )
1331
1332 # there should always only be one cluster
1333 main.step( "Cluster view correct across ONOS nodes" )
1334 try:
1335 numClusters = len( json.loads( clusters[ 0 ] ) )
1336 except ( ValueError, TypeError ):
1337 main.log.exception( "Error parsing clusters[0]: " +
1338 repr( clusters[ 0 ] ) )
1339 numClusters = "ERROR"
1340 utilities.assert_equals(
1341 expect=1,
1342 actual=numClusters,
1343 onpass="ONOS shows 1 SCC",
1344 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1345
1346 main.step( "Comparing ONOS topology to MN" )
1347 devicesResults = main.TRUE
1348 linksResults = main.TRUE
1349 hostsResults = main.TRUE
1350 mnSwitches = main.Mininet1.getSwitches()
1351 mnLinks = main.Mininet1.getLinks()
1352 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001353 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001354 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001355 currentDevicesResult = main.topoRelated.compareDevicePort(
1356 main.Mininet1, controller,
1357 mnSwitches, devices, ports )
1358 utilities.assert_equals( expect=main.TRUE,
1359 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001360 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001361 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001362 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001363 " Switches view is incorrect" )
1364
1365 currentLinksResult = main.topoRelated.compareBase( links, controller,
1366 main.Mininet1.compareLinks,
1367 [ mnSwitches, mnLinks ] )
1368 utilities.assert_equals( expect=main.TRUE,
1369 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001370 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001371 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001372 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001373 " links view is incorrect" )
1374
1375 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1376 currentHostsResult = main.Mininet1.compareHosts(
1377 mnHosts,
1378 hosts[ controller ] )
1379 else:
1380 currentHostsResult = main.FALSE
1381 utilities.assert_equals( expect=main.TRUE,
1382 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001383 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001384 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001385 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001386 " hosts don't match Mininet" )
1387
1388 devicesResults = devicesResults and currentDevicesResult
1389 linksResults = linksResults and currentLinksResult
1390 hostsResults = hostsResults and currentHostsResult
1391
1392 main.step( "Device information is correct" )
1393 utilities.assert_equals(
1394 expect=main.TRUE,
1395 actual=devicesResults,
1396 onpass="Device information is correct",
1397 onfail="Device information is incorrect" )
1398
1399 main.step( "Links are correct" )
1400 utilities.assert_equals(
1401 expect=main.TRUE,
1402 actual=linksResults,
1403 onpass="Link are correct",
1404 onfail="Links are incorrect" )
1405
1406 main.step( "Hosts are correct" )
1407 utilities.assert_equals(
1408 expect=main.TRUE,
1409 actual=hostsResults,
1410 onpass="Hosts are correct",
1411 onfail="Hosts are incorrect" )
1412
1413 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001414 """
1415 Check for basic functionality with distributed primitives
1416 """
Jon Halle0f0b342017-04-18 11:43:47 -07001417 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001418 try:
1419 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001420 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001421 assert main.pCounterName, "main.pCounterName not defined"
1422 assert main.onosSetName, "main.onosSetName not defined"
1423 # NOTE: assert fails if value is 0/None/Empty/False
1424 try:
1425 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001426 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001427 main.log.error( "main.pCounterValue not defined, setting to 0" )
1428 main.pCounterValue = 0
1429 try:
1430 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001431 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001432 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001433 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001434 # Variables for the distributed primitives tests. These are local only
1435 addValue = "a"
1436 addAllValue = "a b c d e f"
1437 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001438 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001439 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001440 workQueueName = "TestON-Queue"
1441 workQueueCompleted = 0
1442 workQueueInProgress = 0
1443 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001444
1445 description = "Check for basic functionality with distributed " +\
1446 "primitives"
1447 main.case( description )
1448 main.caseExplanation = "Test the methods of the distributed " +\
1449 "primitives (counters and sets) throught the cli"
1450 # DISTRIBUTED ATOMIC COUNTERS
1451 # Partitioned counters
1452 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001453 pCounters = main.Cluster.command( "counterTestAddAndGet",
1454 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001455 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001456 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001457 main.pCounterValue += 1
1458 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001459 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001460 pCounterResults = True
1461 for i in addedPValues:
1462 tmpResult = i in pCounters
1463 pCounterResults = pCounterResults and tmpResult
1464 if not tmpResult:
1465 main.log.error( str( i ) + " is not in partitioned "
1466 "counter incremented results" )
1467 utilities.assert_equals( expect=True,
1468 actual=pCounterResults,
1469 onpass="Default counter incremented",
1470 onfail="Error incrementing default" +
1471 " counter" )
1472
1473 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001474 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1475 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001476 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001477 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001478 addedPValues.append( main.pCounterValue )
1479 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001480 # Check that counter incremented numController times
1481 pCounterResults = True
1482 for i in addedPValues:
1483 tmpResult = i in pCounters
1484 pCounterResults = pCounterResults and tmpResult
1485 if not tmpResult:
1486 main.log.error( str( i ) + " is not in partitioned "
1487 "counter incremented results" )
1488 utilities.assert_equals( expect=True,
1489 actual=pCounterResults,
1490 onpass="Default counter incremented",
1491 onfail="Error incrementing default" +
1492 " counter" )
1493
1494 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001495 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001496 utilities.assert_equals( expect=main.TRUE,
1497 actual=incrementCheck,
1498 onpass="Added counters are correct",
1499 onfail="Added counters are incorrect" )
1500
1501 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001502 pCounters = main.Cluster.command( "counterTestAddAndGet",
1503 args=[ main.pCounterName ],
1504 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001505 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001506 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001507 main.pCounterValue += -8
1508 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001509 # Check that counter incremented numController times
1510 pCounterResults = True
1511 for i in addedPValues:
1512 tmpResult = i in pCounters
1513 pCounterResults = pCounterResults and tmpResult
1514 if not tmpResult:
1515 main.log.error( str( i ) + " is not in partitioned "
1516 "counter incremented results" )
1517 utilities.assert_equals( expect=True,
1518 actual=pCounterResults,
1519 onpass="Default counter incremented",
1520 onfail="Error incrementing default" +
1521 " counter" )
1522
1523 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001524 pCounters = main.Cluster.command( "counterTestAddAndGet",
1525 args=[ main.pCounterName ],
1526 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001527 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001528 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001529 main.pCounterValue += 5
1530 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001531
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001532 # Check that counter incremented numController times
1533 pCounterResults = True
1534 for i in addedPValues:
1535 tmpResult = i in pCounters
1536 pCounterResults = pCounterResults and tmpResult
1537 if not tmpResult:
1538 main.log.error( str( i ) + " is not in partitioned "
1539 "counter incremented results" )
1540 utilities.assert_equals( expect=True,
1541 actual=pCounterResults,
1542 onpass="Default counter incremented",
1543 onfail="Error incrementing default" +
1544 " counter" )
1545
1546 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001547 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1548 args=[ main.pCounterName ],
1549 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001550 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001551 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001552 addedPValues.append( main.pCounterValue )
1553 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001554 # Check that counter incremented numController times
1555 pCounterResults = True
1556 for i in addedPValues:
1557 tmpResult = i in pCounters
1558 pCounterResults = pCounterResults and tmpResult
1559 if not tmpResult:
1560 main.log.error( str( i ) + " is not in partitioned "
1561 "counter incremented results" )
1562 utilities.assert_equals( expect=True,
1563 actual=pCounterResults,
1564 onpass="Default counter incremented",
1565 onfail="Error incrementing default" +
1566 " counter" )
1567
1568 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001569 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001570 utilities.assert_equals( expect=main.TRUE,
1571 actual=incrementCheck,
1572 onpass="Added counters are correct",
1573 onfail="Added counters are incorrect" )
1574
1575 # DISTRIBUTED SETS
1576 main.step( "Distributed Set get" )
1577 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001578 getResponses = main.Cluster.command( "setTestGet",
1579 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001580 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001581 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001582 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001583 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001584 current = set( getResponses[ i ] )
1585 if len( current ) == len( getResponses[ i ] ):
1586 # no repeats
1587 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001588 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001589 " has incorrect view" +
1590 " of set " + main.onosSetName + ":\n" +
1591 str( getResponses[ i ] ) )
1592 main.log.debug( "Expected: " + str( main.onosSet ) )
1593 main.log.debug( "Actual: " + str( current ) )
1594 getResults = main.FALSE
1595 else:
1596 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001597 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001598 " has repeat elements in" +
1599 " set " + main.onosSetName + ":\n" +
1600 str( getResponses[ i ] ) )
1601 getResults = main.FALSE
1602 elif getResponses[ i ] == main.ERROR:
1603 getResults = main.FALSE
1604 utilities.assert_equals( expect=main.TRUE,
1605 actual=getResults,
1606 onpass="Set elements are correct",
1607 onfail="Set elements are incorrect" )
1608
1609 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001610 sizeResponses = main.Cluster.command( "setTestSize",
1611 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001612 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001613 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001614 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001615 if size != sizeResponses[ i ]:
1616 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001617 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001618 " expected a size of " + str( size ) +
1619 " for set " + main.onosSetName +
1620 " but got " + str( sizeResponses[ i ] ) )
1621 utilities.assert_equals( expect=main.TRUE,
1622 actual=sizeResults,
1623 onpass="Set sizes are correct",
1624 onfail="Set sizes are incorrect" )
1625
1626 main.step( "Distributed Set add()" )
1627 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001628 addResponses = main.Cluster.command( "setTestAdd",
1629 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001630 # main.TRUE = successfully changed the set
1631 # main.FALSE = action resulted in no change in set
1632 # main.ERROR - Some error in executing the function
1633 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001634 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001635 if addResponses[ i ] == main.TRUE:
1636 # All is well
1637 pass
1638 elif addResponses[ i ] == main.FALSE:
1639 # Already in set, probably fine
1640 pass
1641 elif addResponses[ i ] == main.ERROR:
1642 # Error in execution
1643 addResults = main.FALSE
1644 else:
1645 # unexpected result
1646 addResults = main.FALSE
1647 if addResults != main.TRUE:
1648 main.log.error( "Error executing set add" )
1649
1650 # Check if set is still correct
1651 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001652 getResponses = main.Cluster.command( "setTestGet",
1653 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001654 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001655 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001656 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001657 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001658 current = set( getResponses[ i ] )
1659 if len( current ) == len( getResponses[ i ] ):
1660 # no repeats
1661 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001662 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001663 " of set " + main.onosSetName + ":\n" +
1664 str( getResponses[ i ] ) )
1665 main.log.debug( "Expected: " + str( main.onosSet ) )
1666 main.log.debug( "Actual: " + str( current ) )
1667 getResults = main.FALSE
1668 else:
1669 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001670 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001671 " set " + main.onosSetName + ":\n" +
1672 str( getResponses[ i ] ) )
1673 getResults = main.FALSE
1674 elif getResponses[ i ] == main.ERROR:
1675 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001676 sizeResponses = main.Cluster.command( "setTestSize",
1677 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001678 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001679 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001680 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001681 if size != sizeResponses[ i ]:
1682 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001683 main.log.error( node + " expected a size of " +
1684 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001685 " but got " + str( sizeResponses[ i ] ) )
1686 addResults = addResults and getResults and sizeResults
1687 utilities.assert_equals( expect=main.TRUE,
1688 actual=addResults,
1689 onpass="Set add correct",
1690 onfail="Set add was incorrect" )
1691
1692 main.step( "Distributed Set addAll()" )
1693 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001694 addResponses = main.Cluster.command( "setTestAdd",
1695 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001696 # main.TRUE = successfully changed the set
1697 # main.FALSE = action resulted in no change in set
1698 # main.ERROR - Some error in executing the function
1699 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001700 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001701 if addResponses[ i ] == main.TRUE:
1702 # All is well
1703 pass
1704 elif addResponses[ i ] == main.FALSE:
1705 # Already in set, probably fine
1706 pass
1707 elif addResponses[ i ] == main.ERROR:
1708 # Error in execution
1709 addAllResults = main.FALSE
1710 else:
1711 # unexpected result
1712 addAllResults = main.FALSE
1713 if addAllResults != main.TRUE:
1714 main.log.error( "Error executing set addAll" )
1715
1716 # Check if set is still correct
1717 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001718 getResponses = main.Cluster.command( "setTestGet",
1719 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001720 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001721 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001722 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001723 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001724 current = set( getResponses[ i ] )
1725 if len( current ) == len( getResponses[ i ] ):
1726 # no repeats
1727 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001728 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001729 " of set " + main.onosSetName + ":\n" +
1730 str( getResponses[ i ] ) )
1731 main.log.debug( "Expected: " + str( main.onosSet ) )
1732 main.log.debug( "Actual: " + str( current ) )
1733 getResults = main.FALSE
1734 else:
1735 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001736 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001737 " set " + main.onosSetName + ":\n" +
1738 str( getResponses[ i ] ) )
1739 getResults = main.FALSE
1740 elif getResponses[ i ] == main.ERROR:
1741 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001742 sizeResponses = main.Cluster.command( "setTestSize",
1743 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001744 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001745 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001746 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001747 if size != sizeResponses[ i ]:
1748 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001749 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001750 " for set " + main.onosSetName +
1751 " but got " + str( sizeResponses[ i ] ) )
1752 addAllResults = addAllResults and getResults and sizeResults
1753 utilities.assert_equals( expect=main.TRUE,
1754 actual=addAllResults,
1755 onpass="Set addAll correct",
1756 onfail="Set addAll was incorrect" )
1757
1758 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001759 containsResponses = main.Cluster.command( "setTestGet",
1760 args=[ main.onosSetName ],
1761 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001762 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001763 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001764 if containsResponses[ i ] == main.ERROR:
1765 containsResults = main.FALSE
1766 else:
1767 containsResults = containsResults and\
1768 containsResponses[ i ][ 1 ]
1769 utilities.assert_equals( expect=main.TRUE,
1770 actual=containsResults,
1771 onpass="Set contains is functional",
1772 onfail="Set contains failed" )
1773
1774 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001775 containsAllResponses = main.Cluster.command( "setTestGet",
1776 args=[ main.onosSetName ],
1777 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001778 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001779 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001780 if containsResponses[ i ] == main.ERROR:
1781 containsResults = main.FALSE
1782 else:
1783 containsResults = containsResults and\
1784 containsResponses[ i ][ 1 ]
1785 utilities.assert_equals( expect=main.TRUE,
1786 actual=containsAllResults,
1787 onpass="Set containsAll is functional",
1788 onfail="Set containsAll failed" )
1789
1790 main.step( "Distributed Set remove()" )
1791 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001792 removeResponses = main.Cluster.command( "setTestRemove",
1793 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001794 # main.TRUE = successfully changed the set
1795 # main.FALSE = action resulted in no change in set
1796 # main.ERROR - Some error in executing the function
1797 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001798 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001799 if removeResponses[ i ] == main.TRUE:
1800 # All is well
1801 pass
1802 elif removeResponses[ i ] == main.FALSE:
1803 # not in set, probably fine
1804 pass
1805 elif removeResponses[ i ] == main.ERROR:
1806 # Error in execution
1807 removeResults = main.FALSE
1808 else:
1809 # unexpected result
1810 removeResults = main.FALSE
1811 if removeResults != main.TRUE:
1812 main.log.error( "Error executing set remove" )
1813
1814 # Check if set is still correct
1815 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001816 getResponses = main.Cluster.command( "setTestGet",
1817 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001818 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001819 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001820 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001821 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001822 current = set( getResponses[ i ] )
1823 if len( current ) == len( getResponses[ i ] ):
1824 # no repeats
1825 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001826 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001827 " of set " + main.onosSetName + ":\n" +
1828 str( getResponses[ i ] ) )
1829 main.log.debug( "Expected: " + str( main.onosSet ) )
1830 main.log.debug( "Actual: " + str( current ) )
1831 getResults = main.FALSE
1832 else:
1833 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001834 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001835 " set " + main.onosSetName + ":\n" +
1836 str( getResponses[ i ] ) )
1837 getResults = main.FALSE
1838 elif getResponses[ i ] == main.ERROR:
1839 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001840 sizeResponses = main.Cluster.command( "setTestSize",
1841 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001842 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001843 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001844 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001845 if size != sizeResponses[ i ]:
1846 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001847 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001848 " for set " + main.onosSetName +
1849 " but got " + str( sizeResponses[ i ] ) )
1850 removeResults = removeResults and getResults and sizeResults
1851 utilities.assert_equals( expect=main.TRUE,
1852 actual=removeResults,
1853 onpass="Set remove correct",
1854 onfail="Set remove was incorrect" )
1855
1856 main.step( "Distributed Set removeAll()" )
1857 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001858 removeAllResponses = main.Cluster.command( "setTestRemove",
1859 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001860 # main.TRUE = successfully changed the set
1861 # main.FALSE = action resulted in no change in set
1862 # main.ERROR - Some error in executing the function
1863 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001864 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001865 if removeAllResponses[ i ] == main.TRUE:
1866 # All is well
1867 pass
1868 elif removeAllResponses[ i ] == main.FALSE:
1869 # not in set, probably fine
1870 pass
1871 elif removeAllResponses[ i ] == main.ERROR:
1872 # Error in execution
1873 removeAllResults = main.FALSE
1874 else:
1875 # unexpected result
1876 removeAllResults = main.FALSE
1877 if removeAllResults != main.TRUE:
1878 main.log.error( "Error executing set removeAll" )
1879
1880 # Check if set is still correct
1881 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001882 getResponses = main.Cluster.command( "setTestGet",
1883 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001884 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001885 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001886 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001887 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001888 current = set( getResponses[ i ] )
1889 if len( current ) == len( getResponses[ i ] ):
1890 # no repeats
1891 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001892 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001893 " of set " + main.onosSetName + ":\n" +
1894 str( getResponses[ i ] ) )
1895 main.log.debug( "Expected: " + str( main.onosSet ) )
1896 main.log.debug( "Actual: " + str( current ) )
1897 getResults = main.FALSE
1898 else:
1899 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001900 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001901 " set " + main.onosSetName + ":\n" +
1902 str( getResponses[ i ] ) )
1903 getResults = main.FALSE
1904 elif getResponses[ i ] == main.ERROR:
1905 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001906 sizeResponses = main.Cluster.command( "setTestSize",
1907 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001908 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001909 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001910 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001911 if size != sizeResponses[ i ]:
1912 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001913 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001914 " for set " + main.onosSetName +
1915 " but got " + str( sizeResponses[ i ] ) )
1916 removeAllResults = removeAllResults and getResults and sizeResults
1917 utilities.assert_equals( expect=main.TRUE,
1918 actual=removeAllResults,
1919 onpass="Set removeAll correct",
1920 onfail="Set removeAll was incorrect" )
1921
1922 main.step( "Distributed Set addAll()" )
1923 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001924 addResponses = main.Cluster.command( "setTestAdd",
1925 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001926 # main.TRUE = successfully changed the set
1927 # main.FALSE = action resulted in no change in set
1928 # main.ERROR - Some error in executing the function
1929 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001930 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001931 if addResponses[ i ] == main.TRUE:
1932 # All is well
1933 pass
1934 elif addResponses[ i ] == main.FALSE:
1935 # Already in set, probably fine
1936 pass
1937 elif addResponses[ i ] == main.ERROR:
1938 # Error in execution
1939 addAllResults = main.FALSE
1940 else:
1941 # unexpected result
1942 addAllResults = main.FALSE
1943 if addAllResults != main.TRUE:
1944 main.log.error( "Error executing set addAll" )
1945
1946 # Check if set is still correct
1947 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001948 getResponses = main.Cluster.command( "setTestGet",
1949 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001950 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001951 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001952 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001953 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001954 current = set( getResponses[ i ] )
1955 if len( current ) == len( getResponses[ i ] ):
1956 # no repeats
1957 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001958 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001959 " of set " + main.onosSetName + ":\n" +
1960 str( getResponses[ i ] ) )
1961 main.log.debug( "Expected: " + str( main.onosSet ) )
1962 main.log.debug( "Actual: " + str( current ) )
1963 getResults = main.FALSE
1964 else:
1965 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001966 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001967 " set " + main.onosSetName + ":\n" +
1968 str( getResponses[ i ] ) )
1969 getResults = main.FALSE
1970 elif getResponses[ i ] == main.ERROR:
1971 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001972 sizeResponses = main.Cluster.command( "setTestSize",
1973 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001974 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001975 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001976 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001977 if size != sizeResponses[ i ]:
1978 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001979 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001980 " for set " + main.onosSetName +
1981 " but got " + str( sizeResponses[ i ] ) )
1982 addAllResults = addAllResults and getResults and sizeResults
1983 utilities.assert_equals( expect=main.TRUE,
1984 actual=addAllResults,
1985 onpass="Set addAll correct",
1986 onfail="Set addAll was incorrect" )
1987
1988 main.step( "Distributed Set clear()" )
1989 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07001990 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07001991 args=[ main.onosSetName, " " ], # Values doesn't matter
1992 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001993 # main.TRUE = successfully changed the set
1994 # main.FALSE = action resulted in no change in set
1995 # main.ERROR - Some error in executing the function
1996 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001997 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001998 if clearResponses[ i ] == main.TRUE:
1999 # All is well
2000 pass
2001 elif clearResponses[ i ] == main.FALSE:
2002 # Nothing set, probably fine
2003 pass
2004 elif clearResponses[ i ] == main.ERROR:
2005 # Error in execution
2006 clearResults = main.FALSE
2007 else:
2008 # unexpected result
2009 clearResults = main.FALSE
2010 if clearResults != main.TRUE:
2011 main.log.error( "Error executing set clear" )
2012
2013 # Check if set is still correct
2014 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002015 getResponses = main.Cluster.command( "setTestGet",
2016 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002017 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002018 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002019 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002020 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002021 current = set( getResponses[ i ] )
2022 if len( current ) == len( getResponses[ i ] ):
2023 # no repeats
2024 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002025 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002026 " of set " + main.onosSetName + ":\n" +
2027 str( getResponses[ i ] ) )
2028 main.log.debug( "Expected: " + str( main.onosSet ) )
2029 main.log.debug( "Actual: " + str( current ) )
2030 getResults = main.FALSE
2031 else:
2032 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002033 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002034 " set " + main.onosSetName + ":\n" +
2035 str( getResponses[ i ] ) )
2036 getResults = main.FALSE
2037 elif getResponses[ i ] == main.ERROR:
2038 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002039 sizeResponses = main.Cluster.command( "setTestSize",
2040 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002041 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002042 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002043 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002044 if size != sizeResponses[ i ]:
2045 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002046 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002047 " for set " + main.onosSetName +
2048 " but got " + str( sizeResponses[ i ] ) )
2049 clearResults = clearResults and getResults and sizeResults
2050 utilities.assert_equals( expect=main.TRUE,
2051 actual=clearResults,
2052 onpass="Set clear correct",
2053 onfail="Set clear was incorrect" )
2054
2055 main.step( "Distributed Set addAll()" )
2056 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002057 addResponses = main.Cluster.command( "setTestAdd",
2058 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002059 # main.TRUE = successfully changed the set
2060 # main.FALSE = action resulted in no change in set
2061 # main.ERROR - Some error in executing the function
2062 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002063 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002064 if addResponses[ i ] == main.TRUE:
2065 # All is well
2066 pass
2067 elif addResponses[ i ] == main.FALSE:
2068 # Already in set, probably fine
2069 pass
2070 elif addResponses[ i ] == main.ERROR:
2071 # Error in execution
2072 addAllResults = main.FALSE
2073 else:
2074 # unexpected result
2075 addAllResults = main.FALSE
2076 if addAllResults != main.TRUE:
2077 main.log.error( "Error executing set addAll" )
2078
2079 # Check if set is still correct
2080 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002081 getResponses = main.Cluster.command( "setTestGet",
2082 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002083 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002084 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002085 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002086 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002087 current = set( getResponses[ i ] )
2088 if len( current ) == len( getResponses[ i ] ):
2089 # no repeats
2090 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002091 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002092 " of set " + main.onosSetName + ":\n" +
2093 str( getResponses[ i ] ) )
2094 main.log.debug( "Expected: " + str( main.onosSet ) )
2095 main.log.debug( "Actual: " + str( current ) )
2096 getResults = main.FALSE
2097 else:
2098 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002099 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002100 " set " + main.onosSetName + ":\n" +
2101 str( getResponses[ i ] ) )
2102 getResults = main.FALSE
2103 elif getResponses[ i ] == main.ERROR:
2104 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002105 sizeResponses = main.Cluster.command( "setTestSize",
2106 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002107 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002108 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002109 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002110 if size != sizeResponses[ i ]:
2111 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002112 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002113 " for set " + main.onosSetName +
2114 " but got " + str( sizeResponses[ i ] ) )
2115 addAllResults = addAllResults and getResults and sizeResults
2116 utilities.assert_equals( expect=main.TRUE,
2117 actual=addAllResults,
2118 onpass="Set addAll correct",
2119 onfail="Set addAll was incorrect" )
2120
2121 main.step( "Distributed Set retain()" )
2122 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002123 retainResponses = main.Cluster.command( "setTestRemove",
2124 args=[ main.onosSetName, retainValue ],
2125 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002126 # main.TRUE = successfully changed the set
2127 # main.FALSE = action resulted in no change in set
2128 # main.ERROR - Some error in executing the function
2129 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002130 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002131 if retainResponses[ i ] == main.TRUE:
2132 # All is well
2133 pass
2134 elif retainResponses[ i ] == main.FALSE:
2135 # Already in set, probably fine
2136 pass
2137 elif retainResponses[ i ] == main.ERROR:
2138 # Error in execution
2139 retainResults = main.FALSE
2140 else:
2141 # unexpected result
2142 retainResults = main.FALSE
2143 if retainResults != main.TRUE:
2144 main.log.error( "Error executing set retain" )
2145
2146 # Check if set is still correct
2147 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002148 getResponses = main.Cluster.command( "setTestGet",
2149 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002150 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002151 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002152 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002153 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002154 current = set( getResponses[ i ] )
2155 if len( current ) == len( getResponses[ i ] ):
2156 # no repeats
2157 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002158 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002159 " of set " + main.onosSetName + ":\n" +
2160 str( getResponses[ i ] ) )
2161 main.log.debug( "Expected: " + str( main.onosSet ) )
2162 main.log.debug( "Actual: " + str( current ) )
2163 getResults = main.FALSE
2164 else:
2165 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002166 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002167 " set " + main.onosSetName + ":\n" +
2168 str( getResponses[ i ] ) )
2169 getResults = main.FALSE
2170 elif getResponses[ i ] == main.ERROR:
2171 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002172 sizeResponses = main.Cluster.command( "setTestSize",
2173 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002174 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002175 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002176 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002177 if size != sizeResponses[ i ]:
2178 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002179 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002180 str( size ) + " for set " + main.onosSetName +
2181 " but got " + str( sizeResponses[ i ] ) )
2182 retainResults = retainResults and getResults and sizeResults
2183 utilities.assert_equals( expect=main.TRUE,
2184 actual=retainResults,
2185 onpass="Set retain correct",
2186 onfail="Set retain was incorrect" )
2187
2188 # Transactional maps
2189 main.step( "Partitioned Transactional maps put" )
2190 tMapValue = "Testing"
2191 numKeys = 100
2192 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002193 ctrl = main.Cluster.next()
2194 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002195 if putResponses and len( putResponses ) == 100:
2196 for i in putResponses:
2197 if putResponses[ i ][ 'value' ] != tMapValue:
2198 putResult = False
2199 else:
2200 putResult = False
2201 if not putResult:
2202 main.log.debug( "Put response values: " + str( putResponses ) )
2203 utilities.assert_equals( expect=True,
2204 actual=putResult,
2205 onpass="Partitioned Transactional Map put successful",
2206 onfail="Partitioned Transactional Map put values are incorrect" )
2207
2208 main.step( "Partitioned Transactional maps get" )
2209 # FIXME: is this sleep needed?
2210 time.sleep( 5 )
2211
2212 getCheck = True
2213 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002214 getResponses = main.Cluster.command( "transactionalMapGet",
2215 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002216 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002217 for node in getResponses:
2218 if node != tMapValue:
2219 valueCheck = False
2220 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002221 main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002222 main.log.warn( getResponses )
2223 getCheck = getCheck and valueCheck
2224 utilities.assert_equals( expect=True,
2225 actual=getCheck,
2226 onpass="Partitioned Transactional Map get values were correct",
2227 onfail="Partitioned Transactional Map values incorrect" )
2228
2229 # DISTRIBUTED ATOMIC VALUE
2230 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002231 getValues = main.Cluster.command( "valueTestGet",
2232 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002233 main.log.debug( getValues )
2234 # Check the results
2235 atomicValueGetResult = True
2236 expected = valueValue if valueValue is not None else "null"
2237 main.log.debug( "Checking for value of " + expected )
2238 for i in getValues:
2239 if i != expected:
2240 atomicValueGetResult = False
2241 utilities.assert_equals( expect=True,
2242 actual=atomicValueGetResult,
2243 onpass="Atomic Value get successful",
2244 onfail="Error getting atomic Value " +
2245 str( valueValue ) + ", found: " +
2246 str( getValues ) )
2247
2248 main.step( "Atomic Value set()" )
2249 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002250 setValues = main.Cluster.command( "valueTestSet",
2251 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002252 main.log.debug( setValues )
2253 # Check the results
2254 atomicValueSetResults = True
2255 for i in setValues:
2256 if i != main.TRUE:
2257 atomicValueSetResults = False
2258 utilities.assert_equals( expect=True,
2259 actual=atomicValueSetResults,
2260 onpass="Atomic Value set successful",
2261 onfail="Error setting atomic Value" +
2262 str( setValues ) )
2263
2264 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002265 getValues = main.Cluster.command( "valueTestGet",
2266 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002267 main.log.debug( getValues )
2268 # Check the results
2269 atomicValueGetResult = True
2270 expected = valueValue if valueValue is not None else "null"
2271 main.log.debug( "Checking for value of " + expected )
2272 for i in getValues:
2273 if i != expected:
2274 atomicValueGetResult = False
2275 utilities.assert_equals( expect=True,
2276 actual=atomicValueGetResult,
2277 onpass="Atomic Value get successful",
2278 onfail="Error getting atomic Value " +
2279 str( valueValue ) + ", found: " +
2280 str( getValues ) )
2281
2282 main.step( "Atomic Value compareAndSet()" )
2283 oldValue = valueValue
2284 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002285 ctrl = main.Cluster.next()
2286 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002287 main.log.debug( CASValue )
2288 utilities.assert_equals( expect=main.TRUE,
2289 actual=CASValue,
2290 onpass="Atomic Value comapreAndSet successful",
2291 onfail="Error setting atomic Value:" +
2292 str( CASValue ) )
2293
2294 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002295 getValues = main.Cluster.command( "valueTestGet",
2296 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002297 main.log.debug( getValues )
2298 # Check the results
2299 atomicValueGetResult = True
2300 expected = valueValue if valueValue is not None else "null"
2301 main.log.debug( "Checking for value of " + expected )
2302 for i in getValues:
2303 if i != expected:
2304 atomicValueGetResult = False
2305 utilities.assert_equals( expect=True,
2306 actual=atomicValueGetResult,
2307 onpass="Atomic Value get successful",
2308 onfail="Error getting atomic Value " +
2309 str( valueValue ) + ", found: " +
2310 str( getValues ) )
2311
2312 main.step( "Atomic Value getAndSet()" )
2313 oldValue = valueValue
2314 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002315 ctrl = main.Cluster.next()
2316 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002317 main.log.debug( GASValue )
2318 expected = oldValue if oldValue is not None else "null"
2319 utilities.assert_equals( expect=expected,
2320 actual=GASValue,
2321 onpass="Atomic Value GAS successful",
2322 onfail="Error with GetAndSet atomic Value: expected " +
2323 str( expected ) + ", found: " +
2324 str( GASValue ) )
2325
2326 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002327 getValues = main.Cluster.command( "valueTestGet",
2328 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002329 main.log.debug( getValues )
2330 # Check the results
2331 atomicValueGetResult = True
2332 expected = valueValue if valueValue is not None else "null"
2333 main.log.debug( "Checking for value of " + expected )
2334 for i in getValues:
2335 if i != expected:
2336 atomicValueGetResult = False
2337 utilities.assert_equals( expect=True,
2338 actual=atomicValueGetResult,
2339 onpass="Atomic Value get successful",
2340 onfail="Error getting atomic Value: expected " +
2341 str( valueValue ) + ", found: " +
2342 str( getValues ) )
2343
2344 main.step( "Atomic Value destory()" )
2345 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002346 ctrl = main.Cluster.next()
2347 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002348 main.log.debug( destroyResult )
2349 # Check the results
2350 utilities.assert_equals( expect=main.TRUE,
2351 actual=destroyResult,
2352 onpass="Atomic Value destroy successful",
2353 onfail="Error destroying atomic Value" )
2354
2355 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002356 getValues = main.Cluster.command( "valueTestGet",
2357 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002358 main.log.debug( getValues )
2359 # Check the results
2360 atomicValueGetResult = True
2361 expected = valueValue if valueValue is not None else "null"
2362 main.log.debug( "Checking for value of " + expected )
2363 for i in getValues:
2364 if i != expected:
2365 atomicValueGetResult = False
2366 utilities.assert_equals( expect=True,
2367 actual=atomicValueGetResult,
2368 onpass="Atomic Value get successful",
2369 onfail="Error getting atomic Value " +
2370 str( valueValue ) + ", found: " +
2371 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002372
2373 # WORK QUEUES
2374 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002375 ctrl = main.Cluster.next()
2376 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002377 workQueuePending += 1
2378 main.log.debug( addResult )
2379 # Check the results
2380 utilities.assert_equals( expect=main.TRUE,
2381 actual=addResult,
2382 onpass="Work Queue add successful",
2383 onfail="Error adding to Work Queue" )
2384
2385 main.step( "Check the work queue stats" )
2386 statsResults = self.workQueueStatsCheck( workQueueName,
2387 workQueueCompleted,
2388 workQueueInProgress,
2389 workQueuePending )
2390 utilities.assert_equals( expect=True,
2391 actual=statsResults,
2392 onpass="Work Queue stats correct",
2393 onfail="Work Queue stats incorrect " )
2394
2395 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002396 ctrl = main.Cluster.next()
2397 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002398 workQueuePending += 2
2399 main.log.debug( addMultipleResult )
2400 # Check the results
2401 utilities.assert_equals( expect=main.TRUE,
2402 actual=addMultipleResult,
2403 onpass="Work Queue add multiple successful",
2404 onfail="Error adding multiple items to Work Queue" )
2405
2406 main.step( "Check the work queue stats" )
2407 statsResults = self.workQueueStatsCheck( workQueueName,
2408 workQueueCompleted,
2409 workQueueInProgress,
2410 workQueuePending )
2411 utilities.assert_equals( expect=True,
2412 actual=statsResults,
2413 onpass="Work Queue stats correct",
2414 onfail="Work Queue stats incorrect " )
2415
2416 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002417 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002418 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002419 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002420 workQueuePending -= number
2421 workQueueCompleted += number
2422 main.log.debug( take1Result )
2423 # Check the results
2424 utilities.assert_equals( expect=main.TRUE,
2425 actual=take1Result,
2426 onpass="Work Queue takeAndComplete 1 successful",
2427 onfail="Error taking 1 from Work Queue" )
2428
2429 main.step( "Check the work queue stats" )
2430 statsResults = self.workQueueStatsCheck( workQueueName,
2431 workQueueCompleted,
2432 workQueueInProgress,
2433 workQueuePending )
2434 utilities.assert_equals( expect=True,
2435 actual=statsResults,
2436 onpass="Work Queue stats correct",
2437 onfail="Work Queue stats incorrect " )
2438
2439 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002440 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002441 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002442 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002443 workQueuePending -= number
2444 workQueueCompleted += number
2445 main.log.debug( take2Result )
2446 # Check the results
2447 utilities.assert_equals( expect=main.TRUE,
2448 actual=take2Result,
2449 onpass="Work Queue takeAndComplete 2 successful",
2450 onfail="Error taking 2 from Work Queue" )
2451
2452 main.step( "Check the work queue stats" )
2453 statsResults = self.workQueueStatsCheck( workQueueName,
2454 workQueueCompleted,
2455 workQueueInProgress,
2456 workQueuePending )
2457 utilities.assert_equals( expect=True,
2458 actual=statsResults,
2459 onpass="Work Queue stats correct",
2460 onfail="Work Queue stats incorrect " )
2461
2462 main.step( "Work Queue destroy()" )
2463 valueValue = None
2464 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002465 ctrl = main.Cluster.next()
2466 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002467 workQueueCompleted = 0
2468 workQueueInProgress = 0
2469 workQueuePending = 0
2470 main.log.debug( destroyResult )
2471 # Check the results
2472 utilities.assert_equals( expect=main.TRUE,
2473 actual=destroyResult,
2474 onpass="Work Queue destroy successful",
2475 onfail="Error destroying Work Queue" )
2476
2477 main.step( "Check the work queue stats" )
2478 statsResults = self.workQueueStatsCheck( workQueueName,
2479 workQueueCompleted,
2480 workQueueInProgress,
2481 workQueuePending )
2482 utilities.assert_equals( expect=True,
2483 actual=statsResults,
2484 onpass="Work Queue stats correct",
2485 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002486 except Exception as e:
2487 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002488
2489 def cleanUp( self, main ):
2490 """
2491 Clean up
2492 """
2493 import os
2494 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002495 assert main, "main not defined"
2496 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002497
2498 # printing colors to terminal
2499 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2500 'blue': '\033[94m', 'green': '\033[92m',
2501 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
Jon Hall4173b242017-09-12 17:04:38 -07002502
Devin Lim58046fa2017-07-05 16:55:00 -07002503 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002504
2505 main.step( "Checking raft log size" )
2506 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2507 # get compacted periodically
2508 logCheck = main.Cluster.checkPartitionSize()
2509 utilities.assert_equals( expect=True, actual=logCheck,
2510 onpass="Raft log size is not too big",
2511 onfail="Raft logs grew too big" )
2512
Devin Lim58046fa2017-07-05 16:55:00 -07002513 main.step( "Killing tcpdumps" )
2514 main.Mininet2.stopTcpdump()
2515
2516 testname = main.TEST
2517 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2518 main.step( "Copying MN pcap and ONOS log files to test station" )
2519 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2520 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2521 # NOTE: MN Pcap file is being saved to logdir.
2522 # We scp this file as MN and TestON aren't necessarily the same vm
2523
2524 # FIXME: To be replaced with a Jenkin's post script
2525 # TODO: Load these from params
2526 # NOTE: must end in /
2527 logFolder = "/opt/onos/log/"
2528 logFiles = [ "karaf.log", "karaf.log.1" ]
2529 # NOTE: must end in /
2530 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002531 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002532 dstName = main.logdir + "/" + ctrl.name + "-" + f
2533 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002534 logFolder + f, dstName )
2535 # std*.log's
2536 # NOTE: must end in /
2537 logFolder = "/opt/onos/var/"
2538 logFiles = [ "stderr.log", "stdout.log" ]
2539 # NOTE: must end in /
2540 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002541 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002542 dstName = main.logdir + "/" + ctrl.name + "-" + f
2543 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002544 logFolder + f, dstName )
2545 else:
2546 main.log.debug( "skipping saving log files" )
2547
2548 main.step( "Stopping Mininet" )
2549 mnResult = main.Mininet1.stopNet()
2550 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2551 onpass="Mininet stopped",
2552 onfail="MN cleanup NOT successful" )
2553
2554 main.step( "Checking ONOS Logs for errors" )
Devin Lim142b5342017-07-20 15:22:39 -07002555 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002556 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2557 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002558
2559 try:
2560 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2561 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2562 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2563 timerLog.close()
2564 except NameError as e:
2565 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002566
Devin Lim58046fa2017-07-05 16:55:00 -07002567 def assignMastership( self, main ):
2568 """
2569 Assign mastership to controllers
2570 """
2571 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002572 assert main, "main not defined"
2573 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002574
2575 main.case( "Assigning Controller roles for switches" )
2576 main.caseExplanation = "Check that ONOS is connected to each " +\
2577 "device. Then manually assign" +\
2578 " mastership to specific ONOS nodes using" +\
2579 " 'device-role'"
2580 main.step( "Assign mastership of switches to specific controllers" )
2581 # Manually assign mastership to the controller we want
2582 roleCall = main.TRUE
2583
2584 ipList = []
2585 deviceList = []
Jon Hallca319892017-06-15 15:25:22 -07002586 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07002587 try:
2588 # Assign mastership to specific controllers. This assignment was
2589 # determined for a 7 node cluser, but will work with any sized
2590 # cluster
2591 for i in range( 1, 29 ): # switches 1 through 28
2592 # set up correct variables:
2593 if i == 1:
2594 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002595 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002596 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
2597 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002598 c = 1 % main.Cluster.numCtrls
2599 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002600 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
2601 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002602 c = 1 % main.Cluster.numCtrls
2603 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002604 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
2605 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002606 c = 3 % main.Cluster.numCtrls
2607 ip = main.Cluster.active( c ).ip_address # ONOS4
Devin Lim58046fa2017-07-05 16:55:00 -07002608 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
2609 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002610 c = 2 % main.Cluster.numCtrls
2611 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002612 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
2613 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002614 c = 2 % main.Cluster.numCtrls
2615 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002616 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
2617 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002618 c = 5 % main.Cluster.numCtrls
2619 ip = main.Cluster.active( c ).ip_address # ONOS6
Devin Lim58046fa2017-07-05 16:55:00 -07002620 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
2621 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002622 c = 4 % main.Cluster.numCtrls
2623 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002624 dpid = '3' + str( i ).zfill( 3 )
2625 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2626 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002627 c = 6 % main.Cluster.numCtrls
2628 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002629 dpid = '6' + str( i ).zfill( 3 )
2630 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2631 elif i == 28:
2632 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002633 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002634 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
2635 else:
2636 main.log.error( "You didn't write an else statement for " +
2637 "switch s" + str( i ) )
2638 roleCall = main.FALSE
2639 # Assign switch
2640 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2641 # TODO: make this controller dynamic
2642 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
2643 ipList.append( ip )
2644 deviceList.append( deviceId )
2645 except ( AttributeError, AssertionError ):
2646 main.log.exception( "Something is wrong with ONOS device view" )
2647 main.log.info( onosCli.devices() )
2648 utilities.assert_equals(
2649 expect=main.TRUE,
2650 actual=roleCall,
2651 onpass="Re-assigned switch mastership to designated controller",
2652 onfail="Something wrong with deviceRole calls" )
2653
2654 main.step( "Check mastership was correctly assigned" )
2655 roleCheck = main.TRUE
2656 # NOTE: This is due to the fact that device mastership change is not
2657 # atomic and is actually a multi step process
2658 time.sleep( 5 )
2659 for i in range( len( ipList ) ):
2660 ip = ipList[ i ]
2661 deviceId = deviceList[ i ]
2662 # Check assignment
2663 master = onosCli.getRole( deviceId ).get( 'master' )
2664 if ip in master:
2665 roleCheck = roleCheck and main.TRUE
2666 else:
2667 roleCheck = roleCheck and main.FALSE
2668 main.log.error( "Error, controller " + ip + " is not" +
2669 " master " + "of device " +
2670 str( deviceId ) + ". Master is " +
2671 repr( master ) + "." )
2672 utilities.assert_equals(
2673 expect=main.TRUE,
2674 actual=roleCheck,
2675 onpass="Switches were successfully reassigned to designated " +
2676 "controller",
2677 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002678
Devin Lim58046fa2017-07-05 16:55:00 -07002679 def bringUpStoppedNode( self, main ):
2680 """
2681 The bring up stopped nodes
2682 """
2683 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002684 assert main, "main not defined"
2685 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002686 assert main.kill, "main.kill not defined"
2687 main.case( "Restart minority of ONOS nodes" )
2688
2689 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2690 startResults = main.TRUE
2691 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002692 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002693 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002694 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002695 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2696 onpass="ONOS nodes started successfully",
2697 onfail="ONOS nodes NOT successfully started" )
2698
2699 main.step( "Checking if ONOS is up yet" )
2700 count = 0
2701 onosIsupResult = main.FALSE
2702 while onosIsupResult == main.FALSE and count < 10:
2703 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002704 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002705 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002706 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002707 count = count + 1
2708 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2709 onpass="ONOS restarted successfully",
2710 onfail="ONOS restart NOT successful" )
2711
Jon Hallca319892017-06-15 15:25:22 -07002712 main.step( "Restarting ONOS nodes" )
Devin Lim58046fa2017-07-05 16:55:00 -07002713 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002714 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002715 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002716 ctrl.startOnosCli( ctrl.ipAddress )
2717 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002718 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002719 onpass="ONOS node(s) restarted",
2720 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002721
2722 # Grab the time of restart so we chan check how long the gossip
2723 # protocol has had time to work
2724 main.restartTime = time.time() - restartTime
2725 main.log.debug( "Restart time: " + str( main.restartTime ) )
2726 # TODO: MAke this configurable. Also, we are breaking the above timer
2727 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08002728 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07002729 False,
Devin Lim58046fa2017-07-05 16:55:00 -07002730 sleep=15,
2731 attempts=5 )
2732
2733 utilities.assert_equals( expect=True, actual=nodeResults,
2734 onpass="Nodes check successful",
2735 onfail="Nodes check NOT successful" )
2736
2737 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002738 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002739 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002740 ctrl.name,
2741 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002742 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -07002743 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002744
Jon Hallca319892017-06-15 15:25:22 -07002745 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002746
2747 main.step( "Rerun for election on the node(s) that were killed" )
2748 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002749 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002750 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002751 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002752 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2753 onpass="ONOS nodes reran for election topic",
2754 onfail="Errror rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002755
Devin Lim142b5342017-07-20 15:22:39 -07002756 def tempCell( self, cellName, ipList ):
2757 main.step( "Create cell file" )
2758 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002759
Devin Lim142b5342017-07-20 15:22:39 -07002760 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2761 main.Mininet1.ip_address,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002762 cellAppString, ipList, main.ONOScli1.karafUser )
Devin Lim142b5342017-07-20 15:22:39 -07002763 main.step( "Applying cell variable to environment" )
2764 cellResult = main.ONOSbench.setCell( cellName )
2765 verifyResult = main.ONOSbench.verifyCell()
2766
Devin Lim142b5342017-07-20 15:22:39 -07002767 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002768 """
2769 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002770 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002771 1: scaling
2772 """
2773 """
2774 Check state after ONOS failure/scaling
2775 """
2776 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002777 assert main, "main not defined"
2778 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002779 main.case( "Running ONOS Constant State Tests" )
2780
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002781 OnosAfterWhich = [ "failure", "scaliing" ]
Devin Lim58046fa2017-07-05 16:55:00 -07002782
Devin Lim58046fa2017-07-05 16:55:00 -07002783 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002784 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002785
Devin Lim142b5342017-07-20 15:22:39 -07002786 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002787 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002788
2789 if rolesResults and not consistentMastership:
2790 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002791 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002792 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002793 json.dumps( json.loads( ONOSMastership[ i ] ),
2794 sort_keys=True,
2795 indent=4,
2796 separators=( ',', ': ' ) ) )
2797
2798 if compareSwitch:
2799 description2 = "Compare switch roles from before failure"
2800 main.step( description2 )
2801 try:
2802 currentJson = json.loads( ONOSMastership[ 0 ] )
2803 oldJson = json.loads( mastershipState )
2804 except ( ValueError, TypeError ):
2805 main.log.exception( "Something is wrong with parsing " +
2806 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002807 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2808 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002809 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002810 mastershipCheck = main.TRUE
2811 for i in range( 1, 29 ):
2812 switchDPID = str(
2813 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
2814 current = [ switch[ 'master' ] for switch in currentJson
2815 if switchDPID in switch[ 'id' ] ]
2816 old = [ switch[ 'master' ] for switch in oldJson
2817 if switchDPID in switch[ 'id' ] ]
2818 if current == old:
2819 mastershipCheck = mastershipCheck and main.TRUE
2820 else:
2821 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2822 mastershipCheck = main.FALSE
2823 utilities.assert_equals(
2824 expect=main.TRUE,
2825 actual=mastershipCheck,
2826 onpass="Mastership of Switches was not changed",
2827 onfail="Mastership of some switches changed" )
2828
2829 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002830 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002831 intentCheck = main.FALSE
2832 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002833
2834 main.step( "Check for consistency in Intents from each controller" )
2835 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2836 main.log.info( "Intents are consistent across all ONOS " +
2837 "nodes" )
2838 else:
2839 consistentIntents = False
2840
2841 # Try to make it easy to figure out what is happening
2842 #
2843 # Intent ONOS1 ONOS2 ...
2844 # 0x01 INSTALLED INSTALLING
2845 # ... ... ...
2846 # ... ... ...
2847 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002848 for ctrl in main.Cluster.active():
2849 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002850 main.log.warn( title )
2851 # get all intent keys in the cluster
2852 keys = []
2853 for nodeStr in ONOSIntents:
2854 node = json.loads( nodeStr )
2855 for intent in node:
2856 keys.append( intent.get( 'id' ) )
2857 keys = set( keys )
2858 for key in keys:
2859 row = "%-13s" % key
2860 for nodeStr in ONOSIntents:
2861 node = json.loads( nodeStr )
2862 for intent in node:
2863 if intent.get( 'id' ) == key:
2864 row += "%-15s" % intent.get( 'state' )
2865 main.log.warn( row )
2866 # End table view
2867
2868 utilities.assert_equals(
2869 expect=True,
2870 actual=consistentIntents,
2871 onpass="Intents are consistent across all ONOS nodes",
2872 onfail="ONOS nodes have different views of intents" )
2873 intentStates = []
2874 for node in ONOSIntents: # Iter through ONOS nodes
2875 nodeStates = []
2876 # Iter through intents of a node
2877 try:
2878 for intent in json.loads( node ):
2879 nodeStates.append( intent[ 'state' ] )
2880 except ( ValueError, TypeError ):
2881 main.log.exception( "Error in parsing intents" )
2882 main.log.error( repr( node ) )
2883 intentStates.append( nodeStates )
2884 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2885 main.log.info( dict( out ) )
2886
2887 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002888 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002889 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002890 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002891 main.log.warn( json.dumps(
2892 json.loads( ONOSIntents[ i ] ),
2893 sort_keys=True,
2894 indent=4,
2895 separators=( ',', ': ' ) ) )
2896 elif intentsResults and consistentIntents:
2897 intentCheck = main.TRUE
2898
2899 # NOTE: Store has no durability, so intents are lost across system
2900 # restarts
2901 if not isRestart:
2902 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2903 # NOTE: this requires case 5 to pass for intentState to be set.
2904 # maybe we should stop the test if that fails?
2905 sameIntents = main.FALSE
2906 try:
2907 intentState
2908 except NameError:
2909 main.log.warn( "No previous intent state was saved" )
2910 else:
2911 if intentState and intentState == ONOSIntents[ 0 ]:
2912 sameIntents = main.TRUE
2913 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2914 # TODO: possibly the states have changed? we may need to figure out
2915 # what the acceptable states are
2916 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2917 sameIntents = main.TRUE
2918 try:
2919 before = json.loads( intentState )
2920 after = json.loads( ONOSIntents[ 0 ] )
2921 for intent in before:
2922 if intent not in after:
2923 sameIntents = main.FALSE
2924 main.log.debug( "Intent is not currently in ONOS " +
2925 "(at least in the same form):" )
2926 main.log.debug( json.dumps( intent ) )
2927 except ( ValueError, TypeError ):
2928 main.log.exception( "Exception printing intents" )
2929 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2930 main.log.debug( repr( intentState ) )
2931 if sameIntents == main.FALSE:
2932 try:
2933 main.log.debug( "ONOS intents before: " )
2934 main.log.debug( json.dumps( json.loads( intentState ),
2935 sort_keys=True, indent=4,
2936 separators=( ',', ': ' ) ) )
2937 main.log.debug( "Current ONOS intents: " )
2938 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2939 sort_keys=True, indent=4,
2940 separators=( ',', ': ' ) ) )
2941 except ( ValueError, TypeError ):
2942 main.log.exception( "Exception printing intents" )
2943 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2944 main.log.debug( repr( intentState ) )
2945 utilities.assert_equals(
2946 expect=main.TRUE,
2947 actual=sameIntents,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002948 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
Devin Lim58046fa2017-07-05 16:55:00 -07002949 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
2950 intentCheck = intentCheck and sameIntents
2951
2952 main.step( "Get the OF Table entries and compare to before " +
2953 "component " + OnosAfterWhich[ afterWhich ] )
2954 FlowTables = main.TRUE
2955 for i in range( 28 ):
2956 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2957 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2958 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
2959 FlowTables = FlowTables and curSwitch
2960 if curSwitch == main.FALSE:
2961 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2962 utilities.assert_equals(
2963 expect=main.TRUE,
2964 actual=FlowTables,
2965 onpass="No changes were found in the flow tables",
2966 onfail="Changes were found in the flow tables" )
2967
Jon Hallca319892017-06-15 15:25:22 -07002968 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07002969 """
2970 main.step( "Check the continuous pings to ensure that no packets " +
2971 "were dropped during component failure" )
2972 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2973 main.params[ 'TESTONIP' ] )
2974 LossInPings = main.FALSE
2975 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2976 for i in range( 8, 18 ):
2977 main.log.info(
2978 "Checking for a loss in pings along flow from s" +
2979 str( i ) )
2980 LossInPings = main.Mininet2.checkForLoss(
2981 "/tmp/ping.h" +
2982 str( i ) ) or LossInPings
2983 if LossInPings == main.TRUE:
2984 main.log.info( "Loss in ping detected" )
2985 elif LossInPings == main.ERROR:
2986 main.log.info( "There are multiple mininet process running" )
2987 elif LossInPings == main.FALSE:
2988 main.log.info( "No Loss in the pings" )
2989 main.log.info( "No loss of dataplane connectivity" )
2990 utilities.assert_equals(
2991 expect=main.FALSE,
2992 actual=LossInPings,
2993 onpass="No Loss of connectivity",
2994 onfail="Loss of dataplane connectivity detected" )
2995 # NOTE: Since intents are not persisted with IntnentStore,
2996 # we expect loss in dataplane connectivity
2997 LossInPings = main.FALSE
2998 """
Devin Lim58046fa2017-07-05 16:55:00 -07002999 def compareTopo( self, main ):
3000 """
3001 Compare topo
3002 """
3003 import json
3004 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003005 assert main, "main not defined"
3006 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003007 try:
3008 from tests.dependencies.topology import Topology
3009 except ImportError:
3010 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003011 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003012 try:
3013 main.topoRelated
3014 except ( NameError, AttributeError ):
3015 main.topoRelated = Topology()
3016 main.case( "Compare ONOS Topology view to Mininet topology" )
3017 main.caseExplanation = "Compare topology objects between Mininet" +\
3018 " and ONOS"
3019 topoResult = main.FALSE
3020 topoFailMsg = "ONOS topology don't match Mininet"
3021 elapsed = 0
3022 count = 0
3023 main.step( "Comparing ONOS topology to MN topology" )
3024 startTime = time.time()
3025 # Give time for Gossip to work
3026 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3027 devicesResults = main.TRUE
3028 linksResults = main.TRUE
3029 hostsResults = main.TRUE
3030 hostAttachmentResults = True
3031 count += 1
3032 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003033 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003034 kwargs={ 'sleep': 5, 'attempts': 5,
3035 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003036 ipResult = main.TRUE
3037
Devin Lim142b5342017-07-20 15:22:39 -07003038 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003039 kwargs={ 'sleep': 5, 'attempts': 5,
3040 'randomTime': True },
3041 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003042
3043 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003044 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003045 if hosts[ controller ]:
3046 for host in hosts[ controller ]:
3047 if host is None or host.get( 'ipAddresses', [] ) == []:
3048 main.log.error(
3049 "Error with host ipAddresses on controller" +
3050 controllerStr + ": " + str( host ) )
3051 ipResult = main.FALSE
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003052 ports = main.topoRelated.getAll( "ports", True,
Jon Hallca319892017-06-15 15:25:22 -07003053 kwargs={ 'sleep': 5, 'attempts': 5,
3054 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003055 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003056 kwargs={ 'sleep': 5, 'attempts': 5,
3057 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003058 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003059 kwargs={ 'sleep': 5, 'attempts': 5,
3060 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003061
3062 elapsed = time.time() - startTime
3063 cliTime = time.time() - cliStart
3064 print "Elapsed time: " + str( elapsed )
3065 print "CLI time: " + str( cliTime )
3066
3067 if all( e is None for e in devices ) and\
3068 all( e is None for e in hosts ) and\
3069 all( e is None for e in ports ) and\
3070 all( e is None for e in links ) and\
3071 all( e is None for e in clusters ):
3072 topoFailMsg = "Could not get topology from ONOS"
3073 main.log.error( topoFailMsg )
3074 continue # Try again, No use trying to compare
3075
3076 mnSwitches = main.Mininet1.getSwitches()
3077 mnLinks = main.Mininet1.getLinks()
3078 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003079 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003080 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003081 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3082 controller,
3083 mnSwitches,
3084 devices,
3085 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003086 utilities.assert_equals( expect=main.TRUE,
3087 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003088 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003089 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003090 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003091 " Switches view is incorrect" )
3092
Devin Lim58046fa2017-07-05 16:55:00 -07003093 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003094 main.Mininet1.compareLinks,
3095 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003096 utilities.assert_equals( expect=main.TRUE,
3097 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003098 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003099 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003100 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003101 " links view is incorrect" )
3102 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3103 currentHostsResult = main.Mininet1.compareHosts(
3104 mnHosts,
3105 hosts[ controller ] )
3106 elif hosts[ controller ] == []:
3107 currentHostsResult = main.TRUE
3108 else:
3109 currentHostsResult = main.FALSE
3110 utilities.assert_equals( expect=main.TRUE,
3111 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003112 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003113 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003114 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003115 " hosts don't match Mininet" )
3116 # CHECKING HOST ATTACHMENT POINTS
3117 hostAttachment = True
3118 zeroHosts = False
3119 # FIXME: topo-HA/obelisk specific mappings:
3120 # key is mac and value is dpid
3121 mappings = {}
3122 for i in range( 1, 29 ): # hosts 1 through 28
3123 # set up correct variables:
3124 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
3125 if i == 1:
3126 deviceId = "1000".zfill( 16 )
3127 elif i == 2:
3128 deviceId = "2000".zfill( 16 )
3129 elif i == 3:
3130 deviceId = "3000".zfill( 16 )
3131 elif i == 4:
3132 deviceId = "3004".zfill( 16 )
3133 elif i == 5:
3134 deviceId = "5000".zfill( 16 )
3135 elif i == 6:
3136 deviceId = "6000".zfill( 16 )
3137 elif i == 7:
3138 deviceId = "6007".zfill( 16 )
3139 elif i >= 8 and i <= 17:
3140 dpid = '3' + str( i ).zfill( 3 )
3141 deviceId = dpid.zfill( 16 )
3142 elif i >= 18 and i <= 27:
3143 dpid = '6' + str( i ).zfill( 3 )
3144 deviceId = dpid.zfill( 16 )
3145 elif i == 28:
3146 deviceId = "2800".zfill( 16 )
3147 mappings[ macId ] = deviceId
3148 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3149 if hosts[ controller ] == []:
3150 main.log.warn( "There are no hosts discovered" )
3151 zeroHosts = True
3152 else:
3153 for host in hosts[ controller ]:
3154 mac = None
3155 location = None
3156 device = None
3157 port = None
3158 try:
3159 mac = host.get( 'mac' )
3160 assert mac, "mac field could not be found for this host object"
Devin Limefaf3062017-08-14 16:18:19 -07003161 print host
3162 if 'locations' in host:
3163 location = host.get( 'locations' )[ 0 ]
3164 elif 'location' in host:
3165 location = host.get( 'location' )
Devin Lim58046fa2017-07-05 16:55:00 -07003166 assert location, "location field could not be found for this host object"
3167
3168 # Trim the protocol identifier off deviceId
3169 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
3170 assert device, "elementId field could not be found for this host location object"
3171
3172 port = location.get( 'port' )
3173 assert port, "port field could not be found for this host location object"
3174
3175 # Now check if this matches where they should be
3176 if mac and device and port:
3177 if str( port ) != "1":
3178 main.log.error( "The attachment port is incorrect for " +
3179 "host " + str( mac ) +
3180 ". Expected: 1 Actual: " + str( port ) )
3181 hostAttachment = False
3182 if device != mappings[ str( mac ) ]:
3183 main.log.error( "The attachment device is incorrect for " +
3184 "host " + str( mac ) +
3185 ". Expected: " + mappings[ str( mac ) ] +
3186 " Actual: " + device )
3187 hostAttachment = False
3188 else:
3189 hostAttachment = False
Devin Limefaf3062017-08-14 16:18:19 -07003190 except ( AssertionError, TypeError ):
Devin Lim58046fa2017-07-05 16:55:00 -07003191 main.log.exception( "Json object not as expected" )
3192 main.log.error( repr( host ) )
3193 hostAttachment = False
3194 else:
3195 main.log.error( "No hosts json output or \"Error\"" +
3196 " in output. hosts = " +
3197 repr( hosts[ controller ] ) )
3198 if zeroHosts is False:
3199 # TODO: Find a way to know if there should be hosts in a
3200 # given point of the test
3201 hostAttachment = True
3202
3203 # END CHECKING HOST ATTACHMENT POINTS
3204 devicesResults = devicesResults and currentDevicesResult
3205 linksResults = linksResults and currentLinksResult
3206 hostsResults = hostsResults and currentHostsResult
3207 hostAttachmentResults = hostAttachmentResults and\
3208 hostAttachment
3209 topoResult = ( devicesResults and linksResults
3210 and hostsResults and ipResult and
3211 hostAttachmentResults )
3212 utilities.assert_equals( expect=True,
3213 actual=topoResult,
3214 onpass="ONOS topology matches Mininet",
3215 onfail=topoFailMsg )
3216 # End of While loop to pull ONOS state
3217
3218 # Compare json objects for hosts and dataplane clusters
3219
3220 # hosts
3221 main.step( "Hosts view is consistent across all ONOS nodes" )
3222 consistentHostsResult = main.TRUE
3223 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003224 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003225 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3226 if hosts[ controller ] == hosts[ 0 ]:
3227 continue
3228 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003229 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003230 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003231 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003232 consistentHostsResult = main.FALSE
3233
3234 else:
Jon Hallca319892017-06-15 15:25:22 -07003235 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003236 controllerStr )
3237 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003238 main.log.debug( controllerStr +
3239 " hosts response: " +
3240 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003241 utilities.assert_equals(
3242 expect=main.TRUE,
3243 actual=consistentHostsResult,
3244 onpass="Hosts view is consistent across all ONOS nodes",
3245 onfail="ONOS nodes have different views of hosts" )
3246
3247 main.step( "Hosts information is correct" )
3248 hostsResults = hostsResults and ipResult
3249 utilities.assert_equals(
3250 expect=main.TRUE,
3251 actual=hostsResults,
3252 onpass="Host information is correct",
3253 onfail="Host information is incorrect" )
3254
3255 main.step( "Host attachment points to the network" )
3256 utilities.assert_equals(
3257 expect=True,
3258 actual=hostAttachmentResults,
3259 onpass="Hosts are correctly attached to the network",
3260 onfail="ONOS did not correctly attach hosts to the network" )
3261
3262 # Strongly connected clusters of devices
3263 main.step( "Clusters view is consistent across all ONOS nodes" )
3264 consistentClustersResult = main.TRUE
3265 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003266 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003267 if "Error" not in clusters[ controller ]:
3268 if clusters[ controller ] == clusters[ 0 ]:
3269 continue
3270 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003271 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003272 controllerStr +
3273 " is inconsistent with ONOS1" )
3274 consistentClustersResult = main.FALSE
3275 else:
3276 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003277 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003278 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003279 main.log.debug( controllerStr +
3280 " clusters response: " +
3281 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003282 utilities.assert_equals(
3283 expect=main.TRUE,
3284 actual=consistentClustersResult,
3285 onpass="Clusters view is consistent across all ONOS nodes",
3286 onfail="ONOS nodes have different views of clusters" )
3287 if not consistentClustersResult:
3288 main.log.debug( clusters )
3289 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003290 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003291
3292 main.step( "There is only one SCC" )
3293 # there should always only be one cluster
3294 try:
3295 numClusters = len( json.loads( clusters[ 0 ] ) )
3296 except ( ValueError, TypeError ):
3297 main.log.exception( "Error parsing clusters[0]: " +
3298 repr( clusters[ 0 ] ) )
3299 numClusters = "ERROR"
3300 clusterResults = main.FALSE
3301 if numClusters == 1:
3302 clusterResults = main.TRUE
3303 utilities.assert_equals(
3304 expect=1,
3305 actual=numClusters,
3306 onpass="ONOS shows 1 SCC",
3307 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3308
3309 topoResult = ( devicesResults and linksResults
3310 and hostsResults and consistentHostsResult
3311 and consistentClustersResult and clusterResults
3312 and ipResult and hostAttachmentResults )
3313
3314 topoResult = topoResult and int( count <= 2 )
3315 note = "note it takes about " + str( int( cliTime ) ) + \
3316 " seconds for the test to make all the cli calls to fetch " +\
3317 "the topology from each ONOS instance"
3318 main.log.info(
3319 "Very crass estimate for topology discovery/convergence( " +
3320 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3321 str( count ) + " tries" )
3322
3323 main.step( "Device information is correct" )
3324 utilities.assert_equals(
3325 expect=main.TRUE,
3326 actual=devicesResults,
3327 onpass="Device information is correct",
3328 onfail="Device information is incorrect" )
3329
3330 main.step( "Links are correct" )
3331 utilities.assert_equals(
3332 expect=main.TRUE,
3333 actual=linksResults,
3334 onpass="Link are correct",
3335 onfail="Links are incorrect" )
3336
3337 main.step( "Hosts are correct" )
3338 utilities.assert_equals(
3339 expect=main.TRUE,
3340 actual=hostsResults,
3341 onpass="Hosts are correct",
3342 onfail="Hosts are incorrect" )
3343
3344 # FIXME: move this to an ONOS state case
3345 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08003346 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07003347 False,
Devin Lim58046fa2017-07-05 16:55:00 -07003348 attempts=5 )
3349 utilities.assert_equals( expect=True, actual=nodeResults,
3350 onpass="Nodes check successful",
3351 onfail="Nodes check NOT successful" )
3352 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003353 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003354 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003355 ctrl.name,
3356 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003357
3358 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003359 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003360
Devin Lim58046fa2017-07-05 16:55:00 -07003361 def linkDown( self, main, fromS="s3", toS="s28" ):
3362 """
3363 Link fromS-toS down
3364 """
3365 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003366 assert main, "main not defined"
3367 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003368 # NOTE: You should probably run a topology check after this
3369
3370 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3371
3372 description = "Turn off a link to ensure that Link Discovery " +\
3373 "is working properly"
3374 main.case( description )
3375
3376 main.step( "Kill Link between " + fromS + " and " + toS )
3377 LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
3378 main.log.info( "Waiting " + str( linkSleep ) +
3379 " seconds for link down to be discovered" )
3380 time.sleep( linkSleep )
3381 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3382 onpass="Link down successful",
3383 onfail="Failed to bring link down" )
3384 # TODO do some sort of check here
3385
3386 def linkUp( self, main, fromS="s3", toS="s28" ):
3387 """
3388 Link fromS-toS up
3389 """
3390 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003391 assert main, "main not defined"
3392 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003393 # NOTE: You should probably run a topology check after this
3394
3395 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3396
3397 description = "Restore a link to ensure that Link Discovery is " + \
3398 "working properly"
3399 main.case( description )
3400
Jon Hall4173b242017-09-12 17:04:38 -07003401 main.step( "Bring link between " + fromS + " and " + toS + " back up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003402 LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
3403 main.log.info( "Waiting " + str( linkSleep ) +
3404 " seconds for link up to be discovered" )
3405 time.sleep( linkSleep )
3406 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3407 onpass="Link up successful",
3408 onfail="Failed to bring link up" )
3409
3410 def switchDown( self, main ):
3411 """
3412 Switch Down
3413 """
3414 # NOTE: You should probably run a topology check after this
3415 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003416 assert main, "main not defined"
3417 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003418
3419 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3420
3421 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003422 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003423 main.case( description )
3424 switch = main.params[ 'kill' ][ 'switch' ]
3425 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3426
3427 # TODO: Make this switch parameterizable
3428 main.step( "Kill " + switch )
3429 main.log.info( "Deleting " + switch )
3430 main.Mininet1.delSwitch( switch )
3431 main.log.info( "Waiting " + str( switchSleep ) +
3432 " seconds for switch down to be discovered" )
3433 time.sleep( switchSleep )
3434 device = onosCli.getDevice( dpid=switchDPID )
3435 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003436 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003437 result = main.FALSE
3438 if device and device[ 'available' ] is False:
3439 result = main.TRUE
3440 utilities.assert_equals( expect=main.TRUE, actual=result,
3441 onpass="Kill switch successful",
3442 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003443
Devin Lim58046fa2017-07-05 16:55:00 -07003444 def switchUp( self, main ):
3445 """
3446 Switch Up
3447 """
3448 # NOTE: You should probably run a topology check after this
3449 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003450 assert main, "main not defined"
3451 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003452
3453 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3454 switch = main.params[ 'kill' ][ 'switch' ]
3455 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3456 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003457 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003458 description = "Adding a switch to ensure it is discovered correctly"
3459 main.case( description )
3460
3461 main.step( "Add back " + switch )
3462 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3463 for peer in links:
3464 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003465 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003466 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3467 main.log.info( "Waiting " + str( switchSleep ) +
3468 " seconds for switch up to be discovered" )
3469 time.sleep( switchSleep )
3470 device = onosCli.getDevice( dpid=switchDPID )
3471 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003472 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003473 result = main.FALSE
3474 if device and device[ 'available' ]:
3475 result = main.TRUE
3476 utilities.assert_equals( expect=main.TRUE, actual=result,
3477 onpass="add switch successful",
3478 onfail="Failed to add switch?" )
3479
3480 def startElectionApp( self, main ):
3481 """
3482 start election app on all onos nodes
3483 """
Devin Lim58046fa2017-07-05 16:55:00 -07003484 assert main, "main not defined"
3485 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003486
3487 main.case( "Start Leadership Election app" )
3488 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003489 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -07003490 appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003491 utilities.assert_equals(
3492 expect=main.TRUE,
3493 actual=appResult,
3494 onpass="Election app installed",
3495 onfail="Something went wrong with installing Leadership election" )
3496
3497 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003498 onosCli.electionTestRun()
3499 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003500 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003501 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003502 utilities.assert_equals(
3503 expect=True,
3504 actual=sameResult,
3505 onpass="All nodes see the same leaderboards",
3506 onfail="Inconsistent leaderboards" )
3507
3508 if sameResult:
3509 leader = leaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003510 if onosCli.ipAddress in leader:
Devin Lim58046fa2017-07-05 16:55:00 -07003511 correctLeader = True
3512 else:
3513 correctLeader = False
3514 main.step( "First node was elected leader" )
3515 utilities.assert_equals(
3516 expect=True,
3517 actual=correctLeader,
3518 onpass="Correct leader was elected",
3519 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003520 main.Cluster.testLeader = leader
3521
    def isElectionFunctional( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        # With a single-node cluster, withdrawing the only candidate must
        # leave the topic leaderless
        if len( main.Cluster.runningNodes ) == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
        utilities.assert_equals(
            expect=True,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        # Without the election test app loaded the rest of this case is moot
        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = main.Cluster.active()
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # Leaderboard entry [0][0] is the current leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.info( "Old leader: " + oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader by matching its IP against the
        # active controllers
        for ctrl in main.Cluster.active():
            if oldLeader == ctrl.ipAddress:
                oldLeaderCLI = ctrl
                break
        else:  # FOR/ELSE statement: runs only if the loop never broke
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means the topic has no leader; only acceptable when
            # expectNoLeader is set (single-node cluster)
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[ 0 ] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Leaderboard too short to know who was next in line
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Paremterize
        positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        if not reRunLeaders[ 0 ]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                     str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
Jon Hallca319892017-06-15 15:25:22 -07003688
    def installDistributedPrimitiveApp( self, main ):
        """
        Install Distributed Primitives app and initialize the shared
        state that later distributed-primitives test cases compare
        against ( expected counter value and expected set contents ).
        """
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        # Variables for the distributed primitives tests
        main.pCounterName = "TestON-Partitions"  # name of the partitioned counter
        main.pCounterValue = 0  # expected counter value, updated by later cases
        main.onosSet = set( [] )  # local mirror of the expected distributed set
        main.onosSetName = "TestON-set"  # name of the distributed set

        description = "Install Primitives app"
        main.case( description )
        main.step( "Install Primitives app" )
        appName = "org.onosproject.distributedprimitives"
        # Activating on one node is enough; app activation propagates
        # through the cluster -- TODO confirm
        appResults = main.Cluster.next().CLI.activateApp( appName )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=appResults,
                                 onpass="Primitives app activated",
                                 onfail="Primitives app not activated" )
        # TODO check on all nodes instead of sleeping
        time.sleep( 5 )  # To allow all nodes to activate