"""
Copyright 2015 Open Networking Foundation ( ONF )

Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>

    TestON is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    ( at your option ) any later version.

    TestON is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with TestON. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import pexpect  # needed for the pexpect.TIMEOUT / pexpect.EOF handlers below
import time


class HA():

    def __init__( self ):
        self.default = ''

    def customizeOnosGenPartitions( self ):
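        """
        Copy the custom onos-gen-partitions script from the test dependencies
        into the ONOS tools directory so packaging uses it.
        """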
        # copy gen-partitions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )

    def cleanUpGenPartition( self ):
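        """
        Restore the original onos-gen-partitions file by checking it out from
        git on the ONOS bench node.
        """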
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanAndExit()

    def startingMininet( self ):
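        """
        Copy the topology file to the Mininet node and start the Mininet
        network.
        """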
        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

    def scalingMetadata( self ):
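        """
        Generate the initial cluster metadata file for a scaling test, using
        the first entry of the 'scaling' param to set the cluster size.
        """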
        import re
        main.step( "Generate initial metadata file" )
        main.scaling = main.params[ 'scaling' ].split( "," )
        main.log.debug( main.scaling )
        scale = main.scaling.pop( 0 )
        main.log.debug( scale )
        if "e" in scale:
            equal = True
        else:
            equal = False
        main.log.debug( equal )
        main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
        genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failed to generate new metadata file" )

    def swapNodeMetadata( self ):
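        """
        Generate a cluster metadata file for a node-swapping test, running two
        fewer nodes than the full cluster size. Requires at least 5 nodes.
        """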
        main.step( "Generate initial metadata file" )
        if main.Cluster.numCtrls >= 5:
            main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
        else:
            main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
        genResult = main.Server.generateFile( main.Cluster.numCtrls )
        utilities.assert_equals( expect=main.TRUE, actual=genResult,
                                 onpass="New cluster metadata file generated",
                                 onfail="Failed to generate new metadata file" )

    def setServerForCluster( self ):
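        """
        Start a SimpleHTTPServer on the bench node to serve the cluster
        metadata file over HTTP.
        """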
        import os
        main.step( "Setup server for cluster metadata file" )
        main.serverPort = main.params[ 'server' ][ 'port' ]
        rootDir = os.path.dirname( main.testFile ) + "/dependencies"
        main.log.debug( "Root dir: {}".format( rootDir ) )
        status = main.Server.start( main.ONOSbench,
                                    rootDir,
                                    port=main.serverPort,
                                    logDir=main.logdir + "/server.log" )
        utilities.assert_equals( expect=main.TRUE, actual=status,
                                 onpass="Server started",
                                 onfail="Failed to start SimpleHTTPServer" )

    def copyBackupConfig( self ):
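        """
        Save a backup copy of the onos-service config file so it can be
        restored after the test modifies it.
        """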
        main.step( "Copying backup config files" )
        main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 main.onosServicepath,
                                 main.onosServicepath + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )

    def setMetadataUrl( self ):
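        """
        Modify the onos-service file so ONOS loads its cluster metadata from
        the HTTP server started by setServerForCluster().
        """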
        # NOTE: You should probably backup the config before and reset the config after the test
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
                                                                                         main.onosServicepath )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )

    def cleanUpOnosService( self ):
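        """
        Restore the original onos-service file from the backup copy.
        """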
        # Cleanup custom onos-service file
        main.ONOSbench.scp( main.ONOSbench,
                            main.onosServicepath + ".backup",
                            main.onosServicepath,
                            direction="to" )

    def consistentCheck( self ):
        """
        Checks that TestON counters are consistent across all nodes.

        Returns the tuple ( onosCounters, consistent )
        - onosCounters is the parsed json output of the counters command on
          all nodes
        - consistent is main.TRUE if all "TestON" counters are consistent across
          all nodes or main.FALSE
        """
        try:
            # Get onos counters results
            onosCountersRaw = []
            threads = []
            for ctrl in main.Cluster.active():
                t = main.Thread( target=utilities.retry,
                                 name="counters-" + str( ctrl ),
                                 args=[ ctrl.counters, [ None ] ],
                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                          'randomTime': True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                onosCountersRaw.append( t.result )
            onosCounters = []
            for i in range( len( onosCountersRaw ) ):
                try:
                    onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
                except ( ValueError, TypeError ):
                    main.log.error( "Could not parse counters response from " +
                                    str( main.Cluster.active( i ) ) )
                    main.log.warn( repr( onosCountersRaw[ i ] ) )
                    onosCounters.append( [] )

            testCounters = {}
            # make a list of all the "TestON-*" counters in ONOS
            # looks like a dict whose keys are the name of the ONOS node and
            # values are a list of the counters. I.E.
            # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
            # }
            # NOTE: There is an assumption that all nodes are active
            #       based on the above for loops
            for controller in enumerate( onosCounters ):
                for key, value in controller[ 1 ].iteritems():
                    if 'TestON' in key:
                        node = str( main.Cluster.active( controller[ 0 ] ) )
                        try:
                            testCounters[ node ].append( { key: value } )
                        except KeyError:
                            testCounters[ node ] = [ { key: value } ]
            # compare the counters on each node
            firstV = testCounters.values()[ 0 ]
            tmp = [ v == firstV for k, v in testCounters.iteritems() ]
            if all( tmp ):
                consistent = main.TRUE
            else:
                consistent = main.FALSE
                main.log.error( "ONOS nodes have different values for counters:\n" +
                                str( testCounters ) )
            return ( onosCounters, consistent )
        except Exception:
            main.log.exception( "" )
            main.cleanAndExit()

221 def counterCheck( self, counterName, counterValue ):
222 """
223 Checks that TestON counters are consistent across all nodes and that
224 specified counter is in ONOS with the given value
225 """
226 try:
227 correctResults = main.TRUE
228 # Get onos counters results and consistentCheck
229 onosCounters, consistent = self.consistentCheck()
230 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700231 for i in range( len( main.Cluster.active() ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700232 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700233 onosValue = None
234 try:
235 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700236 except AttributeError:
Devin Lim142b5342017-07-20 15:22:39 -0700237 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -0700238 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700239 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700240 correctResults = main.FALSE
241 if onosValue == counterValue:
242 main.log.info( counterName + " counter value is correct" )
243 else:
Jon Hall41d39f12016-04-11 22:54:35 -0700244 main.log.error( counterName +
245 " counter value is incorrect," +
246 " expected value: " + str( counterValue ) +
247 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700248 correctResults = main.FALSE
249 return consistent and correctResults
250 except Exception:
251 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700252 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700253
254 def consistentLeaderboards( self, nodes ):
        TOPIC = 'org.onosproject.election'
        # FIXME: use threads
        # FIXME: should we retry outside the function?
        for n in range( 5 ):  # Retry in case election is still happening
            leaderList = []
            # Get all leaderboards
            for cli in nodes:
                leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
            # Compare leaderboards
            result = all( i == leaderList[ 0 ] for i in leaderList ) and\
                     leaderList is not None
            main.log.debug( leaderList )
            main.log.warn( result )
            if result:
                return ( result, leaderList )
            time.sleep( 5 )  # TODO: parameterize
        main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
        return ( result, leaderList )

    def nodesCheck( self, nodes ):
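        """
        Check that every given ONOS node reports all active cluster members as
        READY. Returns True only if this holds on every node.
        """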
        nodesOutput = []
        results = True
        threads = []
        for node in nodes:
            t = main.Thread( target=node.nodes,
                             name="nodes-" + str( node ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            nodesOutput.append( t.result )
        ips = sorted( main.Cluster.getIps( activeOnly=True ) )
        for i in nodesOutput:
            try:
                current = json.loads( i )
                activeIps = []
                currentResult = False
                for node in current:
                    if node[ 'state' ] == 'READY':
                        activeIps.append( node[ 'ip' ] )
                activeIps.sort()
                if ips == activeIps:
                    currentResult = True
            except ( ValueError, TypeError ):
                main.log.error( "Error parsing nodes output" )
                main.log.warn( repr( i ) )
                currentResult = False
            results = results and currentResult
        return results

    def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
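        """
        Emit the Confluence wiki markup used to embed the Jenkins plot for
        this test in the results page.
        """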
        # GRAPHS
        # NOTE: important params here:
        #       job = name of Jenkins job
        #       Plot Name = Plot-HA, only can be used if multiple plots
        #       index = The number of the graph under plot name
        job = testName
        graphs = '<ac:structured-macro ac:name="html">\n'
        graphs += '<ac:plain-text-body><![CDATA[\n'
        graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
                  '/plot/' + plotName + '/getPlot?index=' + str( index ) +\
                  '&width=500&height=300"' +\
                  'noborder="0" width="500" height="300" scrolling="yes" ' +\
                  'seamless="seamless"></iframe>\n'
        graphs += ']]></ac:plain-text-body>\n'
        graphs += '</ac:structured-macro>\n'
        main.log.wiki( graphs )

    def initialSetUp( self, serviceClean=False ):
        """
        rest of initialSetup
        """
        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( self.nodesCheck,
                                       False,
                                       args=[ main.Cluster.active() ],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This should be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

    def commonChecks( self ):
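        """
        Run the standard set of sanity checks: topics, partitions, pending map
        and app IDs.
        """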
        # TODO: make this assertable or assert in here?
        self.topicsCheck()
        self.partitionsCheck()
        self.pendingMapCheck()
        self.appCheck()

    def topicsCheck( self, extraTopics=[] ):
        """
        Check for work partition topics in leaders output
        """
        leaders = main.Cluster.next().leaders()
        missing = False
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                output = json.dumps( parsedLeaders,
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
                main.log.debug( "Leaders: " + output )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                topics += extraTopics
                main.log.debug( topics )
                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        if missing:
            # NOTE Can we refactor this into the Cluster class?
            # Maybe an option to print the output of a command from each node?
            for ctrl in main.Cluster.active():
                response = ctrl.CLI.leaders( jsonFormat=False )
                main.log.debug( str( ctrl.name ) + " leaders output: \n" +
                                str( response ) )
        return missing

    def partitionsCheck( self ):
        # TODO: return something assertable
        partitions = main.Cluster.next().partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                output = json.dumps( parsedPartitions,
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
                main.log.debug( "Partitions: " + output )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )

    def pendingMapCheck( self ):
        pendingMap = main.Cluster.next().pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                output = json.dumps( parsedPending,
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
                main.log.debug( "Pending map: " + output )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

    def appCheck( self ):
        """
        Check App IDs on all nodes
        """
        # FIXME: Rename this to appIDCheck? or add a check for installed apps
        appResults = main.Cluster.command( "appToIDCheck" )
        appCheck = all( i == main.TRUE for i in appResults )
        if not appCheck:
            ctrl = main.Cluster.active( 0 )
            main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
            main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
        return appCheck

    def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
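        """
        Compare the completed, in-progress and pending totals of a work queue
        against the expected values on every node. Returns True only if all
        three totals match on all nodes.
        """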
        # Completed
        completedValues = main.Cluster.command( "workQueueTotalCompleted",
                                                args=[ workQueueName ] )
        # Check the results
        completedResults = [ int( x ) == completed for x in completedValues ]
        completedResult = all( completedResults )
        if not completedResult:
            main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
                workQueueName, completed, completedValues ) )

        # In Progress
        inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
                                                 args=[ workQueueName ] )
        # Check the results
        inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
        inProgressResult = all( inProgressResults )
        if not inProgressResult:
            main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
                workQueueName, inProgress, inProgressValues ) )

        # Pending
        pendingValues = main.Cluster.command( "workQueueTotalPending",
                                              args=[ workQueueName ] )
        # Check the results
        pendingResults = [ int( x ) == pending for x in pendingValues ]
        pendingResult = all( pendingResults )
        if not pendingResult:
            main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
                workQueueName, pending, pendingValues ) )
        return completedResult and inProgressResult and pendingResult

    def assignDevices( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
                               "and check that an ONOS node becomes the " + \
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = main.Cluster.getIps()
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            for ctrl in main.Cluster.runningNodes:
                if re.search( "tcp:" + ctrl.ipAddress, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + repr( ctrl ) + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

    def assignIntents( self, main ):
        """
        Assign intents
        """
        import time
        import json
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        try:
            main.HAlabels
        except ( NameError, AttributeError ):
            main.log.error( "main.HAlabels not defined, setting to []" )
            main.HAlabels = []
        try:
            main.HAdata
        except ( NameError, AttributeError ):
            main.log.error( "data not defined, setting to []" )
            main.HAdata = []
        main.case( "Adding host Intents" )
        main.caseExplanation = "Discover hosts by using pingall then " +\
                               "assign predetermined host-to-host intents." +\
                               " After installation, check that the intent" +\
                               " is distributed to all nodes and the state" +\
                               " is INSTALLED"

        # install onos-app-fwd
        main.step( "Install reactive forwarding app" )
        onosCli = main.Cluster.next()
        installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=installResults,
                                 onpass="Install fwd successful",
                                 onfail="Install fwd failed" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Discovering Hosts( Via pingall for now )" )
        # FIXME: Once we have a host discovery mechanism, use that instead
        # REACTIVE FWD test
        pingResult = main.FALSE
        passMsg = "Reactive Pingall test passed"
        time1 = time.time()
        pingResult = main.Mininet1.pingall()
        time2 = time.time()
        if not pingResult:
            main.log.warn( "First pingall failed. Trying again..." )
            pingResult = main.Mininet1.pingall()
            passMsg += " on the second try"
        utilities.assert_equals(
            expect=main.TRUE,
            actual=pingResult,
            onpass=passMsg,
            onfail="Reactive Pingall failed, " +
                   "one or more ping pairs failed" )
        main.log.info( "Time for pingall: %2f seconds" %
                       ( time2 - time1 ) )
        if not pingResult:
            main.cleanAndExit()
        # timeout for fwd flows
        time.sleep( 11 )
        # uninstall onos-app-fwd
        main.step( "Uninstall reactive forwarding app" )
        uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
                                 onpass="Uninstall fwd successful",
                                 onfail="Uninstall fwd failed" )

        main.step( "Check app ids" )
        appCheck2 = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck2,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

        main.step( "Add host intents via cli" )
        intentIds = []
        # TODO: move the host numbers to params
        #       Maybe look at all the paths we ping?
        intentAddResult = True
        hostResult = main.TRUE
        for i in range( 8, 18 ):
            main.log.info( "Adding host intent between h" + str( i ) +
                           " and h" + str( i + 10 ) )
            host1 = "00:00:00:00:00:" + \
                    str( hex( i )[ 2: ] ).zfill( 2 ).upper()
            host2 = "00:00:00:00:00:" + \
                    str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
            # NOTE: getHost can return None
            host1Dict = onosCli.CLI.getHost( host1 )
            host2Dict = onosCli.CLI.getHost( host2 )
            host1Id = None
            host2Id = None
            if host1Dict and host2Dict:
                host1Id = host1Dict.get( 'id', None )
                host2Id = host2Dict.get( 'id', None )
            if host1Id and host2Id:
                nodeNum = len( main.Cluster.active() )
                ctrl = main.Cluster.active( i % nodeNum )
                tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
                if tmpId:
                    main.log.info( "Added intent with id: " + tmpId )
                    intentIds.append( tmpId )
                else:
                    main.log.error( "addHostIntent returned: " +
                                    repr( tmpId ) )
            else:
                main.log.error( "Error, getHost() failed for h" + str( i ) +
                                " and/or h" + str( i + 10 ) )
                hosts = main.Cluster.next().CLI.hosts()
                try:
                    output = json.dumps( json.loads( hosts ),
                                         sort_keys=True,
                                         indent=4,
                                         separators=( ',', ': ' ) )
                except ( ValueError, TypeError ):
                    output = repr( hosts )
                main.log.debug( "Hosts output: %s" % output )
                hostResult = main.FALSE
        utilities.assert_equals( expect=main.TRUE, actual=hostResult,
                                 onpass="Found a host id for each host",
                                 onfail="Error looking up host ids" )

        intentStart = time.time()
        onosIds = onosCli.getAllIntentsId()
        main.log.info( "Submitted intents: " + str( intentIds ) )
        main.log.info( "Intents in ONOS: " + str( onosIds ) )
        for intent in intentIds:
            if intent in onosIds:
                pass  # intent submitted is in onos
            else:
                intentAddResult = False
        if intentAddResult:
            intentStop = time.time()
        else:
            intentStop = None
        # Print the intent states
        intents = onosCli.CLI.intents()
        intentStates = []
        installedCheck = True
        main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
        count = 0
        try:
            for intent in json.loads( intents ):
                state = intent.get( 'state', None )
                if "INSTALLED" not in state:
                    installedCheck = False
                intentId = intent.get( 'id', None )
                intentStates.append( ( intentId, state ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing intents" )
        # add submitted intents not in the store
        tmplist = [ i for i, s in intentStates ]
        missingIntents = False
        for i in intentIds:
            if i not in tmplist:
                intentStates.append( ( i, " - " ) )
                missingIntents = True
        intentStates.sort()
        for i, s in intentStates:
            count += 1
            main.log.info( "%-6s%-15s%-15s" %
                           ( str( count ), str( i ), str( s ) ) )
        self.commonChecks()

        intentAddResult = bool( intentAddResult and not missingIntents and
                                installedCheck )
        if not intentAddResult:
            main.log.error( "Error in pushing host intents to ONOS" )

        main.step( "Intent Anti-Entropy dispersion" )
        for j in range( 100 ):
            correct = True
            main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
            for ctrl in main.Cluster.active():
                onosIds = []
                ids = ctrl.getAllIntentsId()
                onosIds.append( ids )
                main.log.debug( "Intents in " + ctrl.name + ": " +
                                str( sorted( onosIds ) ) )
                if sorted( ids ) != sorted( intentIds ):
                    main.log.warn( "Set of intent IDs doesn't match" )
                    correct = False
                    break
                else:
                    intents = json.loads( ctrl.CLI.intents() )
                    for intent in intents:
                        if intent[ 'state' ] != "INSTALLED":
                            main.log.warn( "Intent " + intent[ 'id' ] +
                                           " is " + intent[ 'state' ] )
                            correct = False
                            break
            if correct:
                break
            else:
                time.sleep( 1 )
        if not intentStop:
            intentStop = time.time()
        global gossipTime
        gossipTime = intentStop - intentStart
        main.log.info( "It took about " + str( gossipTime ) +
                       " seconds for all intents to appear in each node" )
        append = False
        title = "Gossip Intents"
        count = 1
        while append is False:
            curTitle = title + str( count )
            if curTitle not in main.HAlabels:
                main.HAlabels.append( curTitle )
                main.HAdata.append( str( gossipTime ) )
                append = True
            else:
                count += 1
        gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
        maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
        utilities.assert_greater_equals(
            expect=maxGossipTime, actual=gossipTime,
            onpass="ECM anti-entropy for intents worked within " +
                   "expected time",
            onfail="Intent ECM anti-entropy took too long. " +
                   "Expected time:{}, Actual time:{}".format( maxGossipTime,
                                                              gossipTime ) )
        if gossipTime <= maxGossipTime:
            intentAddResult = True

        pendingMap = main.Cluster.next().pendingMap()
        if not intentAddResult or "key" in pendingMap:
            import time
            installedCheck = True
            main.log.info( "Sleeping 60 seconds to see if intents are found" )
            time.sleep( 60 )
            onosIds = onosCli.getAllIntentsId()
            main.log.info( "Submitted intents: " + str( intentIds ) )
            main.log.info( "Intents in ONOS: " + str( onosIds ) )
            # Print the intent states
            intents = onosCli.CLI.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            try:
                for intent in json.loads( intents ):
                    # Iter through intents of a node
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents" )
            # add submitted intents not in the store
            tmplist = [ i for i, s in intentStates ]
            for i in intentIds:
                if i not in tmplist:
                    intentStates.append( ( i, " - " ) )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            self.topicsCheck( [ "org.onosproject.election" ] )
            self.partitionsCheck()
            self.pendingMapCheck()

    def pingAcrossHostIntent( self, main ):
        """
        Ping across added host intents
        """
        import json
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.Cluster.next()
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.CLI.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                        main.log.debug( "Failed intent: " + str( intent ) )
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            try:
                tmpIntents = onosCli.CLI.intents()
                output = json.dumps( json.loads( tmpIntents ),
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
            except ( ValueError, TypeError ):
                output = repr( tmpIntents )
            main.log.debug( "ONOS1 intents: " + output )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        topicsCheck = self.topicsCheck()
        utilities.assert_equals( expect=False, actual=topicsCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost" )
        self.partitionsCheck()
        self.pendingMapCheck()

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.CLI.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            self.commonChecks()

        # Print flowrules
        main.log.debug( onosCli.CLI.flows() )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( str( onosCli.name ) + " intents: " )
            try:
                tmpIntents = onosCli.CLI.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

    def checkRoleNotNull( self ):
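        """
        Verify that every device has a master assigned on all active nodes.
        """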
        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
        utilities.assert_equals(
            expect=True,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

    def checkTheRole( self ):
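        """
        Read device roles from each controller and check that they are
        consistent. Returns ( ONOSMastership, rolesResults, consistentMastership ).
        """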
1004 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -07001005 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -07001006 consistentMastership = True
1007 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001008 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001009 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001010 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001011 main.log.error( "Error in getting " + node + " roles" )
1012 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001013 repr( ONOSMastership[ i ] ) )
1014 rolesResults = False
1015 utilities.assert_equals(
1016 expect=True,
1017 actual=rolesResults,
1018 onpass="No error in reading roles output",
1019 onfail="Error in reading roles from ONOS" )
1020
1021 main.step( "Check for consistency in roles from each controller" )
1022 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1023 main.log.info(
1024 "Switch roles are consistent across all ONOS nodes" )
1025 else:
1026 consistentMastership = False
1027 utilities.assert_equals(
1028 expect=True,
1029 actual=consistentMastership,
1030 onpass="Switch roles are consistent across all ONOS nodes",
1031 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -07001032 return ONOSMastership, rolesResults, consistentMastership
1033
1034 def checkingIntents( self ):
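        """
        Collect the intents from each controller and check that the output
        could be read. Returns ( ONOSIntents, intentsResults ).
        """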
1035 main.step( "Get the intents from each controller" )
1036 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
1037 intentsResults = True
1038 for i in range( len( ONOSIntents ) ):
1039 node = str( main.Cluster.active( i ) )
1040 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1041 main.log.error( "Error in getting " + node + " intents" )
1042 main.log.warn( node + " intents response: " +
1043 repr( ONOSIntents[ i ] ) )
1044 intentsResults = False
1045 utilities.assert_equals(
1046 expect=True,
1047 actual=intentsResults,
1048 onpass="No error in reading intents output",
1049 onfail="Error in reading intents from ONOS" )
1050 return ONOSIntents, intentsResults
1051
    def readingState( self, main ):
        """
        Reading state of ONOS
        """
        import json
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        try:
            from tests.dependencies.topology import Topology
        except ImportError:
            main.log.error( "Topology not found, exiting the test" )
            main.cleanAndExit()
        try:
            main.topoRelated
        except ( NameError, AttributeError ):
            main.topoRelated = Topology()
        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents, flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        global mastershipState
        mastershipState = '[]'

        self.checkRoleNotNull()

        main.step( "Get the Mastership of each switch from each controller" )
        mastershipCheck = main.FALSE

        # NOTE: unpack in the same order as checkTheRole() returns its values
        ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()

1084 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001085 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001086 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001087 try:
1088 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001089 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001090 json.dumps(
1091 json.loads( ONOSMastership[ i ] ),
1092 sort_keys=True,
1093 indent=4,
1094 separators=( ',', ': ' ) ) )
1095 except ( ValueError, TypeError ):
1096 main.log.warn( repr( ONOSMastership[ i ] ) )
1097 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001098 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001099 mastershipState = ONOSMastership[ 0 ]
1100
Devin Lim58046fa2017-07-05 16:55:00 -07001101 global intentState
1102 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001103 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001104 intentCheck = main.FALSE
1105 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001106
Devin Lim58046fa2017-07-05 16:55:00 -07001107 main.step( "Check for consistency in Intents from each controller" )
1108 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1109 main.log.info( "Intents are consistent across all ONOS " +
1110 "nodes" )
1111 else:
1112 consistentIntents = False
1113 main.log.error( "Intents not consistent" )
1114 utilities.assert_equals(
1115 expect=True,
1116 actual=consistentIntents,
1117 onpass="Intents are consistent across all ONOS nodes",
1118 onfail="ONOS nodes have different views of intents" )
1119
1120 if intentsResults:
1121 # Try to make it easy to figure out what is happening
1122 #
1123 # Intent ONOS1 ONOS2 ...
1124 # 0x01 INSTALLED INSTALLING
1125 # ... ... ...
1126 # ... ... ...
1127 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001128 for ctrl in main.Cluster.active():
1129 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001130 main.log.warn( title )
1131 # get all intent keys in the cluster
1132 keys = []
1133 try:
1134 # Get the set of all intent keys
1135 for nodeStr in ONOSIntents:
1136 node = json.loads( nodeStr )
1137 for intent in node:
1138 keys.append( intent.get( 'id' ) )
1139 keys = set( keys )
1140 # For each intent key, print the state on each node
1141 for key in keys:
1142 row = "%-13s" % key
1143 for nodeStr in ONOSIntents:
1144 node = json.loads( nodeStr )
1145 for intent in node:
1146 if intent.get( 'id', "Error" ) == key:
1147 row += "%-15s" % intent.get( 'state' )
1148 main.log.warn( row )
1149 # End of intent state table
1150 except ValueError as e:
1151 main.log.exception( e )
1152 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1153
1154 if intentsResults and not consistentIntents:
1155 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001156 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001157 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1158 sort_keys=True,
1159 indent=4,
1160 separators=( ',', ': ' ) ) )
1161 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001162 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001163 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001164 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001165 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1166 sort_keys=True,
1167 indent=4,
1168 separators=( ',', ': ' ) ) )
1169 else:
Jon Hallca319892017-06-15 15:25:22 -07001170 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001171 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001172 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001173 intentState = ONOSIntents[ 0 ]
1174
1175 main.step( "Get the flows from each controller" )
1176 global flowState
1177 flowState = []
Jon Hall4173b242017-09-12 17:04:38 -07001178 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001179 ONOSFlowsJson = []
1180 flowCheck = main.FALSE
1181 consistentFlows = True
1182 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001183 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001184 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001185 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001186 main.log.error( "Error in getting " + node + " flows" )
1187 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001188 repr( ONOSFlows[ i ] ) )
1189 flowsResults = False
1190 ONOSFlowsJson.append( None )
1191 else:
1192 try:
1193 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1194 except ( ValueError, TypeError ):
1195 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001196 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001197 " response as json." )
1198 main.log.error( repr( ONOSFlows[ i ] ) )
1199 ONOSFlowsJson.append( None )
1200 flowsResults = False
1201 utilities.assert_equals(
1202 expect=True,
1203 actual=flowsResults,
1204 onpass="No error in reading flows output",
1205 onfail="Error in reading flows from ONOS" )
1206
1207 main.step( "Check for consistency in Flows from each controller" )
1208 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1209 if all( tmp ):
1210 main.log.info( "Flow count is consistent across all ONOS nodes" )
1211 else:
1212 consistentFlows = False
1213 utilities.assert_equals(
1214 expect=True,
1215 actual=consistentFlows,
1216 onpass="The flow count is consistent across all ONOS nodes",
1217 onfail="ONOS nodes have different flow counts" )
1218
1219 if flowsResults and not consistentFlows:
1220 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001221 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001222 try:
1223 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001224 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001225 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1226 indent=4, separators=( ',', ': ' ) ) )
1227 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001228 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001229 repr( ONOSFlows[ i ] ) )
1230 elif flowsResults and consistentFlows:
1231 flowCheck = main.TRUE
1232 flowState = ONOSFlows[ 0 ]
1233
1234 main.step( "Get the OF Table entries" )
1235 global flows
1236 flows = []
1237 for i in range( 1, 29 ):
1238 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1239 if flowCheck == main.FALSE:
1240 for table in flows:
1241 main.log.warn( table )
1242 # TODO: Compare switch flow tables with ONOS flow tables
1243
1244 main.step( "Start continuous pings" )
1245 main.Mininet2.pingLong(
1246 src=main.params[ 'PING' ][ 'source1' ],
1247 target=main.params[ 'PING' ][ 'target1' ],
1248 pingTime=500 )
1249 main.Mininet2.pingLong(
1250 src=main.params[ 'PING' ][ 'source2' ],
1251 target=main.params[ 'PING' ][ 'target2' ],
1252 pingTime=500 )
1253 main.Mininet2.pingLong(
1254 src=main.params[ 'PING' ][ 'source3' ],
1255 target=main.params[ 'PING' ][ 'target3' ],
1256 pingTime=500 )
1257 main.Mininet2.pingLong(
1258 src=main.params[ 'PING' ][ 'source4' ],
1259 target=main.params[ 'PING' ][ 'target4' ],
1260 pingTime=500 )
1261 main.Mininet2.pingLong(
1262 src=main.params[ 'PING' ][ 'source5' ],
1263 target=main.params[ 'PING' ][ 'target5' ],
1264 pingTime=500 )
1265 main.Mininet2.pingLong(
1266 src=main.params[ 'PING' ][ 'source6' ],
1267 target=main.params[ 'PING' ][ 'target6' ],
1268 pingTime=500 )
1269 main.Mininet2.pingLong(
1270 src=main.params[ 'PING' ][ 'source7' ],
1271 target=main.params[ 'PING' ][ 'target7' ],
1272 pingTime=500 )
1273 main.Mininet2.pingLong(
1274 src=main.params[ 'PING' ][ 'source8' ],
1275 target=main.params[ 'PING' ][ 'target8' ],
1276 pingTime=500 )
1277 main.Mininet2.pingLong(
1278 src=main.params[ 'PING' ][ 'source9' ],
1279 target=main.params[ 'PING' ][ 'target9' ],
1280 pingTime=500 )
1281 main.Mininet2.pingLong(
1282 src=main.params[ 'PING' ][ 'source10' ],
1283 target=main.params[ 'PING' ][ 'target10' ],
1284 pingTime=500 )
1285
1286 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001287 devices = main.topoRelated.getAll( "devices" )
1288 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1289 ports = main.topoRelated.getAll( "ports" )
1290 links = main.topoRelated.getAll( "links" )
1291 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001292 # Compare json objects for hosts and dataplane clusters
1293
1294 # hosts
1295 main.step( "Host view is consistent across ONOS nodes" )
1296 consistentHostsResult = main.TRUE
1297 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001298 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001299 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1300 if hosts[ controller ] == hosts[ 0 ]:
1301 continue
1302 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001303 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001304 controllerStr +
1305 " is inconsistent with ONOS1" )
1306 main.log.warn( repr( hosts[ controller ] ) )
1307 consistentHostsResult = main.FALSE
1308
1309 else:
Jon Hallca319892017-06-15 15:25:22 -07001310 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001311 controllerStr )
1312 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001313 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001314 " hosts response: " +
1315 repr( hosts[ controller ] ) )
1316 utilities.assert_equals(
1317 expect=main.TRUE,
1318 actual=consistentHostsResult,
1319 onpass="Hosts view is consistent across all ONOS nodes",
1320 onfail="ONOS nodes have different views of hosts" )
1321
1322 main.step( "Each host has an IP address" )
1323 ipResult = main.TRUE
1324 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001325 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001326 if hosts[ controller ]:
1327 for host in hosts[ controller ]:
1328 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001329 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001330 controllerStr + ": " + str( host ) )
1331 ipResult = main.FALSE
1332 utilities.assert_equals(
1333 expect=main.TRUE,
1334 actual=ipResult,
1335 onpass="The ips of the hosts aren't empty",
1336 onfail="The ip of at least one host is missing" )
1337
1338 # Strongly connected clusters of devices
1339 main.step( "Cluster view is consistent across ONOS nodes" )
1340 consistentClustersResult = main.TRUE
1341 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001342 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001343 if "Error" not in clusters[ controller ]:
1344 if clusters[ controller ] == clusters[ 0 ]:
1345 continue
1346 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001347 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001348 " are inconsistent with ONOS1" )
1349 consistentClustersResult = main.FALSE
1350
1351 else:
1352 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001353 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001354 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001355 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001356 " clusters response: " +
1357 repr( clusters[ controller ] ) )
1358 utilities.assert_equals(
1359 expect=main.TRUE,
1360 actual=consistentClustersResult,
1361 onpass="Clusters view is consistent across all ONOS nodes",
1362 onfail="ONOS nodes have different views of clusters" )
1363 if not consistentClustersResult:
1364 main.log.debug( clusters )
1365
1366 # there should always only be one cluster
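# ( the test topology is expected to be fully connected, so ONOS should
# report exactly one strongly connected component )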
1367 main.step( "Cluster view correct across ONOS nodes" )
1368 try:
1369 numClusters = len( json.loads( clusters[ 0 ] ) )
1370 except ( ValueError, TypeError ):
1371 main.log.exception( "Error parsing clusters[0]: " +
1372 repr( clusters[ 0 ] ) )
1373 numClusters = "ERROR"
1374 utilities.assert_equals(
1375 expect=1,
1376 actual=numClusters,
1377 onpass="ONOS shows 1 SCC",
1378 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1379
1380 main.step( "Comparing ONOS topology to MN" )
1381 devicesResults = main.TRUE
1382 linksResults = main.TRUE
1383 hostsResults = main.TRUE
1384 mnSwitches = main.Mininet1.getSwitches()
1385 mnLinks = main.Mininet1.getLinks()
1386 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001387 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001388 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001389 currentDevicesResult = main.topoRelated.compareDevicePort(
1390 main.Mininet1, controller,
1391 mnSwitches, devices, ports )
1392 utilities.assert_equals( expect=main.TRUE,
1393 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001394 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001395 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001396 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001397 " Switches view is incorrect" )
1398
1399 currentLinksResult = main.topoRelated.compareBase( links, controller,
1400 main.Mininet1.compareLinks,
1401 [ mnSwitches, mnLinks ] )
1402 utilities.assert_equals( expect=main.TRUE,
1403 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001404 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001405 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001406 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001407 " links view is incorrect" )
1408
1409 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1410 currentHostsResult = main.Mininet1.compareHosts(
1411 mnHosts,
1412 hosts[ controller ] )
1413 else:
1414 currentHostsResult = main.FALSE
1415 utilities.assert_equals( expect=main.TRUE,
1416 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001417 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001418 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001419 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001420 " hosts don't match Mininet" )
1421
1422 devicesResults = devicesResults and currentDevicesResult
1423 linksResults = linksResults and currentLinksResult
1424 hostsResults = hostsResults and currentHostsResult
1425
1426 main.step( "Device information is correct" )
1427 utilities.assert_equals(
1428 expect=main.TRUE,
1429 actual=devicesResults,
1430 onpass="Device information is correct",
1431 onfail="Device information is incorrect" )
1432
1433 main.step( "Links are correct" )
1434 utilities.assert_equals(
1435 expect=main.TRUE,
1436 actual=linksResults,
1437 onpass="Link are correct",
1438 onfail="Links are incorrect" )
1439
1440 main.step( "Hosts are correct" )
1441 utilities.assert_equals(
1442 expect=main.TRUE,
1443 actual=hostsResults,
1444 onpass="Hosts are correct",
1445 onfail="Hosts are incorrect" )
1446
1447 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001448 """
1449 Check for basic functionality with distributed primitives
1450 """
Jon Halle0f0b342017-04-18 11:43:47 -07001451 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001452 try:
1453 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001454 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001455 assert main.pCounterName, "main.pCounterName not defined"
1456 assert main.onosSetName, "main.onosSetName not defined"
1457 # NOTE: assert fails if value is 0/None/Empty/False
1458 try:
1459 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001460 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001461 main.log.error( "main.pCounterValue not defined, setting to 0" )
1462 main.pCounterValue = 0
1463 try:
1464 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001465 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001466 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001467 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001468 # Variables for the distributed primitives tests. These are local only
1469 addValue = "a"
1470 addAllValue = "a b c d e f"
1471 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001472 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001473 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001474 workQueueName = "TestON-Queue"
1475 workQueueCompleted = 0
1476 workQueueInProgress = 0
1477 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001478
1479 description = "Check for basic functionality with distributed " +\
1480 "primitives"
1481 main.case( description )
1482 main.caseExplanation = "Test the methods of the distributed " +\
1483 "primitives (counters and sets) throught the cli"
1484 # DISTRIBUTED ATOMIC COUNTERS
1485 # Partitioned counters
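# main.pCounterValue mirrors the expected counter value locally; it is
# updated once per active node below and then checked against the values
# returned by the cluster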
1486 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001487 pCounters = main.Cluster.command( "counterTestAddAndGet",
1488 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001489 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001490 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001491 main.pCounterValue += 1
1492 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001493 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001494 pCounterResults = True
1495 for i in addedPValues:
1496 tmpResult = i in pCounters
1497 pCounterResults = pCounterResults and tmpResult
1498 if not tmpResult:
1499 main.log.error( str( i ) + " is not in partitioned "
1500 "counter incremented results" )
1501 utilities.assert_equals( expect=True,
1502 actual=pCounterResults,
1503 onpass="Default counter incremented",
1504 onfail="Error incrementing default" +
1505 " counter" )
1506
1507 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001508 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1509 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001510 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001511 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001512 addedPValues.append( main.pCounterValue )
1513 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001514 # Check that counter incremented numController times
1515 pCounterResults = True
1516 for i in addedPValues:
1517 tmpResult = i in pCounters
1518 pCounterResults = pCounterResults and tmpResult
1519 if not tmpResult:
1520 main.log.error( str( i ) + " is not in partitioned "
1521 "counter incremented results" )
1522 utilities.assert_equals( expect=True,
1523 actual=pCounterResults,
1524 onpass="Default counter incremented",
1525 onfail="Error incrementing default" +
1526 " counter" )
1527
1528 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001529 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001530 utilities.assert_equals( expect=main.TRUE,
1531 actual=incrementCheck,
1532 onpass="Added counters are correct",
1533 onfail="Added counters are incorrect" )
1534
1535 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001536 pCounters = main.Cluster.command( "counterTestAddAndGet",
1537 args=[ main.pCounterName ],
1538 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001539 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001540 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001541 main.pCounterValue += -8
1542 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001543 # Check that counter incremented numController times
1544 pCounterResults = True
1545 for i in addedPValues:
1546 tmpResult = i in pCounters
1547 pCounterResults = pCounterResults and tmpResult
1548 if not tmpResult:
1549 main.log.error( str( i ) + " is not in partitioned "
1550 "counter incremented results" )
1551 utilities.assert_equals( expect=True,
1552 actual=pCounterResults,
1553 onpass="Default counter incremented",
1554 onfail="Error incrementing default" +
1555 " counter" )
1556
1557 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001558 pCounters = main.Cluster.command( "counterTestAddAndGet",
1559 args=[ main.pCounterName ],
1560 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001561 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001562 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001563 main.pCounterValue += 5
1564 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001565
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001566 # Check that counter incremented numController times
1567 pCounterResults = True
1568 for i in addedPValues:
1569 tmpResult = i in pCounters
1570 pCounterResults = pCounterResults and tmpResult
1571 if not tmpResult:
1572 main.log.error( str( i ) + " is not in partitioned "
1573 "counter incremented results" )
1574 utilities.assert_equals( expect=True,
1575 actual=pCounterResults,
1576 onpass="Default counter incremented",
1577 onfail="Error incrementing default" +
1578 " counter" )
1579
1580 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001581 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1582 args=[ main.pCounterName ],
1583 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001584 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001585 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001586 addedPValues.append( main.pCounterValue )
1587 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001588 # Check that counter incremented numController times
1589 pCounterResults = True
1590 for i in addedPValues:
1591 tmpResult = i in pCounters
1592 pCounterResults = pCounterResults and tmpResult
1593 if not tmpResult:
1594 main.log.error( str( i ) + " is not in partitioned "
1595 "counter incremented results" )
1596 utilities.assert_equals( expect=True,
1597 actual=pCounterResults,
1598 onpass="Default counter incremented",
1599 onfail="Error incrementing default" +
1600 " counter" )
1601
1602 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001603 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001604 utilities.assert_equals( expect=main.TRUE,
1605 actual=incrementCheck,
1606 onpass="Added counters are correct",
1607 onfail="Added counters are incorrect" )
1608
1609 # DISTRIBUTED SETS
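# main.onosSet is the local reference copy of the distributed set; each
# mutation below is applied to it first, then every active node's view of
# the set is compared against it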
1610 main.step( "Distributed Set get" )
1611 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001612 getResponses = main.Cluster.command( "setTestGet",
1613 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001614 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001615 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001616 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001617 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001618 current = set( getResponses[ i ] )
1619 if len( current ) == len( getResponses[ i ] ):
1620 # no repeats
1621 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001622 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001623 " has incorrect view" +
1624 " of set " + main.onosSetName + ":\n" +
1625 str( getResponses[ i ] ) )
1626 main.log.debug( "Expected: " + str( main.onosSet ) )
1627 main.log.debug( "Actual: " + str( current ) )
1628 getResults = main.FALSE
1629 else:
1630 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001631 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001632 " has repeat elements in" +
1633 " set " + main.onosSetName + ":\n" +
1634 str( getResponses[ i ] ) )
1635 getResults = main.FALSE
1636 elif getResponses[ i ] == main.ERROR:
1637 getResults = main.FALSE
1638 utilities.assert_equals( expect=main.TRUE,
1639 actual=getResults,
1640 onpass="Set elements are correct",
1641 onfail="Set elements are incorrect" )
1642
1643 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001644 sizeResponses = main.Cluster.command( "setTestSize",
1645 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001646 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001647 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001648 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001649 if size != sizeResponses[ i ]:
1650 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001651 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001652 " expected a size of " + str( size ) +
1653 " for set " + main.onosSetName +
1654 " but got " + str( sizeResponses[ i ] ) )
1655 utilities.assert_equals( expect=main.TRUE,
1656 actual=sizeResults,
1657 onpass="Set sizes are correct",
1658 onfail="Set sizes are incorrect" )
1659
1660 main.step( "Distributed Set add()" )
1661 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001662 addResponses = main.Cluster.command( "setTestAdd",
1663 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001664 # main.TRUE = successfully changed the set
1665 # main.FALSE = action resulted in no change in set
1666 # main.ERROR - Some error in executing the function
1667 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001668 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001669 if addResponses[ i ] == main.TRUE:
1670 # All is well
1671 pass
1672 elif addResponses[ i ] == main.FALSE:
1673 # Already in set, probably fine
1674 pass
1675 elif addResponses[ i ] == main.ERROR:
1676 # Error in execution
1677 addResults = main.FALSE
1678 else:
1679 # unexpected result
1680 addResults = main.FALSE
1681 if addResults != main.TRUE:
1682 main.log.error( "Error executing set add" )
1683
1684 # Check if set is still correct
1685 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001686 getResponses = main.Cluster.command( "setTestGet",
1687 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001688 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001689 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001690 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001691 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001692 current = set( getResponses[ i ] )
1693 if len( current ) == len( getResponses[ i ] ):
1694 # no repeats
1695 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001696 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001697 " of set " + main.onosSetName + ":\n" +
1698 str( getResponses[ i ] ) )
1699 main.log.debug( "Expected: " + str( main.onosSet ) )
1700 main.log.debug( "Actual: " + str( current ) )
1701 getResults = main.FALSE
1702 else:
1703 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001704 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001705 " set " + main.onosSetName + ":\n" +
1706 str( getResponses[ i ] ) )
1707 getResults = main.FALSE
1708 elif getResponses[ i ] == main.ERROR:
1709 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001710 sizeResponses = main.Cluster.command( "setTestSize",
1711 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001712 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001713 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001714 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001715 if size != sizeResponses[ i ]:
1716 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001717 main.log.error( node + " expected a size of " +
1718 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001719 " but got " + str( sizeResponses[ i ] ) )
1720 addResults = addResults and getResults and sizeResults
1721 utilities.assert_equals( expect=main.TRUE,
1722 actual=addResults,
1723 onpass="Set add correct",
1724 onfail="Set add was incorrect" )
1725
1726 main.step( "Distributed Set addAll()" )
1727 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001728 addResponses = main.Cluster.command( "setTestAdd",
1729 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001730 # main.TRUE = successfully changed the set
1731 # main.FALSE = action resulted in no change in set
1732 # main.ERROR - Some error in executing the function
1733 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001734 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001735 if addResponses[ i ] == main.TRUE:
1736 # All is well
1737 pass
1738 elif addResponses[ i ] == main.FALSE:
1739 # Already in set, probably fine
1740 pass
1741 elif addResponses[ i ] == main.ERROR:
1742 # Error in execution
1743 addAllResults = main.FALSE
1744 else:
1745 # unexpected result
1746 addAllResults = main.FALSE
1747 if addAllResults != main.TRUE:
1748 main.log.error( "Error executing set addAll" )
1749
1750 # Check if set is still correct
1751 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001752 getResponses = main.Cluster.command( "setTestGet",
1753 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001754 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001755 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001756 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001757 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001758 current = set( getResponses[ i ] )
1759 if len( current ) == len( getResponses[ i ] ):
1760 # no repeats
1761 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001762 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001763 " of set " + main.onosSetName + ":\n" +
1764 str( getResponses[ i ] ) )
1765 main.log.debug( "Expected: " + str( main.onosSet ) )
1766 main.log.debug( "Actual: " + str( current ) )
1767 getResults = main.FALSE
1768 else:
1769 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001770 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001771 " set " + main.onosSetName + ":\n" +
1772 str( getResponses[ i ] ) )
1773 getResults = main.FALSE
1774 elif getResponses[ i ] == main.ERROR:
1775 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001776 sizeResponses = main.Cluster.command( "setTestSize",
1777 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001778 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001779 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001780 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001781 if size != sizeResponses[ i ]:
1782 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001783 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001784 " for set " + main.onosSetName +
1785 " but got " + str( sizeResponses[ i ] ) )
1786 addAllResults = addAllResults and getResults and sizeResults
1787 utilities.assert_equals( expect=main.TRUE,
1788 actual=addAllResults,
1789 onpass="Set addAll correct",
1790 onfail="Set addAll was incorrect" )
1791
1792 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001793 containsResponses = main.Cluster.command( "setTestGet",
1794 args=[ main.onosSetName ],
1795 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001796 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001797 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001798 if containsResponses[ i ] == main.ERROR:
1799 containsResults = main.FALSE
1800 else:
1801 containsResults = containsResults and\
1802 containsResponses[ i ][ 1 ]
1803 utilities.assert_equals( expect=main.TRUE,
1804 actual=containsResults,
1805 onpass="Set contains is functional",
1806 onfail="Set contains failed" )
1807
1808 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001809 containsAllResponses = main.Cluster.command( "setTestGet",
1810 args=[ main.onosSetName ],
1811 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001812 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001813 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001814 if containsAllResponses[ i ] == main.ERROR:
1815 containsAllResults = main.FALSE
1816 else:
1817 containsAllResults = containsAllResults and\
1818 containsAllResponses[ i ][ 1 ]
1819 utilities.assert_equals( expect=main.TRUE,
1820 actual=containsAllResults,
1821 onpass="Set containsAll is functional",
1822 onfail="Set containsAll failed" )
1823
1824 main.step( "Distributed Set remove()" )
1825 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001826 removeResponses = main.Cluster.command( "setTestRemove",
1827 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001828 # main.TRUE = successfully changed the set
1829 # main.FALSE = action resulted in no change in set
1830 # main.ERROR - Some error in executing the function
1831 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001832 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001833 if removeResponses[ i ] == main.TRUE:
1834 # All is well
1835 pass
1836 elif removeResponses[ i ] == main.FALSE:
1837 # not in set, probably fine
1838 pass
1839 elif removeResponses[ i ] == main.ERROR:
1840 # Error in execution
1841 removeResults = main.FALSE
1842 else:
1843 # unexpected result
1844 removeResults = main.FALSE
1845 if removeResults != main.TRUE:
1846 main.log.error( "Error executing set remove" )
1847
1848 # Check if set is still correct
1849 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001850 getResponses = main.Cluster.command( "setTestGet",
1851 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001852 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001853 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001854 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001855 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001856 current = set( getResponses[ i ] )
1857 if len( current ) == len( getResponses[ i ] ):
1858 # no repeats
1859 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001860 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001861 " of set " + main.onosSetName + ":\n" +
1862 str( getResponses[ i ] ) )
1863 main.log.debug( "Expected: " + str( main.onosSet ) )
1864 main.log.debug( "Actual: " + str( current ) )
1865 getResults = main.FALSE
1866 else:
1867 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001868 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001869 " set " + main.onosSetName + ":\n" +
1870 str( getResponses[ i ] ) )
1871 getResults = main.FALSE
1872 elif getResponses[ i ] == main.ERROR:
1873 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001874 sizeResponses = main.Cluster.command( "setTestSize",
1875 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001876 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001877 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001878 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001879 if size != sizeResponses[ i ]:
1880 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001881 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001882 " for set " + main.onosSetName +
1883 " but got " + str( sizeResponses[ i ] ) )
1884 removeResults = removeResults and getResults and sizeResults
1885 utilities.assert_equals( expect=main.TRUE,
1886 actual=removeResults,
1887 onpass="Set remove correct",
1888 onfail="Set remove was incorrect" )
1889
1890 main.step( "Distributed Set removeAll()" )
1891 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001892 removeAllResponses = main.Cluster.command( "setTestRemove",
1893 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001894 # main.TRUE = successfully changed the set
1895 # main.FALSE = action resulted in no change in set
1896 # main.ERROR - Some error in executing the function
1897 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001898 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001899 if removeAllResponses[ i ] == main.TRUE:
1900 # All is well
1901 pass
1902 elif removeAllResponses[ i ] == main.FALSE:
1903 # not in set, probably fine
1904 pass
1905 elif removeAllResponses[ i ] == main.ERROR:
1906 # Error in execution
1907 removeAllResults = main.FALSE
1908 else:
1909 # unexpected result
1910 removeAllResults = main.FALSE
1911 if removeAllResults != main.TRUE:
1912 main.log.error( "Error executing set removeAll" )
1913
1914 # Check if set is still correct
1915 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001916 getResponses = main.Cluster.command( "setTestGet",
1917 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001918 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001919 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001920 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001921 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001922 current = set( getResponses[ i ] )
1923 if len( current ) == len( getResponses[ i ] ):
1924 # no repeats
1925 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001926 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001927 " of set " + main.onosSetName + ":\n" +
1928 str( getResponses[ i ] ) )
1929 main.log.debug( "Expected: " + str( main.onosSet ) )
1930 main.log.debug( "Actual: " + str( current ) )
1931 getResults = main.FALSE
1932 else:
1933 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001934 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001935 " set " + main.onosSetName + ":\n" +
1936 str( getResponses[ i ] ) )
1937 getResults = main.FALSE
1938 elif getResponses[ i ] == main.ERROR:
1939 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001940 sizeResponses = main.Cluster.command( "setTestSize",
1941 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001942 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001943 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001944 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001945 if size != sizeResponses[ i ]:
1946 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001947 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001948 " for set " + main.onosSetName +
1949 " but got " + str( sizeResponses[ i ] ) )
1950 removeAllResults = removeAllResults and getResults and sizeResults
1951 utilities.assert_equals( expect=main.TRUE,
1952 actual=removeAllResults,
1953 onpass="Set removeAll correct",
1954 onfail="Set removeAll was incorrect" )
1955
1956 main.step( "Distributed Set addAll()" )
1957 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001958 addResponses = main.Cluster.command( "setTestAdd",
1959 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001960 # main.TRUE = successfully changed the set
1961 # main.FALSE = action resulted in no change in set
1962 # main.ERROR - Some error in executing the function
1963 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001964 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001965 if addResponses[ i ] == main.TRUE:
1966 # All is well
1967 pass
1968 elif addResponses[ i ] == main.FALSE:
1969 # Already in set, probably fine
1970 pass
1971 elif addResponses[ i ] == main.ERROR:
1972 # Error in execution
1973 addAllResults = main.FALSE
1974 else:
1975 # unexpected result
1976 addAllResults = main.FALSE
1977 if addAllResults != main.TRUE:
1978 main.log.error( "Error executing set addAll" )
1979
1980 # Check if set is still correct
1981 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001982 getResponses = main.Cluster.command( "setTestGet",
1983 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001984 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001985 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001986 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001987 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001988 current = set( getResponses[ i ] )
1989 if len( current ) == len( getResponses[ i ] ):
1990 # no repeats
1991 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001992 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001993 " of set " + main.onosSetName + ":\n" +
1994 str( getResponses[ i ] ) )
1995 main.log.debug( "Expected: " + str( main.onosSet ) )
1996 main.log.debug( "Actual: " + str( current ) )
1997 getResults = main.FALSE
1998 else:
1999 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002000 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002001 " set " + main.onosSetName + ":\n" +
2002 str( getResponses[ i ] ) )
2003 getResults = main.FALSE
2004 elif getResponses[ i ] == main.ERROR:
2005 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002006 sizeResponses = main.Cluster.command( "setTestSize",
2007 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002008 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002009 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002010 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002011 if size != sizeResponses[ i ]:
2012 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002013 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002014 " for set " + main.onosSetName +
2015 " but got " + str( sizeResponses[ i ] ) )
2016 addAllResults = addAllResults and getResults and sizeResults
2017 utilities.assert_equals( expect=main.TRUE,
2018 actual=addAllResults,
2019 onpass="Set addAll correct",
2020 onfail="Set addAll was incorrect" )
2021
2022 main.step( "Distributed Set clear()" )
2023 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07002024 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07002025 args=[ main.onosSetName, " " ], # Values doesn't matter
2026 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002027 # main.TRUE = successfully changed the set
2028 # main.FALSE = action resulted in no change in set
2029 # main.ERROR - Some error in executing the function
2030 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002031 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002032 if clearResponses[ i ] == main.TRUE:
2033 # All is well
2034 pass
2035 elif clearResponses[ i ] == main.FALSE:
2036 # Nothing set, probably fine
2037 pass
2038 elif clearResponses[ i ] == main.ERROR:
2039 # Error in execution
2040 clearResults = main.FALSE
2041 else:
2042 # unexpected result
2043 clearResults = main.FALSE
2044 if clearResults != main.TRUE:
2045 main.log.error( "Error executing set clear" )
2046
2047 # Check if set is still correct
2048 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002049 getResponses = main.Cluster.command( "setTestGet",
2050 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002051 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002052 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002053 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002054 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002055 current = set( getResponses[ i ] )
2056 if len( current ) == len( getResponses[ i ] ):
2057 # no repeats
2058 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002059 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002060 " of set " + main.onosSetName + ":\n" +
2061 str( getResponses[ i ] ) )
2062 main.log.debug( "Expected: " + str( main.onosSet ) )
2063 main.log.debug( "Actual: " + str( current ) )
2064 getResults = main.FALSE
2065 else:
2066 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002067 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002068 " set " + main.onosSetName + ":\n" +
2069 str( getResponses[ i ] ) )
2070 getResults = main.FALSE
2071 elif getResponses[ i ] == main.ERROR:
2072 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002073 sizeResponses = main.Cluster.command( "setTestSize",
2074 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002075 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002076 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002077 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002078 if size != sizeResponses[ i ]:
2079 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002080 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002081 " for set " + main.onosSetName +
2082 " but got " + str( sizeResponses[ i ] ) )
2083 clearResults = clearResults and getResults and sizeResults
2084 utilities.assert_equals( expect=main.TRUE,
2085 actual=clearResults,
2086 onpass="Set clear correct",
2087 onfail="Set clear was incorrect" )
2088
2089 main.step( "Distributed Set addAll()" )
2090 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002091 addResponses = main.Cluster.command( "setTestAdd",
2092 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002093 # main.TRUE = successfully changed the set
2094 # main.FALSE = action resulted in no change in set
2095 # main.ERROR - Some error in executing the function
2096 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002097 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002098 if addResponses[ i ] == main.TRUE:
2099 # All is well
2100 pass
2101 elif addResponses[ i ] == main.FALSE:
2102 # Already in set, probably fine
2103 pass
2104 elif addResponses[ i ] == main.ERROR:
2105 # Error in execution
2106 addAllResults = main.FALSE
2107 else:
2108 # unexpected result
2109 addAllResults = main.FALSE
2110 if addAllResults != main.TRUE:
2111 main.log.error( "Error executing set addAll" )
2112
2113 # Check if set is still correct
2114 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002115 getResponses = main.Cluster.command( "setTestGet",
2116 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002117 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002118 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002119 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002120 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002121 current = set( getResponses[ i ] )
2122 if len( current ) == len( getResponses[ i ] ):
2123 # no repeats
2124 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002125 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002126 " of set " + main.onosSetName + ":\n" +
2127 str( getResponses[ i ] ) )
2128 main.log.debug( "Expected: " + str( main.onosSet ) )
2129 main.log.debug( "Actual: " + str( current ) )
2130 getResults = main.FALSE
2131 else:
2132 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002133 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002134 " set " + main.onosSetName + ":\n" +
2135 str( getResponses[ i ] ) )
2136 getResults = main.FALSE
2137 elif getResponses[ i ] == main.ERROR:
2138 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002139 sizeResponses = main.Cluster.command( "setTestSize",
2140 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002141 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002142 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002143 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002144 if size != sizeResponses[ i ]:
2145 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002146 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002147 " for set " + main.onosSetName +
2148 " but got " + str( sizeResponses[ i ] ) )
2149 addAllResults = addAllResults and getResults and sizeResults
2150 utilities.assert_equals( expect=main.TRUE,
2151 actual=addAllResults,
2152 onpass="Set addAll correct",
2153 onfail="Set addAll was incorrect" )
2154
2155 main.step( "Distributed Set retain()" )
2156 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002157 retainResponses = main.Cluster.command( "setTestRemove",
2158 args=[ main.onosSetName, retainValue ],
2159 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002160 # main.TRUE = successfully changed the set
2161 # main.FALSE = action resulted in no change in set
2162 # main.ERROR - Some error in executing the function
2163 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002164 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002165 if retainResponses[ i ] == main.TRUE:
2166 # All is well
2167 pass
2168 elif retainResponses[ i ] == main.FALSE:
2169 # Already in set, probably fine
2170 pass
2171 elif retainResponses[ i ] == main.ERROR:
2172 # Error in execution
2173 retainResults = main.FALSE
2174 else:
2175 # unexpected result
2176 retainResults = main.FALSE
2177 if retainResults != main.TRUE:
2178 main.log.error( "Error executing set retain" )
2179
2180 # Check if set is still correct
2181 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002182 getResponses = main.Cluster.command( "setTestGet",
2183 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002184 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002185 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002186 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002187 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002188 current = set( getResponses[ i ] )
2189 if len( current ) == len( getResponses[ i ] ):
2190 # no repeats
2191 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002192 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002193 " of set " + main.onosSetName + ":\n" +
2194 str( getResponses[ i ] ) )
2195 main.log.debug( "Expected: " + str( main.onosSet ) )
2196 main.log.debug( "Actual: " + str( current ) )
2197 getResults = main.FALSE
2198 else:
2199 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002200 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002201 " set " + main.onosSetName + ":\n" +
2202 str( getResponses[ i ] ) )
2203 getResults = main.FALSE
2204 elif getResponses[ i ] == main.ERROR:
2205 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002206 sizeResponses = main.Cluster.command( "setTestSize",
2207 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002208 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002209 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002210 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002211 if size != sizeResponses[ i ]:
2212 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002213 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002214 str( size ) + " for set " + main.onosSetName +
2215 " but got " + str( sizeResponses[ i ] ) )
2216 retainResults = retainResults and getResults and sizeResults
2217 utilities.assert_equals( expect=main.TRUE,
2218 actual=retainResults,
2219 onpass="Set retain correct",
2220 onfail="Set retain was incorrect" )
2221
2222 # Transactional maps
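# The put step writes numKeys entries through a single node; the get step
# then reads keys "Key1".."Key100" back from every active node and verifies
# the values match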
2223 main.step( "Partitioned Transactional maps put" )
2224 tMapValue = "Testing"
2225 numKeys = 100
2226 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002227 ctrl = main.Cluster.next()
2228 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002229 if putResponses and len( putResponses ) == 100:
2230 for i in putResponses:
2231 if putResponses[ i ][ 'value' ] != tMapValue:
2232 putResult = False
2233 else:
2234 putResult = False
2235 if not putResult:
2236 main.log.debug( "Put response values: " + str( putResponses ) )
2237 utilities.assert_equals( expect=True,
2238 actual=putResult,
2239 onpass="Partitioned Transactional Map put successful",
2240 onfail="Partitioned Transactional Map put values are incorrect" )
2241
2242 main.step( "Partitioned Transactional maps get" )
2243 # FIXME: is this sleep needed?
2244 time.sleep( 5 )
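# ( presumably to give the transactional map writes time to propagate
# before they are read back from every node )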
2245
2246 getCheck = True
2247 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002248 getResponses = main.Cluster.command( "transactionalMapGet",
2249 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002250 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002251 for node in getResponses:
2252 if node != tMapValue:
2253 valueCheck = False
2254 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002255 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002256 main.log.warn( getResponses )
2257 getCheck = getCheck and valueCheck
2258 utilities.assert_equals( expect=True,
2259 actual=getCheck,
2260 onpass="Partitioned Transactional Map get values were correct",
2261 onfail="Partitioned Transactional Map values incorrect" )
2262
2263 # DISTRIBUTED ATOMIC VALUE
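# valueValue tracks the expected value locally; an unset atomic value is
# expected to come back as the string "null", hence the fallback below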
2264 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002265 getValues = main.Cluster.command( "valueTestGet",
2266 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002267 main.log.debug( getValues )
2268 # Check the results
2269 atomicValueGetResult = True
2270 expected = valueValue if valueValue is not None else "null"
2271 main.log.debug( "Checking for value of " + expected )
2272 for i in getValues:
2273 if i != expected:
2274 atomicValueGetResult = False
2275 utilities.assert_equals( expect=True,
2276 actual=atomicValueGetResult,
2277 onpass="Atomic Value get successful",
2278 onfail="Error getting atomic Value " +
2279 str( valueValue ) + ", found: " +
2280 str( getValues ) )
2281
2282 main.step( "Atomic Value set()" )
2283 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002284 setValues = main.Cluster.command( "valueTestSet",
2285 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002286 main.log.debug( setValues )
2287 # Check the results
2288 atomicValueSetResults = True
2289 for i in setValues:
2290 if i != main.TRUE:
2291 atomicValueSetResults = False
2292 utilities.assert_equals( expect=True,
2293 actual=atomicValueSetResults,
2294 onpass="Atomic Value set successful",
2295 onfail="Error setting atomic Value" +
2296 str( setValues ) )
2297
2298 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002299 getValues = main.Cluster.command( "valueTestGet",
2300 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002301 main.log.debug( getValues )
2302 # Check the results
2303 atomicValueGetResult = True
2304 expected = valueValue if valueValue is not None else "null"
2305 main.log.debug( "Checking for value of " + expected )
2306 for i in getValues:
2307 if i != expected:
2308 atomicValueGetResult = False
2309 utilities.assert_equals( expect=True,
2310 actual=atomicValueGetResult,
2311 onpass="Atomic Value get successful",
2312 onfail="Error getting atomic Value " +
2313 str( valueValue ) + ", found: " +
2314 str( getValues ) )
2315
2316 main.step( "Atomic Value compareAndSet()" )
2317 oldValue = valueValue
2318 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002319 ctrl = main.Cluster.next()
2320 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002321 main.log.debug( CASValue )
2322 utilities.assert_equals( expect=main.TRUE,
2323 actual=CASValue,
2324 onpass="Atomic Value comapreAndSet successful",
2325 onfail="Error setting atomic Value:" +
2326 str( CASValue ) )
2327
2328 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002329 getValues = main.Cluster.command( "valueTestGet",
2330 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002331 main.log.debug( getValues )
2332 # Check the results
2333 atomicValueGetResult = True
2334 expected = valueValue if valueValue is not None else "null"
2335 main.log.debug( "Checking for value of " + expected )
2336 for i in getValues:
2337 if i != expected:
2338 atomicValueGetResult = False
2339 utilities.assert_equals( expect=True,
2340 actual=atomicValueGetResult,
2341 onpass="Atomic Value get successful",
2342 onfail="Error getting atomic Value " +
2343 str( valueValue ) + ", found: " +
2344 str( getValues ) )
2345
2346 main.step( "Atomic Value getAndSet()" )
2347 oldValue = valueValue
2348 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002349 ctrl = main.Cluster.next()
2350 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002351 main.log.debug( GASValue )
2352 expected = oldValue if oldValue is not None else "null"
2353 utilities.assert_equals( expect=expected,
2354 actual=GASValue,
2355 onpass="Atomic Value GAS successful",
2356 onfail="Error with GetAndSet atomic Value: expected " +
2357 str( expected ) + ", found: " +
2358 str( GASValue ) )
2359
2360 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002361 getValues = main.Cluster.command( "valueTestGet",
2362 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002363 main.log.debug( getValues )
2364 # Check the results
2365 atomicValueGetResult = True
2366 expected = valueValue if valueValue is not None else "null"
2367 main.log.debug( "Checking for value of " + expected )
2368 for i in getValues:
2369 if i != expected:
2370 atomicValueGetResult = False
2371 utilities.assert_equals( expect=True,
2372 actual=atomicValueGetResult,
2373 onpass="Atomic Value get successful",
2374 onfail="Error getting atomic Value: expected " +
2375 str( valueValue ) + ", found: " +
2376 str( getValues ) )
2377
2378 main.step( "Atomic Value destory()" )
2379 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002380 ctrl = main.Cluster.next()
2381 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002382 main.log.debug( destroyResult )
2383 # Check the results
2384 utilities.assert_equals( expect=main.TRUE,
2385 actual=destroyResult,
2386 onpass="Atomic Value destroy successful",
2387 onfail="Error destroying atomic Value" )
2388
2389 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002390 getValues = main.Cluster.command( "valueTestGet",
2391 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002392 main.log.debug( getValues )
2393 # Check the results
2394 atomicValueGetResult = True
2395 expected = valueValue if valueValue is not None else "null"
2396 main.log.debug( "Checking for value of " + expected )
2397 for i in getValues:
2398 if i != expected:
2399 atomicValueGetResult = False
2400 utilities.assert_equals( expect=True,
2401 actual=atomicValueGetResult,
2402 onpass="Atomic Value get successful",
2403 onfail="Error getting atomic Value " +
2404 str( valueValue ) + ", found: " +
2405 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002406
2407 # WORK QUEUES
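# workQueueCompleted / workQueueInProgress / workQueuePending track the
# expected queue statistics locally; workQueueStatsCheck compares them
# against the stats reported by ONOS after each operation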
2408 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002409 ctrl = main.Cluster.next()
2410 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002411 workQueuePending += 1
2412 main.log.debug( addResult )
2413 # Check the results
2414 utilities.assert_equals( expect=main.TRUE,
2415 actual=addResult,
2416 onpass="Work Queue add successful",
2417 onfail="Error adding to Work Queue" )
2418
2419 main.step( "Check the work queue stats" )
2420 statsResults = self.workQueueStatsCheck( workQueueName,
2421 workQueueCompleted,
2422 workQueueInProgress,
2423 workQueuePending )
2424 utilities.assert_equals( expect=True,
2425 actual=statsResults,
2426 onpass="Work Queue stats correct",
2427 onfail="Work Queue stats incorrect " )
2428
2429 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002430 ctrl = main.Cluster.next()
2431 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002432 workQueuePending += 2
2433 main.log.debug( addMultipleResult )
2434 # Check the results
2435 utilities.assert_equals( expect=main.TRUE,
2436 actual=addMultipleResult,
2437 onpass="Work Queue add multiple successful",
2438 onfail="Error adding multiple items to Work Queue" )
2439
2440 main.step( "Check the work queue stats" )
2441 statsResults = self.workQueueStatsCheck( workQueueName,
2442 workQueueCompleted,
2443 workQueueInProgress,
2444 workQueuePending )
2445 utilities.assert_equals( expect=True,
2446 actual=statsResults,
2447 onpass="Work Queue stats correct",
2448 onfail="Work Queue stats incorrect " )
2449
2450 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002451 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002452 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002453 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002454 workQueuePending -= number
2455 workQueueCompleted += number
2456 main.log.debug( take1Result )
2457 # Check the results
2458 utilities.assert_equals( expect=main.TRUE,
2459 actual=take1Result,
2460 onpass="Work Queue takeAndComplete 1 successful",
2461 onfail="Error taking 1 from Work Queue" )
2462
2463 main.step( "Check the work queue stats" )
2464 statsResults = self.workQueueStatsCheck( workQueueName,
2465 workQueueCompleted,
2466 workQueueInProgress,
2467 workQueuePending )
2468 utilities.assert_equals( expect=True,
2469 actual=statsResults,
2470 onpass="Work Queue stats correct",
2471 onfail="Work Queue stats incorrect " )
2472
2473 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002474 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002475 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002476 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002477 workQueuePending -= number
2478 workQueueCompleted += number
2479 main.log.debug( take2Result )
2480 # Check the results
2481 utilities.assert_equals( expect=main.TRUE,
2482 actual=take2Result,
2483 onpass="Work Queue takeAndComplete 2 successful",
2484 onfail="Error taking 2 from Work Queue" )
2485
2486 main.step( "Check the work queue stats" )
2487 statsResults = self.workQueueStatsCheck( workQueueName,
2488 workQueueCompleted,
2489 workQueueInProgress,
2490 workQueuePending )
2491 utilities.assert_equals( expect=True,
2492 actual=statsResults,
2493 onpass="Work Queue stats correct",
2494 onfail="Work Queue stats incorrect " )
2495
2496 main.step( "Work Queue destroy()" )
2497 valueValue = None
2498 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002499 ctrl = main.Cluster.next()
2500 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002501 workQueueCompleted = 0
2502 workQueueInProgress = 0
2503 workQueuePending = 0
2504 main.log.debug( destroyResult )
2505 # Check the results
2506 utilities.assert_equals( expect=main.TRUE,
2507 actual=destroyResult,
2508 onpass="Work Queue destroy successful",
2509 onfail="Error destroying Work Queue" )
2510
2511 main.step( "Check the work queue stats" )
2512 statsResults = self.workQueueStatsCheck( workQueueName,
2513 workQueueCompleted,
2514 workQueueInProgress,
2515 workQueuePending )
2516 utilities.assert_equals( expect=True,
2517 actual=statsResults,
2518 onpass="Work Queue stats correct",
2519 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002520 except Exception as e:
2521 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002522
2523 def cleanUp( self, main ):
2524 """
2525 Clean up
2526 """
2527 import os
2528 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002529 assert main, "main not defined"
2530 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002531
2532 # printing colors to terminal
2533 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2534 'blue': '\033[94m', 'green': '\033[92m',
2535 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
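        # Example usage: print colors[ 'red' ] + "some message" + colors[ 'end' ]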
Jon Hall4173b242017-09-12 17:04:38 -07002536
Devin Lim58046fa2017-07-05 16:55:00 -07002537 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002538
2539 main.step( "Checking raft log size" )
2540 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2541 # get compacted periodically
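        # NOTE: checkPartitionSize() is assumed to pass only while each Raft
        # partition's log stays under the driver's size threshold, i.e. while
        # log compaction keeps running; see the Cluster driver for details.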
2542 logCheck = main.Cluster.checkPartitionSize()
2543 utilities.assert_equals( expect=True, actual=logCheck,
2544 onpass="Raft log size is not too big",
2545 onfail="Raft logs grew too big" )
2546
Devin Lim58046fa2017-07-05 16:55:00 -07002547 main.step( "Killing tcpdumps" )
2548 main.Mininet2.stopTcpdump()
2549
2550 testname = main.TEST
2551 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2552 main.step( "Copying MN pcap and ONOS log files to test station" )
2553 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2554 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2555 # NOTE: MN Pcap file is being saved to logdir.
2556 # We scp this file as MN and TestON aren't necessarily the same vm
2557
2558 # FIXME: To be replaced with a Jenkins post script
2559 # TODO: Load these from params
2560 # NOTE: must end in /
2561 logFolder = "/opt/onos/log/"
2562 logFiles = [ "karaf.log", "karaf.log.1" ]
2563 # NOTE: must end in /
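            # Each log is copied to the test station as <logdir>/<node name>-<file>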
2564 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002565 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002566 dstName = main.logdir + "/" + ctrl.name + "-" + f
2567 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002568 logFolder + f, dstName )
2569 # std*.log's
2570 # NOTE: must end in /
2571 logFolder = "/opt/onos/var/"
2572 logFiles = [ "stderr.log", "stdout.log" ]
2573 # NOTE: must end in /
2574 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002575 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002576 dstName = main.logdir + "/" + ctrl.name + "-" + f
2577 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002578 logFolder + f, dstName )
2579 else:
2580 main.log.debug( "skipping saving log files" )
2581
2582 main.step( "Stopping Mininet" )
2583 mnResult = main.Mininet1.stopNet()
2584 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2585 onpass="Mininet stopped",
2586 onfail="MN cleanup NOT successful" )
2587
2588 main.step( "Checking ONOS Logs for errors" )
Devin Lim142b5342017-07-20 15:22:39 -07002589 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002590 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2591 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002592
2593 try:
2594 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2595 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2596 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2597 timerLog.close()
2598 except NameError as e:
2599 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002600
Devin Lim58046fa2017-07-05 16:55:00 -07002601 def assignMastership( self, main ):
2602 """
2603 Assign mastership to controllers
2604 """
2605 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002606 assert main, "main not defined"
2607 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002608
2609 main.case( "Assigning Controller roles for switches" )
2610 main.caseExplanation = "Check that ONOS is connected to each " +\
2611 "device. Then manually assign" +\
2612 " mastership to specific ONOS nodes using" +\
2613 " 'device-role'"
2614 main.step( "Assign mastership of switches to specific controllers" )
2615 # Manually assign mastership to the controller we want
2616 roleCall = main.TRUE
2617
2618 ipList = []
2619 deviceList = []
Jon Hallca319892017-06-15 15:25:22 -07002620 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07002621 try:
2622 # Assign mastership to specific controllers. This assignment was
2623 # determined for a 7 node cluster, but will work with any sized
2624 # cluster
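            # Intended mapping for a 7 node cluster:
            #   ONOS1: s1, s28   ONOS2: s2, s3   ONOS3: s5, s6   ONOS4: s4
            #   ONOS5: s8-s17    ONOS6: s7       ONOS7: s18-s27
            # The modulo by main.Cluster.numCtrls folds these indices onto
            # whatever number of controllers is actually running.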
2625 for i in range( 1, 29 ): # switches 1 through 28
2626 # set up correct variables:
2627 if i == 1:
2628 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002629 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002630 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
2631 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002632 c = 1 % main.Cluster.numCtrls
2633 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002634 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
2635 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002636 c = 1 % main.Cluster.numCtrls
2637 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002638 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
2639 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002640 c = 3 % main.Cluster.numCtrls
2641 ip = main.Cluster.active( c ).ip_address # ONOS4
Devin Lim58046fa2017-07-05 16:55:00 -07002642 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
2643 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002644 c = 2 % main.Cluster.numCtrls
2645 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002646 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
2647 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002648 c = 2 % main.Cluster.numCtrls
2649 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002650 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
2651 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002652 c = 5 % main.Cluster.numCtrls
2653 ip = main.Cluster.active( c ).ip_address # ONOS6
Devin Lim58046fa2017-07-05 16:55:00 -07002654 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
2655 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002656 c = 4 % main.Cluster.numCtrls
2657 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002658 dpid = '3' + str( i ).zfill( 3 )
2659 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2660 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002661 c = 6 % main.Cluster.numCtrls
2662 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002663 dpid = '6' + str( i ).zfill( 3 )
2664 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2665 elif i == 28:
2666 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002667 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002668 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
2669 else:
2670 main.log.error( "You didn't write an else statement for " +
2671 "switch s" + str( i ) )
2672 roleCall = main.FALSE
2673 # Assign switch
2674 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2675 # TODO: make this controller dynamic
2676 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
2677 ipList.append( ip )
2678 deviceList.append( deviceId )
2679 except ( AttributeError, AssertionError ):
2680 main.log.exception( "Something is wrong with ONOS device view" )
2681 main.log.info( onosCli.devices() )
2682 utilities.assert_equals(
2683 expect=main.TRUE,
2684 actual=roleCall,
2685 onpass="Re-assigned switch mastership to designated controller",
2686 onfail="Something wrong with deviceRole calls" )
2687
2688 main.step( "Check mastership was correctly assigned" )
2689 roleCheck = main.TRUE
2690 # NOTE: This is due to the fact that device mastership change is not
2691 # atomic and is actually a multi step process
2692 time.sleep( 5 )
2693 for i in range( len( ipList ) ):
2694 ip = ipList[ i ]
2695 deviceId = deviceList[ i ]
2696 # Check assignment
2697 master = onosCli.getRole( deviceId ).get( 'master' )
2698 if ip in master:
2699 roleCheck = roleCheck and main.TRUE
2700 else:
2701 roleCheck = roleCheck and main.FALSE
2702 main.log.error( "Error, controller " + ip + " is not" +
2703 " master " + "of device " +
2704 str( deviceId ) + ". Master is " +
2705 repr( master ) + "." )
2706 utilities.assert_equals(
2707 expect=main.TRUE,
2708 actual=roleCheck,
2709 onpass="Switches were successfully reassigned to designated " +
2710 "controller",
2711 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002712
Devin Lim58046fa2017-07-05 16:55:00 -07002713 def bringUpStoppedNode( self, main ):
2714 """
2715 Bring up the stopped ONOS nodes
2716 """
2717 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002718 assert main, "main not defined"
2719 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002720 assert main.kill, "main.kill not defined"
2721 main.case( "Restart minority of ONOS nodes" )
2722
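        # Recovery flow: restart the ONOS service on every killed node, poll
        # isup() until they report ready ( up to 10 tries ), re-attach the CLI
        # sessions, re-verify the whole cluster, then re-run the restarted
        # nodes for the leadership election topic.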
2723 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2724 startResults = main.TRUE
2725 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002726 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002727 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002728 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002729 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2730 onpass="ONOS nodes started successfully",
2731 onfail="ONOS nodes NOT successfully started" )
2732
2733 main.step( "Checking if ONOS is up yet" )
2734 count = 0
2735 onosIsupResult = main.FALSE
2736 while onosIsupResult == main.FALSE and count < 10:
2737 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002738 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002739 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002740 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002741 count = count + 1
2742 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2743 onpass="ONOS restarted successfully",
2744 onfail="ONOS restart NOT successful" )
2745
Jon Hallca319892017-06-15 15:25:22 -07002746 main.step( "Restarting ONOS CLI sessions on the restarted nodes" )
Devin Lim58046fa2017-07-05 16:55:00 -07002747 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002748 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002749 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002750 ctrl.startOnosCli( ctrl.ipAddress )
2751 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002752 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002753 onpass="ONOS node(s) restarted",
2754 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002755
2756 # Grab the time of restart so we can check how long the gossip
2757 # protocol has had time to work
2758 main.restartTime = time.time() - restartTime
2759 main.log.debug( "Restart time: " + str( main.restartTime ) )
2760 # TODO: Make this configurable. Also, we are breaking the above timer
2761 main.step( "Checking ONOS nodes" )
2762 nodeResults = utilities.retry( self.nodesCheck,
2763 False,
Jon Hallca319892017-06-15 15:25:22 -07002764 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07002765 sleep=15,
2766 attempts=5 )
2767
2768 utilities.assert_equals( expect=True, actual=nodeResults,
2769 onpass="Nodes check successful",
2770 onfail="Nodes check NOT successful" )
2771
2772 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002773 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002774 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002775 ctrl.name,
2776 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002777 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -07002778 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002779
Jon Hallca319892017-06-15 15:25:22 -07002780 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002781
2782 main.step( "Rerun for election on the node(s) that were killed" )
2783 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002784 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002785 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002786 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002787 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2788 onpass="ONOS nodes reran for election topic",
2789 onfail="Error rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002790
Devin Lim142b5342017-07-20 15:22:39 -07002791 def tempCell( self, cellName, ipList ):
2792 main.step( "Create cell file" )
2793 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002794
Devin Lim142b5342017-07-20 15:22:39 -07002795 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2796 main.Mininet1.ip_address,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002797 cellAppString, ipList, main.ONOScli1.karafUser )
Devin Lim142b5342017-07-20 15:22:39 -07002798 main.step( "Applying cell variable to environment" )
2799 cellResult = main.ONOSbench.setCell( cellName )
2800 verifyResult = main.ONOSbench.verifyCell()
2801
Devin Lim142b5342017-07-20 15:22:39 -07002802 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002803 """
2804 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002805 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002806 1: scaling
2807 """
2808 """
2809 Check state after ONOS failure/scaling
2810 """
2811 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002812 assert main, "main not defined"
2813 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002814 main.case( "Running ONOS Constant State Tests" )
2815
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002816 OnosAfterWhich = [ "failure", "scaling" ]
Devin Lim58046fa2017-07-05 16:55:00 -07002817
Devin Lim58046fa2017-07-05 16:55:00 -07002818 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002819 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002820
Devin Lim142b5342017-07-20 15:22:39 -07002821 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002822 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002823
2824 if rolesResults and not consistentMastership:
2825 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002826 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002827 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002828 json.dumps( json.loads( ONOSMastership[ i ] ),
2829 sort_keys=True,
2830 indent=4,
2831 separators=( ',', ': ' ) ) )
2832
2833 if compareSwitch:
2834 description2 = "Compare switch roles from before failure"
2835 main.step( description2 )
2836 try:
2837 currentJson = json.loads( ONOSMastership[ 0 ] )
2838 oldJson = json.loads( mastershipState )
2839 except ( ValueError, TypeError ):
2840 main.log.exception( "Something is wrong with parsing " +
2841 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002842 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2843 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002844 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002845 mastershipCheck = main.TRUE
2846 for i in range( 1, 29 ):
2847 switchDPID = str(
2848 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
2849 current = [ switch[ 'master' ] for switch in currentJson
2850 if switchDPID in switch[ 'id' ] ]
2851 old = [ switch[ 'master' ] for switch in oldJson
2852 if switchDPID in switch[ 'id' ] ]
2853 if current == old:
2854 mastershipCheck = mastershipCheck and main.TRUE
2855 else:
2856 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2857 mastershipCheck = main.FALSE
2858 utilities.assert_equals(
2859 expect=main.TRUE,
2860 actual=mastershipCheck,
2861 onpass="Mastership of Switches was not changed",
2862 onfail="Mastership of some switches changed" )
2863
2864 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002865 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002866 intentCheck = main.FALSE
2867 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002868
2869 main.step( "Check for consistency in Intents from each controller" )
2870 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2871 main.log.info( "Intents are consistent across all ONOS " +
2872 "nodes" )
2873 else:
2874 consistentIntents = False
2875
2876 # Try to make it easy to figure out what is happening
2877 #
2878 # Intent ONOS1 ONOS2 ...
2879 # 0x01 INSTALLED INSTALLING
2880 # ... ... ...
2881 # ... ... ...
2882 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002883 for ctrl in main.Cluster.active():
2884 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002885 main.log.warn( title )
2886 # get all intent keys in the cluster
2887 keys = []
2888 for nodeStr in ONOSIntents:
2889 node = json.loads( nodeStr )
2890 for intent in node:
2891 keys.append( intent.get( 'id' ) )
2892 keys = set( keys )
2893 for key in keys:
2894 row = "%-13s" % key
2895 for nodeStr in ONOSIntents:
2896 node = json.loads( nodeStr )
2897 for intent in node:
2898 if intent.get( 'id' ) == key:
2899 row += "%-15s" % intent.get( 'state' )
2900 main.log.warn( row )
2901 # End table view
2902
2903 utilities.assert_equals(
2904 expect=True,
2905 actual=consistentIntents,
2906 onpass="Intents are consistent across all ONOS nodes",
2907 onfail="ONOS nodes have different views of intents" )
2908 intentStates = []
2909 for node in ONOSIntents: # Iter through ONOS nodes
2910 nodeStates = []
2911 # Iter through intents of a node
2912 try:
2913 for intent in json.loads( node ):
2914 nodeStates.append( intent[ 'state' ] )
2915 except ( ValueError, TypeError ):
2916 main.log.exception( "Error in parsing intents" )
2917 main.log.error( repr( node ) )
2918 intentStates.append( nodeStates )
2919 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2920 main.log.info( dict( out ) )
2921
2922 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002923 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002924 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002925 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002926 main.log.warn( json.dumps(
2927 json.loads( ONOSIntents[ i ] ),
2928 sort_keys=True,
2929 indent=4,
2930 separators=( ',', ': ' ) ) )
2931 elif intentsResults and consistentIntents:
2932 intentCheck = main.TRUE
2933
2934 # NOTE: Store has no durability, so intents are lost across system
2935 # restarts
2936 if not isRestart:
2937 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2938 # NOTE: this requires case 5 to pass for intentState to be set.
2939 # maybe we should stop the test if that fails?
2940 sameIntents = main.FALSE
2941 try:
2942 intentState
2943 except NameError:
2944 main.log.warn( "No previous intent state was saved" )
2945 else:
2946 if intentState and intentState == ONOSIntents[ 0 ]:
2947 sameIntents = main.TRUE
2948 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2949 # TODO: possibly the states have changed? we may need to figure out
2950 # what the acceptable states are
2951 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2952 sameIntents = main.TRUE
2953 try:
2954 before = json.loads( intentState )
2955 after = json.loads( ONOSIntents[ 0 ] )
2956 for intent in before:
2957 if intent not in after:
2958 sameIntents = main.FALSE
2959 main.log.debug( "Intent is not currently in ONOS " +
2960 "(at least in the same form):" )
2961 main.log.debug( json.dumps( intent ) )
2962 except ( ValueError, TypeError ):
2963 main.log.exception( "Exception printing intents" )
2964 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2965 main.log.debug( repr( intentState ) )
2966 if sameIntents == main.FALSE:
2967 try:
2968 main.log.debug( "ONOS intents before: " )
2969 main.log.debug( json.dumps( json.loads( intentState ),
2970 sort_keys=True, indent=4,
2971 separators=( ',', ': ' ) ) )
2972 main.log.debug( "Current ONOS intents: " )
2973 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2974 sort_keys=True, indent=4,
2975 separators=( ',', ': ' ) ) )
2976 except ( ValueError, TypeError ):
2977 main.log.exception( "Exception printing intents" )
2978 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2979 main.log.debug( repr( intentState ) )
2980 utilities.assert_equals(
2981 expect=main.TRUE,
2982 actual=sameIntents,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002983 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
Devin Lim58046fa2017-07-05 16:55:00 -07002984 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
2985 intentCheck = intentCheck and sameIntents
2986
2987 main.step( "Get the OF Table entries and compare to before " +
2988 "component " + OnosAfterWhich[ afterWhich ] )
2989 FlowTables = main.TRUE
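        # flows[ i ] is assumed to hold the flow table captured from switch
        # s( i + 1 ) by an earlier case, before the failure/scaling event;
        # flowTableComp() diffs it against the current table.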
2990 for i in range( 28 ):
2991 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2992 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2993 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
2994 FlowTables = FlowTables and curSwitch
2995 if curSwitch == main.FALSE:
2996 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2997 utilities.assert_equals(
2998 expect=main.TRUE,
2999 actual=FlowTables,
3000 onpass="No changes were found in the flow tables",
3001 onfail="Changes were found in the flow tables" )
3002
Jon Hallca319892017-06-15 15:25:22 -07003003 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07003004 """
3005 main.step( "Check the continuous pings to ensure that no packets " +
3006 "were dropped during component failure" )
3007 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
3008 main.params[ 'TESTONIP' ] )
3009 LossInPings = main.FALSE
3010 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3011 for i in range( 8, 18 ):
3012 main.log.info(
3013 "Checking for a loss in pings along flow from s" +
3014 str( i ) )
3015 LossInPings = main.Mininet2.checkForLoss(
3016 "/tmp/ping.h" +
3017 str( i ) ) or LossInPings
3018 if LossInPings == main.TRUE:
3019 main.log.info( "Loss in ping detected" )
3020 elif LossInPings == main.ERROR:
3021 main.log.info( "There are multiple mininet process running" )
3022 elif LossInPings == main.FALSE:
3023 main.log.info( "No Loss in the pings" )
3024 main.log.info( "No loss of dataplane connectivity" )
3025 utilities.assert_equals(
3026 expect=main.FALSE,
3027 actual=LossInPings,
3028 onpass="No Loss of connectivity",
3029 onfail="Loss of dataplane connectivity detected" )
3030 # NOTE: Since intents are not persisted with IntentStore,
3031 # we expect loss in dataplane connectivity
3032 LossInPings = main.FALSE
3033 """
Devin Lim58046fa2017-07-05 16:55:00 -07003034 def compareTopo( self, main ):
3035 """
3036 Compare topo
3037 """
3038 import json
3039 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003040 assert main, "main not defined"
3041 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003042 try:
3043 from tests.dependencies.topology import Topology
3044 except ImportError:
3045 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003046 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003047 try:
3048 main.topoRelated
3049 except ( NameError, AttributeError ):
3050 main.topoRelated = Topology()
3051 main.case( "Compare ONOS Topology view to Mininet topology" )
3052 main.caseExplanation = "Compare topology objects between Mininet" +\
3053 " and ONOS"
3054 topoResult = main.FALSE
3055 topoFailMsg = "ONOS topology doesn't match Mininet"
3056 elapsed = 0
3057 count = 0
3058 main.step( "Comparing ONOS topology to MN topology" )
3059 startTime = time.time()
3060 # Give time for Gossip to work
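        # Keep polling until the ONOS view matches Mininet, or until both 60
        # seconds have elapsed and at least 3 comparison rounds have been made.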
3061 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3062 devicesResults = main.TRUE
3063 linksResults = main.TRUE
3064 hostsResults = main.TRUE
3065 hostAttachmentResults = True
3066 count += 1
3067 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003068 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003069 kwargs={ 'sleep': 5, 'attempts': 5,
3070 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003071 ipResult = main.TRUE
3072
Devin Lim142b5342017-07-20 15:22:39 -07003073 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003074 kwargs={ 'sleep': 5, 'attempts': 5,
3075 'randomTime': True },
3076 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003077
3078 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003079 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003080 if hosts[ controller ]:
3081 for host in hosts[ controller ]:
3082 if host is None or host.get( 'ipAddresses', [] ) == []:
3083 main.log.error(
3084 "Error with host ipAddresses on controller" +
3085 controllerStr + ": " + str( host ) )
3086 ipResult = main.FALSE
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003087 ports = main.topoRelated.getAll( "ports", True,
Jon Hallca319892017-06-15 15:25:22 -07003088 kwargs={ 'sleep': 5, 'attempts': 5,
3089 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003090 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003091 kwargs={ 'sleep': 5, 'attempts': 5,
3092 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003093 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003094 kwargs={ 'sleep': 5, 'attempts': 5,
3095 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003096
3097 elapsed = time.time() - startTime
3098 cliTime = time.time() - cliStart
3099 print "Elapsed time: " + str( elapsed )
3100 print "CLI time: " + str( cliTime )
3101
3102 if all( e is None for e in devices ) and\
3103 all( e is None for e in hosts ) and\
3104 all( e is None for e in ports ) and\
3105 all( e is None for e in links ) and\
3106 all( e is None for e in clusters ):
3107 topoFailMsg = "Could not get topology from ONOS"
3108 main.log.error( topoFailMsg )
3109 continue # Try again, No use trying to compare
3110
3111 mnSwitches = main.Mininet1.getSwitches()
3112 mnLinks = main.Mininet1.getLinks()
3113 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003114 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003115 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003116 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3117 controller,
3118 mnSwitches,
3119 devices,
3120 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003121 utilities.assert_equals( expect=main.TRUE,
3122 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003123 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003124 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003125 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003126 " Switches view is incorrect" )
3127
Devin Lim58046fa2017-07-05 16:55:00 -07003128 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003129 main.Mininet1.compareLinks,
3130 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003131 utilities.assert_equals( expect=main.TRUE,
3132 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003133 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003134 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003135 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003136 " links view is incorrect" )
3137 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3138 currentHostsResult = main.Mininet1.compareHosts(
3139 mnHosts,
3140 hosts[ controller ] )
3141 elif hosts[ controller ] == []:
3142 currentHostsResult = main.TRUE
3143 else:
3144 currentHostsResult = main.FALSE
3145 utilities.assert_equals( expect=main.TRUE,
3146 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003147 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003148 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003149 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003150 " hosts don't match Mininet" )
3151 # CHECKING HOST ATTACHMENT POINTS
3152 hostAttachment = True
3153 zeroHosts = False
3154 # FIXME: topo-HA/obelisk specific mappings:
3155 # key is mac and value is dpid
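            # e.g. h3 ( mac 00:00:00:00:00:03 ) should attach to device 0000000000003000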
3156 mappings = {}
3157 for i in range( 1, 29 ): # hosts 1 through 28
3158 # set up correct variables:
3159 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
3160 if i == 1:
3161 deviceId = "1000".zfill( 16 )
3162 elif i == 2:
3163 deviceId = "2000".zfill( 16 )
3164 elif i == 3:
3165 deviceId = "3000".zfill( 16 )
3166 elif i == 4:
3167 deviceId = "3004".zfill( 16 )
3168 elif i == 5:
3169 deviceId = "5000".zfill( 16 )
3170 elif i == 6:
3171 deviceId = "6000".zfill( 16 )
3172 elif i == 7:
3173 deviceId = "6007".zfill( 16 )
3174 elif i >= 8 and i <= 17:
3175 dpid = '3' + str( i ).zfill( 3 )
3176 deviceId = dpid.zfill( 16 )
3177 elif i >= 18 and i <= 27:
3178 dpid = '6' + str( i ).zfill( 3 )
3179 deviceId = dpid.zfill( 16 )
3180 elif i == 28:
3181 deviceId = "2800".zfill( 16 )
3182 mappings[ macId ] = deviceId
3183 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3184 if hosts[ controller ] == []:
3185 main.log.warn( "There are no hosts discovered" )
3186 zeroHosts = True
3187 else:
3188 for host in hosts[ controller ]:
3189 mac = None
3190 location = None
3191 device = None
3192 port = None
3193 try:
3194 mac = host.get( 'mac' )
3195 assert mac, "mac field could not be found for this host object"
Devin Limefaf3062017-08-14 16:18:19 -07003196 print host
3197 if 'locations' in host:
3198 location = host.get( 'locations' )[ 0 ]
3199 elif 'location' in host:
3200 location = host.get( 'location' )
Devin Lim58046fa2017-07-05 16:55:00 -07003201 assert location, "location field could not be found for this host object"
3202
3203 # Trim the protocol identifier off deviceId
3204 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
3205 assert device, "elementId field could not be found for this host location object"
3206
3207 port = location.get( 'port' )
3208 assert port, "port field could not be found for this host location object"
3209
3210 # Now check if this matches where they should be
3211 if mac and device and port:
3212 if str( port ) != "1":
3213 main.log.error( "The attachment port is incorrect for " +
3214 "host " + str( mac ) +
3215 ". Expected: 1 Actual: " + str( port ) )
3216 hostAttachment = False
3217 if device != mappings[ str( mac ) ]:
3218 main.log.error( "The attachment device is incorrect for " +
3219 "host " + str( mac ) +
3220 ". Expected: " + mappings[ str( mac ) ] +
3221 " Actual: " + device )
3222 hostAttachment = False
3223 else:
3224 hostAttachment = False
Devin Limefaf3062017-08-14 16:18:19 -07003225 except ( AssertionError, TypeError ):
Devin Lim58046fa2017-07-05 16:55:00 -07003226 main.log.exception( "Json object not as expected" )
3227 main.log.error( repr( host ) )
3228 hostAttachment = False
3229 else:
3230 main.log.error( "No hosts json output or \"Error\"" +
3231 " in output. hosts = " +
3232 repr( hosts[ controller ] ) )
3233 if zeroHosts is False:
3234 # TODO: Find a way to know if there should be hosts in a
3235 # given point of the test
3236 hostAttachment = True
3237
3238 # END CHECKING HOST ATTACHMENT POINTS
3239 devicesResults = devicesResults and currentDevicesResult
3240 linksResults = linksResults and currentLinksResult
3241 hostsResults = hostsResults and currentHostsResult
3242 hostAttachmentResults = hostAttachmentResults and\
3243 hostAttachment
3244 topoResult = ( devicesResults and linksResults
3245 and hostsResults and ipResult and
3246 hostAttachmentResults )
3247 utilities.assert_equals( expect=True,
3248 actual=topoResult,
3249 onpass="ONOS topology matches Mininet",
3250 onfail=topoFailMsg )
3251 # End of While loop to pull ONOS state
3252
3253 # Compare json objects for hosts and dataplane clusters
3254
3255 # hosts
3256 main.step( "Hosts view is consistent across all ONOS nodes" )
3257 consistentHostsResult = main.TRUE
3258 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003259 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003260 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3261 if hosts[ controller ] == hosts[ 0 ]:
3262 continue
3263 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003264 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003265 " are inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003266 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003267 consistentHostsResult = main.FALSE
3268
3269 else:
Jon Hallca319892017-06-15 15:25:22 -07003270 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003271 controllerStr )
3272 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003273 main.log.debug( controllerStr +
3274 " hosts response: " +
3275 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003276 utilities.assert_equals(
3277 expect=main.TRUE,
3278 actual=consistentHostsResult,
3279 onpass="Hosts view is consistent across all ONOS nodes",
3280 onfail="ONOS nodes have different views of hosts" )
3281
3282 main.step( "Hosts information is correct" )
3283 hostsResults = hostsResults and ipResult
3284 utilities.assert_equals(
3285 expect=main.TRUE,
3286 actual=hostsResults,
3287 onpass="Host information is correct",
3288 onfail="Host information is incorrect" )
3289
3290 main.step( "Host attachment points to the network" )
3291 utilities.assert_equals(
3292 expect=True,
3293 actual=hostAttachmentResults,
3294 onpass="Hosts are correctly attached to the network",
3295 onfail="ONOS did not correctly attach hosts to the network" )
3296
3297 # Strongly connected clusters of devices
3298 main.step( "Clusters view is consistent across all ONOS nodes" )
3299 consistentClustersResult = main.TRUE
3300 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003301 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003302 if "Error" not in clusters[ controller ]:
3303 if clusters[ controller ] == clusters[ 0 ]:
3304 continue
3305 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003306 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003307 controllerStr +
3308 " is inconsistent with ONOS1" )
3309 consistentClustersResult = main.FALSE
3310 else:
3311 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003312 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003313 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003314 main.log.debug( controllerStr +
3315 " clusters response: " +
3316 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003317 utilities.assert_equals(
3318 expect=main.TRUE,
3319 actual=consistentClustersResult,
3320 onpass="Clusters view is consistent across all ONOS nodes",
3321 onfail="ONOS nodes have different views of clusters" )
3322 if not consistentClustersResult:
3323 main.log.debug( clusters )
3324 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003325 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003326
3327 main.step( "There is only one SCC" )
3328 # there should always only be one cluster
3329 try:
3330 numClusters = len( json.loads( clusters[ 0 ] ) )
3331 except ( ValueError, TypeError ):
3332 main.log.exception( "Error parsing clusters[0]: " +
3333 repr( clusters[ 0 ] ) )
3334 numClusters = "ERROR"
3335 clusterResults = main.FALSE
3336 if numClusters == 1:
3337 clusterResults = main.TRUE
3338 utilities.assert_equals(
3339 expect=1,
3340 actual=numClusters,
3341 onpass="ONOS shows 1 SCC",
3342 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3343
3344 topoResult = ( devicesResults and linksResults
3345 and hostsResults and consistentHostsResult
3346 and consistentClustersResult and clusterResults
3347 and ipResult and hostAttachmentResults )
3348
3349 topoResult = topoResult and int( count <= 2 )
3350 note = "note it takes about " + str( int( cliTime ) ) + \
3351 " seconds for the test to make all the cli calls to fetch " +\
3352 "the topology from each ONOS instance"
3353 main.log.info(
3354 "Very crass estimate for topology discovery/convergence( " +
3355 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3356 str( count ) + " tries" )
3357
3358 main.step( "Device information is correct" )
3359 utilities.assert_equals(
3360 expect=main.TRUE,
3361 actual=devicesResults,
3362 onpass="Device information is correct",
3363 onfail="Device information is incorrect" )
3364
3365 main.step( "Links are correct" )
3366 utilities.assert_equals(
3367 expect=main.TRUE,
3368 actual=linksResults,
3369 onpass="Links are correct",
3370 onfail="Links are incorrect" )
3371
3372 main.step( "Hosts are correct" )
3373 utilities.assert_equals(
3374 expect=main.TRUE,
3375 actual=hostsResults,
3376 onpass="Hosts are correct",
3377 onfail="Hosts are incorrect" )
3378
3379 # FIXME: move this to an ONOS state case
3380 main.step( "Checking ONOS nodes" )
3381 nodeResults = utilities.retry( self.nodesCheck,
3382 False,
Jon Hallca319892017-06-15 15:25:22 -07003383 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07003384 attempts=5 )
3385 utilities.assert_equals( expect=True, actual=nodeResults,
3386 onpass="Nodes check successful",
3387 onfail="Nodes check NOT successful" )
3388 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003389 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003390 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003391 ctrl.name,
3392 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003393
3394 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003395 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003396
Devin Lim58046fa2017-07-05 16:55:00 -07003397 def linkDown( self, main, fromS="s3", toS="s28" ):
3398 """
3399 Link fromS-toS down
3400 """
3401 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003402 assert main, "main not defined"
3403 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003404 # NOTE: You should probably run a topology check after this
3405
3406 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3407
3408 description = "Turn off a link to ensure that Link Discovery " +\
3409 "is working properly"
3410 main.case( description )
3411
3412 main.step( "Kill Link between " + fromS + " and " + toS )
3413 LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
3414 main.log.info( "Waiting " + str( linkSleep ) +
3415 " seconds for link down to be discovered" )
3416 time.sleep( linkSleep )
3417 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3418 onpass="Link down successful",
3419 onfail="Failed to bring link down" )
3420 # TODO do some sort of check here
3421
3422 def linkUp( self, main, fromS="s3", toS="s28" ):
3423 """
3424 Link fromS-toS up
3425 """
3426 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003427 assert main, "main not defined"
3428 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003429 # NOTE: You should probably run a topology check after this
3430
3431 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3432
3433 description = "Restore a link to ensure that Link Discovery is " + \
3434 "working properly"
3435 main.case( description )
3436
Jon Hall4173b242017-09-12 17:04:38 -07003437 main.step( "Bring link between " + fromS + " and " + toS + " back up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003438 LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
3439 main.log.info( "Waiting " + str( linkSleep ) +
3440 " seconds for link up to be discovered" )
3441 time.sleep( linkSleep )
3442 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3443 onpass="Link up successful",
3444 onfail="Failed to bring link up" )
3445
3446 def switchDown( self, main ):
3447 """
3448 Switch Down
3449 """
3450 # NOTE: You should probably run a topology check after this
3451 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003452 assert main, "main not defined"
3453 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003454
3455 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3456
3457 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003458 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003459 main.case( description )
3460 switch = main.params[ 'kill' ][ 'switch' ]
3461 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3462
3463 # TODO: Make this switch parameterizable
3464 main.step( "Kill " + switch )
3465 main.log.info( "Deleting " + switch )
3466 main.Mininet1.delSwitch( switch )
3467 main.log.info( "Waiting " + str( switchSleep ) +
3468 " seconds for switch down to be discovered" )
3469 time.sleep( switchSleep )
3470 device = onosCli.getDevice( dpid=switchDPID )
3471 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003472 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003473 result = main.FALSE
3474 if device and device[ 'available' ] is False:
3475 result = main.TRUE
3476 utilities.assert_equals( expect=main.TRUE, actual=result,
3477 onpass="Kill switch successful",
3478 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003479
Devin Lim58046fa2017-07-05 16:55:00 -07003480 def switchUp( self, main ):
3481 """
3482 Switch Up
3483 """
3484 # NOTE: You should probably run a topology check after this
3485 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003486 assert main, "main not defined"
3487 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003488
3489 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3490 switch = main.params[ 'kill' ][ 'switch' ]
3491 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3492 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003493 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003494 description = "Adding a switch to ensure it is discovered correctly"
3495 main.case( description )
3496
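        # The switch is re-created with its original DPID, its links to the
        # listed peers are re-added, and it is re-assigned to every cluster
        # member before checking that ONOS marks the device available again.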
3497 main.step( "Add back " + switch )
3498 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3499 for peer in links:
3500 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003501 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003502 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3503 main.log.info( "Waiting " + str( switchSleep ) +
3504 " seconds for switch up to be discovered" )
3505 time.sleep( switchSleep )
3506 device = onosCli.getDevice( dpid=switchDPID )
3507 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003508 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003509 result = main.FALSE
3510 if device and device[ 'available' ]:
3511 result = main.TRUE
3512 utilities.assert_equals( expect=main.TRUE, actual=result,
3513 onpass="add switch successful",
3514 onfail="Failed to add switch?" )
3515
3516 def startElectionApp( self, main ):
3517 """
3518 start election app on all onos nodes
3519 """
Devin Lim58046fa2017-07-05 16:55:00 -07003520 assert main, "main not defined"
3521 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003522
3523 main.case( "Start Leadership Election app" )
3524 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003525 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -07003526 appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003527 utilities.assert_equals(
3528 expect=main.TRUE,
3529 actual=appResult,
3530 onpass="Election app installed",
3531 onfail="Something went wrong with installing Leadership election" )
3532
3533 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003534 onosCli.electionTestRun()
3535 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003536 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003537 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003538 utilities.assert_equals(
3539 expect=True,
3540 actual=sameResult,
3541 onpass="All nodes see the same leaderboards",
3542 onfail="Inconsistent leaderboards" )
3543
3544 if sameResult:
3545 leader = leaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003546 if onosCli.ipAddress in leader:
Devin Lim58046fa2017-07-05 16:55:00 -07003547 correctLeader = True
3548 else:
3549 correctLeader = False
3550 main.step( "First node was elected leader" )
3551 utilities.assert_equals(
3552 expect=True,
3553 actual=correctLeader,
3554 onpass="Correct leader was elected",
3555 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003556 main.Cluster.testLeader = leader
3557
Devin Lim58046fa2017-07-05 16:55:00 -07003558 def isElectionFunctional( self, main ):
3559 """
3560 Check that Leadership Election is still functional
3561 15.1 Run election on each node
3562 15.2 Check that each node has the same leaders and candidates
3563 15.3 Find current leader and withdraw
3564 15.4 Check that a new node was elected leader
3565 15.5 Check that the new leader was the candidate of the old leader
3566 15.6 Run for election on old leader
3567 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3568 15.8 Make sure that the old leader was added to the candidate list
3569
3570 old and new variable prefixes refer to data from before vs after
3571 withdrawal, and later to before withdrawal vs after re-election
3572 """
3573 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003574 assert main, "main not defined"
3575 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003576
3577 description = "Check that Leadership Election is still functional"
3578 main.case( description )
3579 # NOTE: Need to re-run after restarts since being a candidate is not persistent
3580
3581 oldLeaders = [] # list of lists of each nodes' candidates before
3582 newLeaders = [] # list of lists of each nodes' candidates after
3583 oldLeader = '' # the old leader from oldLeaders, None if not same
3584 newLeader = '' # the new leader from newLeaders, None if not same
3585 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3586 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003587 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003588 expectNoLeader = True
3589
3590 main.step( "Run for election on each node" )
Devin Lim142b5342017-07-20 15:22:39 -07003591 electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003592 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003593 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003594 actual=electionResult,
3595 onpass="All nodes successfully ran for leadership",
3596 onfail="At least one node failed to run for leadership" )
3597
3598 if electionResult == main.FALSE:
3599 main.log.error(
3600 "Skipping Test Case because Election Test App isn't loaded" )
3601 main.skipCase()
3602
3603 main.step( "Check that each node shows the same leader and candidates" )
3604 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003605 activeCLIs = main.Cluster.active()
3606 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Devin Lim58046fa2017-07-05 16:55:00 -07003607 if sameResult:
3608 oldLeader = oldLeaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003609 main.log.info( "Old leader: " + oldLeader )
Devin Lim58046fa2017-07-05 16:55:00 -07003610 else:
3611 oldLeader = None
3612 utilities.assert_equals(
3613 expect=True,
3614 actual=sameResult,
3615 onpass="Leaderboards are consistent for the election topic",
3616 onfail=failMessage )
3617
3618 main.step( "Find current leader and withdraw" )
3619 withdrawResult = main.TRUE
3620 # do some sanity checking on leader before using it
3621 if oldLeader is None:
3622 main.log.error( "Leadership isn't consistent." )
3623 withdrawResult = main.FALSE
3624 # Get the CLI of the oldLeader
Jon Hallca319892017-06-15 15:25:22 -07003625 for ctrl in main.Cluster.active():
3626 if oldLeader == ctrl.ipAddress:
3627 oldLeaderCLI = ctrl
Devin Lim58046fa2017-07-05 16:55:00 -07003628 break
3629 else: # FOR/ELSE statement
3630 main.log.error( "Leader election, could not find current leader" )
3631 if oldLeader:
3632 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3633 utilities.assert_equals(
3634 expect=main.TRUE,
3635 actual=withdrawResult,
3636 onpass="Node was withdrawn from election",
3637 onfail="Node was not withdrawn from election" )
3638
3639 main.step( "Check that a new node was elected leader" )
3640 failMessage = "Nodes have different leaders"
3641 # Get new leaders and candidates
3642 newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
3643 newLeader = None
3644 if newLeaderResult:
3645 if newLeaders[ 0 ][ 0 ] == 'none':
3646 main.log.error( "No leader was elected on at least 1 node" )
3647 if not expectNoLeader:
3648 newLeaderResult = False
3649 newLeader = newLeaders[ 0 ][ 0 ]
3650
3651 # Check that the new leader is not the older leader, which was withdrawn
3652 if newLeader == oldLeader:
3653 newLeaderResult = False
3654 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3655 " as the current leader" )
3656 utilities.assert_equals(
3657 expect=True,
3658 actual=newLeaderResult,
3659 onpass="Leadership election passed",
3660 onfail="Something went wrong with Leadership election" )
3661
3662 main.step( "Check that the new leader was the candidate of the old leader" )
3663 # candidates[ 2 ] should become the top candidate after withdrawal
3664 correctCandidateResult = main.TRUE
3665 if expectNoLeader:
3666 if newLeader == 'none':
3667 main.log.info( "No leader expected. None found. Pass" )
3668 correctCandidateResult = main.TRUE
3669 else:
3670 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3671 correctCandidateResult = main.FALSE
3672 elif len( oldLeaders[ 0 ] ) >= 3:
3673 if newLeader == oldLeaders[ 0 ][ 2 ]:
3674 # correct leader was elected
3675 correctCandidateResult = main.TRUE
3676 else:
3677 correctCandidateResult = main.FALSE
3678 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3679 newLeader, oldLeaders[ 0 ][ 2 ] ) )
3680 else:
3681 main.log.warn( "Could not determine who should be the correct leader" )
3682 main.log.debug( oldLeaders[ 0 ] )
3683 correctCandidateResult = main.FALSE
3684 utilities.assert_equals(
3685 expect=main.TRUE,
3686 actual=correctCandidateResult,
3687 onpass="Correct Candidate Elected",
3688 onfail="Incorrect Candidate Elected" )
3689
3690 main.step( "Run for election on old leader( just so everyone " +
3691 "is in the hat )" )
3692 if oldLeaderCLI is not None:
3693 runResult = oldLeaderCLI.electionTestRun()
3694 else:
3695 main.log.error( "No old leader to re-elect" )
3696 runResult = main.FALSE
3697 utilities.assert_equals(
3698 expect=main.TRUE,
3699 actual=runResult,
3700 onpass="App re-ran for election",
3701 onfail="App failed to run for election" )
3702
3703 main.step(
3704 "Check that oldLeader is a candidate, and leader if only 1 node" )
3705 # verify leader didn't just change
3706 # Get new leaders and candidates
3707 reRunLeaders = []
3708 time.sleep( 5 ) # Parameterize
3709 positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3710
3711 # Check that the re-elected node is last on the candidate List
3712 if not reRunLeaders[ 0 ]:
3713 positionResult = main.FALSE
3714 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
3715 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
3716 str( reRunLeaders[ 0 ] ) ) )
3717 positionResult = main.FALSE
3718 utilities.assert_equals(
3719 expect=True,
3720 actual=positionResult,
3721 onpass="Old leader successfully re-ran for election",
3722 onfail="Something went wrong with Leadership election after " +
3723 "the old leader re-ran for election" )
Jon Hallca319892017-06-15 15:25:22 -07003724
Devin Lim58046fa2017-07-05 16:55:00 -07003725 def installDistributedPrimitiveApp( self, main ):
3726 """
3727 Install Distributed Primitives app
3728 """
3729 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003730 assert main, "main not defined"
3731 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003732
3733 # Variables for the distributed primitives tests
3734 main.pCounterName = "TestON-Partitions"
3735 main.pCounterValue = 0
3736 main.onosSet = set( [] )
3737 main.onosSetName = "TestON-set"
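        # These are the expected baseline values: pCounterValue mirrors what the
        # distributed counter should read and onosSet mirrors the distributed
        # set, so later cases can compare ONOS state against them.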
3738
3739 description = "Install Primitives app"
3740 main.case( description )
3741 main.step( "Install Primitives app" )
3742 appName = "org.onosproject.distributedprimitives"
Devin Lime9f0ccf2017-08-11 17:25:12 -07003743 appResults = main.Cluster.next().CLI.activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003744 utilities.assert_equals( expect=main.TRUE,
3745 actual=appResults,
3746 onpass="Primitives app activated",
3747 onfail="Primitives app not activated" )
3748 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003749 time.sleep( 5 ) # To allow all nodes to activate