Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
2Copyright 2015 Open Networking Foundation (ONF)
3
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
11 (at your option) any later version.
12
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
21
import json
import time
import pexpect  # needed for the pexpect.TIMEOUT/pexpect.EOF exceptions caught in cleanUpGenPartition
Jon Halle1a3b752015-07-22 13:02:46 -070024
Jon Hallf37d44d2017-05-24 10:37:30 -070025
Jon Hall41d39f12016-04-11 22:54:35 -070026class HA():
Jon Hall57b50432015-10-22 10:20:10 -070027
Jon Halla440e872016-03-31 15:15:50 -070028 def __init__( self ):
29 self.default = ''
Jon Hall57b50432015-10-22 10:20:10 -070030
Devin Lim58046fa2017-07-05 16:55:00 -070031 def customizeOnosGenPartitions( self ):
        # copy the gen-partitions file to ONOS
33 # NOTE: this assumes TestON and ONOS are on the same machine
34 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
35 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
36 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
37 main.ONOSbench.ip_address,
38 srcFile,
39 dstDir,
40 pwd=main.ONOSbench.pwd,
41 direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070042
Devin Lim58046fa2017-07-05 16:55:00 -070043 def cleanUpGenPartition( self ):
44 # clean up gen-partitions file
45 try:
46 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
47 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
48 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
49 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
50 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
51 str( main.ONOSbench.handle.before ) )
52 except ( pexpect.TIMEOUT, pexpect.EOF ):
53 main.log.exception( "ONOSbench: pexpect exception found:" +
54 main.ONOSbench.handle.before )
Devin Lim44075962017-08-11 10:56:37 -070055 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070056
Devin Lim58046fa2017-07-05 16:55:00 -070057 def startingMininet( self ):
58 main.step( "Starting Mininet" )
59 # scp topo file to mininet
60 # TODO: move to params?
61 topoName = "obelisk.py"
62 filePath = main.ONOSbench.home + "/tools/test/topos/"
63 main.ONOSbench.scp( main.Mininet1,
64 filePath + topoName,
65 main.Mininet1.home,
66 direction="to" )
67 mnResult = main.Mininet1.startNet()
68 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
69 onpass="Mininet Started",
70 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070071
Devin Lim58046fa2017-07-05 16:55:00 -070072 def scalingMetadata( self ):
73 import re
Devin Lim142b5342017-07-20 15:22:39 -070074 main.step( "Generate initial metadata file" )
Devin Lim58046fa2017-07-05 16:55:00 -070075 main.scaling = main.params[ 'scaling' ].split( "," )
76 main.log.debug( main.scaling )
77 scale = main.scaling.pop( 0 )
78 main.log.debug( scale )
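        # A scale entry containing "e" ( e.g. a hypothetical "3e" ) selects that
        # many nodes and passes equal=True to the metadata file generator below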
79 if "e" in scale:
80 equal = True
81 else:
82 equal = False
83 main.log.debug( equal )
Devin Lim142b5342017-07-20 15:22:39 -070084 main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
85 genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
Devin Lim58046fa2017-07-05 16:55:00 -070086 utilities.assert_equals( expect=main.TRUE, actual=genResult,
87 onpass="New cluster metadata file generated",
                                 onfail="Failed to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070089
Devin Lim58046fa2017-07-05 16:55:00 -070090 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070091 main.step( "Generate initial metadata file" )
92 if main.Cluster.numCtrls >= 5:
93 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070094 else:
95 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim142b5342017-07-20 15:22:39 -070096 genResult = main.Server.generateFile( main.Cluster.numCtrls )
Devin Lim58046fa2017-07-05 16:55:00 -070097 utilities.assert_equals( expect=main.TRUE, actual=genResult,
98 onpass="New cluster metadata file generated",
                                 onfail="Failed to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -0700100
Devin Lim142b5342017-07-20 15:22:39 -0700101 def setServerForCluster( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700102 import os
103 main.step( "Setup server for cluster metadata file" )
104 main.serverPort = main.params[ 'server' ][ 'port' ]
105 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
106 main.log.debug( "Root dir: {}".format( rootDir ) )
107 status = main.Server.start( main.ONOSbench,
108 rootDir,
109 port=main.serverPort,
110 logDir=main.logdir + "/server.log" )
111 utilities.assert_equals( expect=main.TRUE, actual=status,
112 onpass="Server started",
                                 onfail="Failed to start SimpleHTTPServer" )
114
Jon Hall4f360bc2017-09-07 10:19:52 -0700115 def copyBackupConfig( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700116 main.step( "Copying backup config files" )
117 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
118 cp = main.ONOSbench.scp( main.ONOSbench,
119 main.onosServicepath,
120 main.onosServicepath + ".backup",
121 direction="to" )
122
123 utilities.assert_equals( expect=main.TRUE,
124 actual=cp,
125 onpass="Copy backup config file succeeded",
126 onfail="Copy backup config file failed" )
Jon Hall4f360bc2017-09-07 10:19:52 -0700127
128 def setMetadataUrl( self ):
129 # NOTE: You should probably backup the config before and reset the config after the test
Devin Lim58046fa2017-07-05 16:55:00 -0700130 # we need to modify the onos-service file to use remote metadata file
131 # url for cluster metadata file
132 iface = main.params[ 'server' ].get( 'interface' )
133 ip = main.ONOSbench.getIpAddr( iface=iface )
134 metaFile = "cluster.json"
135 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
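        # NOTE: the '/' characters in the URI are escaped so that javaArgs can be
        #       embedded in the sed expression built below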
136 main.log.warn( javaArgs )
137 main.log.warn( repr( javaArgs ) )
138 handle = main.ONOSbench.handle
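        # Add an "export JAVA_OPTS=..." line right after the "bash" shebang of the
        # onos-service script so ONOS loads its cluster metadata from the metadata
        # web server started for the test ( see setServerForCluster )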
Jon Hall4173b242017-09-12 17:04:38 -0700139 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
140 main.onosServicepath )
Devin Lim58046fa2017-07-05 16:55:00 -0700141 main.log.warn( sed )
142 main.log.warn( repr( sed ) )
143 handle.sendline( sed )
144 handle.expect( metaFile )
145 output = handle.before
146 handle.expect( "\$" )
147 output += handle.before
148 main.log.debug( repr( output ) )
149
150 def cleanUpOnosService( self ):
151 # Cleanup custom onos-service file
152 main.ONOSbench.scp( main.ONOSbench,
153 main.onosServicepath + ".backup",
154 main.onosServicepath,
155 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700156
Jon Halla440e872016-03-31 15:15:50 -0700157 def consistentCheck( self ):
158 """
159 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700160
Jon Hallf37d44d2017-05-24 10:37:30 -0700161 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700162 - onosCounters is the parsed json output of the counters command on
163 all nodes
164 - consistent is main.TRUE if all "TestON" counters are consitent across
165 all nodes or main.FALSE
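
        For example, with a healthy cluster this might look like ( an
        illustrative sketch, not real output ):
            ( [ [ { "name": "TestON-Partitions", "value": 56 } ],
                [ { "name": "TestON-Partitions", "value": 56 } ] ],
              main.TRUE )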
Jon Halla440e872016-03-31 15:15:50 -0700166 """
Jon Halle1a3b752015-07-22 13:02:46 -0700167 try:
Jon Halla440e872016-03-31 15:15:50 -0700168 # Get onos counters results
169 onosCountersRaw = []
170 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700171 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700172 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700173 name="counters-" + str( ctrl ),
174 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700175 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700176 'randomTime': True } )
177 threads.append( t )
178 t.start()
179 for t in threads:
180 t.join()
181 onosCountersRaw.append( t.result )
182 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700183 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700184 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700185 onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700186 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700187 main.log.error( "Could not parse counters response from " +
Devin Lim142b5342017-07-20 15:22:39 -0700188 str( main.Cluster.active( i ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700189 main.log.warn( repr( onosCountersRaw[ i ] ) )
190 onosCounters.append( [] )
191
192 testCounters = {}
193 # make a list of all the "TestON-*" counters in ONOS
            # looks like a dict whose keys are the name of the ONOS node and
195 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700196 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700197 # }
            # NOTE: There is an assumption that all nodes are active
199 # based on the above for loops
200 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700201 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700202 if 'TestON' in key:
Devin Lim142b5342017-07-20 15:22:39 -0700203 node = str( main.Cluster.active( controller[ 0 ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700204 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700205 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700206 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700207 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700208 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700209 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700210 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
211 if all( tmp ):
212 consistent = main.TRUE
213 else:
214 consistent = main.FALSE
                main.log.error( "ONOS nodes have different values for counters:\n" +
                                str( testCounters ) )
217 return ( onosCounters, consistent )
218 except Exception:
219 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700220 main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700221
222 def counterCheck( self, counterName, counterValue ):
223 """
224 Checks that TestON counters are consistent across all nodes and that
225 specified counter is in ONOS with the given value
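
        Example usage from a test case ( an illustrative sketch, assuming the
        suite has created main.HA = HA() and tracks a "TestON-Partitions" counter ):
            check = main.HA.counterCheck( "TestON-Partitions", expectedValue )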
226 """
227 try:
228 correctResults = main.TRUE
229 # Get onos counters results and consistentCheck
230 onosCounters, consistent = self.consistentCheck()
231 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700232 for i in range( len( main.Cluster.active() ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700233 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700234 onosValue = None
235 try:
236 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700237 except AttributeError:
Devin Lim142b5342017-07-20 15:22:39 -0700238 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -0700239 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700240 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700241 correctResults = main.FALSE
242 if onosValue == counterValue:
243 main.log.info( counterName + " counter value is correct" )
244 else:
Jon Hall41d39f12016-04-11 22:54:35 -0700245 main.log.error( counterName +
246 " counter value is incorrect," +
247 " expected value: " + str( counterValue ) +
248 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700249 correctResults = main.FALSE
250 return consistent and correctResults
251 except Exception:
252 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700253 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700254
255 def consistentLeaderboards( self, nodes ):
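        """
        Compare the leader candidate lists for the election topic across the
        given nodes, retrying a few times in case an election is still in
        progress. Returns the tuple ( consistent, leaderList ).
        """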
256 TOPIC = 'org.onosproject.election'
257 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700258 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700259 for n in range( 5 ): # Retry in case election is still happening
260 leaderList = []
261 # Get all leaderboards
262 for cli in nodes:
263 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
264 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700265 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
                     None not in leaderList
267 main.log.debug( leaderList )
268 main.log.warn( result )
269 if result:
270 return ( result, leaderList )
            time.sleep( 5 )  # TODO: parameterize
Jon Hall41d39f12016-04-11 22:54:35 -0700272 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
273 return ( result, leaderList )
274
275 def nodesCheck( self, nodes ):
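        """
        Check that, from the point of view of each of the given nodes, every
        active cluster member is in the READY state.
        """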
276 nodesOutput = []
277 results = True
278 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700279 for node in nodes:
280 t = main.Thread( target=node.nodes,
281 name="nodes-" + str( node ),
Jon Hallf37d44d2017-05-24 10:37:30 -0700282 args=[] )
Jon Hall41d39f12016-04-11 22:54:35 -0700283 threads.append( t )
284 t.start()
285
286 for t in threads:
287 t.join()
288 nodesOutput.append( t.result )
Jon Hallca319892017-06-15 15:25:22 -0700289 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Jon Hall41d39f12016-04-11 22:54:35 -0700290 for i in nodesOutput:
291 try:
292 current = json.loads( i )
293 activeIps = []
294 currentResult = False
295 for node in current:
Jon Hallf37d44d2017-05-24 10:37:30 -0700296 if node[ 'state' ] == 'READY':
297 activeIps.append( node[ 'ip' ] )
Jon Hall41d39f12016-04-11 22:54:35 -0700298 activeIps.sort()
299 if ips == activeIps:
300 currentResult = True
301 except ( ValueError, TypeError ):
302 main.log.error( "Error parsing nodes output" )
303 main.log.warn( repr( i ) )
304 currentResult = False
305 results = results and currentResult
306 return results
Jon Hallca319892017-06-15 15:25:22 -0700307
Devin Lim58046fa2017-07-05 16:55:00 -0700308 def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
309 # GRAPHS
310 # NOTE: important params here:
311 # job = name of Jenkins job
312 # Plot Name = Plot-HA, only can be used if multiple plots
313 # index = The number of the graph under plot name
314 job = testName
315 graphs = '<ac:structured-macro ac:name="html">\n'
316 graphs += '<ac:plain-text-body><![CDATA[\n'
317 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
318 '/plot/' + plotName + '/getPlot?index=' + str( index ) +\
319 '&width=500&height=300"' +\
320 'noborder="0" width="500" height="300" scrolling="yes" ' +\
321 'seamless="seamless"></iframe>\n'
322 graphs += ']]></ac:plain-text-body>\n'
323 graphs += '</ac:structured-macro>\n'
324 main.log.wiki( graphs )
Jon Hallca319892017-06-15 15:25:22 -0700325
Devin Lim58046fa2017-07-05 16:55:00 -0700326 def initialSetUp( self, serviceClean=False ):
327 """
        Rest of the initial setup: optional packet capture, ONOS service cleanup,
        node checks, app activation, and ONOS configuration from the params file
329 """
Devin Lim58046fa2017-07-05 16:55:00 -0700330 if main.params[ 'tcpdump' ].lower() == "true":
331 main.step( "Start Packet Capture MN" )
332 main.Mininet2.startTcpdump(
333 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
334 + "-MN.pcap",
335 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
336 port=main.params[ 'MNtcpdump' ][ 'port' ] )
337
338 if serviceClean:
339 main.step( "Clean up ONOS service changes" )
Devin Lim142b5342017-07-20 15:22:39 -0700340 main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
341 main.ONOSbench.handle.expect( "\$" )
342 main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
343 main.ONOSbench.handle.expect( "\$" )
Devin Lim58046fa2017-07-05 16:55:00 -0700344
345 main.step( "Checking ONOS nodes" )
346 nodeResults = utilities.retry( self.nodesCheck,
347 False,
Jon Hallca319892017-06-15 15:25:22 -0700348 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -0700349 attempts=5 )
350
351 utilities.assert_equals( expect=True, actual=nodeResults,
352 onpass="Nodes check successful",
353 onfail="Nodes check NOT successful" )
354
355 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -0700356 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700357 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -0700358 ctrl.name,
359 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700360 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -0700361 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700362
363 main.step( "Activate apps defined in the params file" )
364 # get data from the params
365 apps = main.params.get( 'apps' )
366 if apps:
367 apps = apps.split( ',' )
Jon Hallca319892017-06-15 15:25:22 -0700368 main.log.debug( "Apps: " + str( apps ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700369 activateResult = True
370 for app in apps:
Devin Lim142b5342017-07-20 15:22:39 -0700371 main.Cluster.active( 0 ).app( app, "Activate" )
Devin Lim58046fa2017-07-05 16:55:00 -0700372 # TODO: check this worked
373 time.sleep( 10 ) # wait for apps to activate
374 for app in apps:
Devin Lim142b5342017-07-20 15:22:39 -0700375 state = main.Cluster.active( 0 ).appStatus( app )
Devin Lim58046fa2017-07-05 16:55:00 -0700376 if state == "ACTIVE":
377 activateResult = activateResult and True
378 else:
379 main.log.error( "{} is in {} state".format( app, state ) )
380 activateResult = False
381 utilities.assert_equals( expect=True,
382 actual=activateResult,
383 onpass="Successfully activated apps",
384 onfail="Failed to activate apps" )
385 else:
386 main.log.warn( "No apps were specified to be loaded after startup" )
387
388 main.step( "Set ONOS configurations" )
        # FIXME: This should be part of the general startup sequence
Devin Lim58046fa2017-07-05 16:55:00 -0700390 config = main.params.get( 'ONOS_Configuration' )
391 if config:
392 main.log.debug( config )
393 checkResult = main.TRUE
394 for component in config:
395 for setting in config[ component ]:
396 value = config[ component ][ setting ]
Jon Hallca319892017-06-15 15:25:22 -0700397 check = main.Cluster.next().setCfg( component, setting, value )
Devin Lim58046fa2017-07-05 16:55:00 -0700398 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
399 checkResult = check and checkResult
400 utilities.assert_equals( expect=main.TRUE,
401 actual=checkResult,
402 onpass="Successfully set config",
403 onfail="Failed to set config" )
404 else:
405 main.log.warn( "No configurations were specified to be changed after startup" )
406
Jon Hallca319892017-06-15 15:25:22 -0700407 main.step( "Check app ids" )
408 appCheck = self.appCheck()
409 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700410 onpass="App Ids seem to be correct",
411 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700412
Jon Hallca319892017-06-15 15:25:22 -0700413 def commonChecks( self ):
414 # TODO: make this assertable or assert in here?
415 self.topicsCheck()
416 self.partitionsCheck()
417 self.pendingMapCheck()
418 self.appCheck()
419
420 def topicsCheck( self, extraTopics=[] ):
421 """
422 Check for work partition topics in leaders output
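        Returns True if any expected topic is missing from the leaders output.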
423 """
424 leaders = main.Cluster.next().leaders()
425 missing = False
426 try:
427 if leaders:
428 parsedLeaders = json.loads( leaders )
429 output = json.dumps( parsedLeaders,
430 sort_keys=True,
431 indent=4,
432 separators=( ',', ': ' ) )
433 main.log.debug( "Leaders: " + output )
434 # check for all intent partitions
435 topics = []
436 for i in range( 14 ):
437 topics.append( "work-partition-" + str( i ) )
438 topics += extraTopics
439 main.log.debug( topics )
440 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
441 for topic in topics:
442 if topic not in ONOStopics:
443 main.log.error( "Error: " + topic +
444 " not in leaders" )
445 missing = True
446 else:
447 main.log.error( "leaders() returned None" )
448 except ( ValueError, TypeError ):
449 main.log.exception( "Error parsing leaders" )
450 main.log.error( repr( leaders ) )
451 if missing:
Jon Hall4173b242017-09-12 17:04:38 -0700452 # NOTE Can we refactor this into the Cluster class?
453 # Maybe an option to print the output of a command from each node?
Jon Hallca319892017-06-15 15:25:22 -0700454 for ctrl in main.Cluster.active():
455 response = ctrl.CLI.leaders( jsonFormat=False )
456 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
457 str( response ) )
458 return missing
459
460 def partitionsCheck( self ):
461 # TODO: return something assertable
462 partitions = main.Cluster.next().partitions()
463 try:
464 if partitions:
465 parsedPartitions = json.loads( partitions )
466 output = json.dumps( parsedPartitions,
467 sort_keys=True,
468 indent=4,
469 separators=( ',', ': ' ) )
470 main.log.debug( "Partitions: " + output )
                # TODO check for a leader in all partitions
472 # TODO check for consistency among nodes
473 else:
474 main.log.error( "partitions() returned None" )
475 except ( ValueError, TypeError ):
476 main.log.exception( "Error parsing partitions" )
477 main.log.error( repr( partitions ) )
478
479 def pendingMapCheck( self ):
480 pendingMap = main.Cluster.next().pendingMap()
481 try:
482 if pendingMap:
483 parsedPending = json.loads( pendingMap )
484 output = json.dumps( parsedPending,
485 sort_keys=True,
486 indent=4,
487 separators=( ',', ': ' ) )
488 main.log.debug( "Pending map: " + output )
489 # TODO check something here?
490 else:
491 main.log.error( "pendingMap() returned None" )
492 except ( ValueError, TypeError ):
493 main.log.exception( "Error parsing pending map" )
494 main.log.error( repr( pendingMap ) )
495
496 def appCheck( self ):
497 """
498 Check App IDs on all nodes
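        Returns True only if the appToIDCheck command passes on every node.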
499 """
        # FIXME: Rename this to appIDCheck? or add a check for installed apps
501 appResults = main.Cluster.command( "appToIDCheck" )
502 appCheck = all( i == main.TRUE for i in appResults )
503 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700504 ctrl = main.Cluster.active( 0 )
Jon Hallca319892017-06-15 15:25:22 -0700505 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
506 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
507 return appCheck
508
Jon Halle0f0b342017-04-18 11:43:47 -0700509 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
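        """
        Check that every node reports the expected completed, in progress, and
        pending totals for the given work queue.
        """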
510 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700511 completedValues = main.Cluster.command( "workQueueTotalCompleted",
512 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700513 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700514 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700515 completedResult = all( completedResults )
516 if not completedResult:
517 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
518 workQueueName, completed, completedValues ) )
519
520 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700521 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
522 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700523 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700524 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700525 inProgressResult = all( inProgressResults )
526 if not inProgressResult:
527 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
528 workQueueName, inProgress, inProgressValues ) )
529
530 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700531 pendingValues = main.Cluster.command( "workQueueTotalPending",
532 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700533 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700534 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700535 pendingResult = all( pendingResults )
536 if not pendingResult:
537 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
538 workQueueName, pending, pendingValues ) )
539 return completedResult and inProgressResult and pendingResult
540
Devin Lim58046fa2017-07-05 16:55:00 -0700541 def assignDevices( self, main ):
542 """
543 Assign devices to controllers
544 """
545 import re
Devin Lim58046fa2017-07-05 16:55:00 -0700546 assert main, "main not defined"
547 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700548
549 main.case( "Assigning devices to controllers" )
550 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
551 "and check that an ONOS node becomes the " + \
552 "master of the device."
553 main.step( "Assign switches to controllers" )
554
Jon Hallca319892017-06-15 15:25:22 -0700555 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -0700556 swList = []
557 for i in range( 1, 29 ):
558 swList.append( "s" + str( i ) )
559 main.Mininet1.assignSwController( sw=swList, ip=ipList )
560
561 mastershipCheck = main.TRUE
562 for i in range( 1, 29 ):
563 response = main.Mininet1.getSwController( "s" + str( i ) )
564 try:
565 main.log.info( str( response ) )
566 except Exception:
567 main.log.info( repr( response ) )
Devin Lim142b5342017-07-20 15:22:39 -0700568 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -0700569 if re.search( "tcp:" + ctrl.ipAddress, response ):
Devin Lim58046fa2017-07-05 16:55:00 -0700570 mastershipCheck = mastershipCheck and main.TRUE
571 else:
                    main.log.error( "Error, node " + repr( ctrl ) + " is " +
                                    "not in the list of controllers that s" +
                                    str( i ) + " is connecting to." )
575 mastershipCheck = main.FALSE
576 utilities.assert_equals(
577 expect=main.TRUE,
578 actual=mastershipCheck,
579 onpass="Switch mastership assigned correctly",
580 onfail="Switches not assigned correctly to controllers" )
Jon Hallca319892017-06-15 15:25:22 -0700581
Devin Lim58046fa2017-07-05 16:55:00 -0700582 def assignIntents( self, main ):
583 """
584 Assign intents
585 """
586 import time
587 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700588 assert main, "main not defined"
589 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700590 try:
591 main.HAlabels
592 except ( NameError, AttributeError ):
593 main.log.error( "main.HAlabels not defined, setting to []" )
594 main.HAlabels = []
595 try:
596 main.HAdata
597 except ( NameError, AttributeError ):
            main.log.error( "main.HAdata not defined, setting to []" )
599 main.HAdata = []
600 main.case( "Adding host Intents" )
601 main.caseExplanation = "Discover hosts by using pingall then " +\
602 "assign predetermined host-to-host intents." +\
603 " After installation, check that the intent" +\
604 " is distributed to all nodes and the state" +\
605 " is INSTALLED"
606
607 # install onos-app-fwd
608 main.step( "Install reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700609 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -0700610 installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700611 utilities.assert_equals( expect=main.TRUE, actual=installResults,
612 onpass="Install fwd successful",
613 onfail="Install fwd failed" )
614
615 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700616 appCheck = self.appCheck()
617 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700618 onpass="App Ids seem to be correct",
619 onfail="Something is wrong with app Ids" )
620
621 main.step( "Discovering Hosts( Via pingall for now )" )
622 # FIXME: Once we have a host discovery mechanism, use that instead
623 # REACTIVE FWD test
624 pingResult = main.FALSE
625 passMsg = "Reactive Pingall test passed"
626 time1 = time.time()
627 pingResult = main.Mininet1.pingall()
628 time2 = time.time()
629 if not pingResult:
630 main.log.warn( "First pingall failed. Trying again..." )
631 pingResult = main.Mininet1.pingall()
632 passMsg += " on the second try"
633 utilities.assert_equals(
634 expect=main.TRUE,
635 actual=pingResult,
636 onpass=passMsg,
637 onfail="Reactive Pingall failed, " +
638 "one or more ping pairs failed" )
639 main.log.info( "Time for pingall: %2f seconds" %
640 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700641 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700642 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700643 # timeout for fwd flows
644 time.sleep( 11 )
645 # uninstall onos-app-fwd
646 main.step( "Uninstall reactive forwarding app" )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700647 uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700648 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
649 onpass="Uninstall fwd successful",
650 onfail="Uninstall fwd failed" )
651
652 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700653 appCheck2 = self.appCheck()
654 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700655 onpass="App Ids seem to be correct",
656 onfail="Something is wrong with app Ids" )
657
658 main.step( "Add host intents via cli" )
659 intentIds = []
660 # TODO: move the host numbers to params
661 # Maybe look at all the paths we ping?
662 intentAddResult = True
663 hostResult = main.TRUE
664 for i in range( 8, 18 ):
665 main.log.info( "Adding host intent between h" + str( i ) +
666 " and h" + str( i + 10 ) )
667 host1 = "00:00:00:00:00:" + \
668 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
669 host2 = "00:00:00:00:00:" + \
670 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
671 # NOTE: getHost can return None
Jon Hallca319892017-06-15 15:25:22 -0700672 host1Dict = onosCli.CLI.getHost( host1 )
673 host2Dict = onosCli.CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700674 host1Id = None
675 host2Id = None
676 if host1Dict and host2Dict:
677 host1Id = host1Dict.get( 'id', None )
678 host2Id = host2Dict.get( 'id', None )
679 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700680 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700681 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700682 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700683 if tmpId:
684 main.log.info( "Added intent with id: " + tmpId )
685 intentIds.append( tmpId )
686 else:
687 main.log.error( "addHostIntent returned: " +
688 repr( tmpId ) )
689 else:
690 main.log.error( "Error, getHost() failed for h" + str( i ) +
691 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700692 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700693 try:
Jon Hallca319892017-06-15 15:25:22 -0700694 output = json.dumps( json.loads( hosts ),
695 sort_keys=True,
696 indent=4,
697 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700698 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700699 output = repr( hosts )
700 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700701 hostResult = main.FALSE
702 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
703 onpass="Found a host id for each host",
704 onfail="Error looking up host ids" )
705
706 intentStart = time.time()
707 onosIds = onosCli.getAllIntentsId()
708 main.log.info( "Submitted intents: " + str( intentIds ) )
709 main.log.info( "Intents in ONOS: " + str( onosIds ) )
710 for intent in intentIds:
711 if intent in onosIds:
712 pass # intent submitted is in onos
713 else:
714 intentAddResult = False
715 if intentAddResult:
716 intentStop = time.time()
717 else:
718 intentStop = None
719 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700720 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700721 intentStates = []
722 installedCheck = True
723 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
724 count = 0
725 try:
726 for intent in json.loads( intents ):
727 state = intent.get( 'state', None )
728 if "INSTALLED" not in state:
729 installedCheck = False
730 intentId = intent.get( 'id', None )
731 intentStates.append( ( intentId, state ) )
732 except ( ValueError, TypeError ):
733 main.log.exception( "Error parsing intents" )
734 # add submitted intents not in the store
735 tmplist = [ i for i, s in intentStates ]
736 missingIntents = False
737 for i in intentIds:
738 if i not in tmplist:
739 intentStates.append( ( i, " - " ) )
740 missingIntents = True
741 intentStates.sort()
742 for i, s in intentStates:
743 count += 1
744 main.log.info( "%-6s%-15s%-15s" %
745 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700746 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700747
748 intentAddResult = bool( intentAddResult and not missingIntents and
749 installedCheck )
750 if not intentAddResult:
751 main.log.error( "Error in pushing host intents to ONOS" )
752
753 main.step( "Intent Anti-Entropy dispersion" )
754 for j in range( 100 ):
755 correct = True
756 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700757 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700758 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700759 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700760 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700761 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700762 str( sorted( onosIds ) ) )
763 if sorted( ids ) != sorted( intentIds ):
764 main.log.warn( "Set of intent IDs doesn't match" )
765 correct = False
766 break
767 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700768 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700769 for intent in intents:
770 if intent[ 'state' ] != "INSTALLED":
771 main.log.warn( "Intent " + intent[ 'id' ] +
772 " is " + intent[ 'state' ] )
773 correct = False
774 break
775 if correct:
776 break
777 else:
778 time.sleep( 1 )
779 if not intentStop:
780 intentStop = time.time()
781 global gossipTime
782 gossipTime = intentStop - intentStart
783 main.log.info( "It took about " + str( gossipTime ) +
784 " seconds for all intents to appear in each node" )
785 append = False
786 title = "Gossip Intents"
787 count = 1
788 while append is False:
789 curTitle = title + str( count )
790 if curTitle not in main.HAlabels:
791 main.HAlabels.append( curTitle )
792 main.HAdata.append( str( gossipTime ) )
793 append = True
794 else:
795 count += 1
796 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
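        # Allow up to one gossip period per running node for anti-entropy to
        # spread the intents to every member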
Devin Lim142b5342017-07-20 15:22:39 -0700797 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700798 utilities.assert_greater_equals(
799 expect=maxGossipTime, actual=gossipTime,
800 onpass="ECM anti-entropy for intents worked within " +
801 "expected time",
802 onfail="Intent ECM anti-entropy took too long. " +
803 "Expected time:{}, Actual time:{}".format( maxGossipTime,
804 gossipTime ) )
805 if gossipTime <= maxGossipTime:
806 intentAddResult = True
807
Jon Hallca319892017-06-15 15:25:22 -0700808 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700809 if not intentAddResult or "key" in pendingMap:
810 import time
811 installedCheck = True
812 main.log.info( "Sleeping 60 seconds to see if intents are found" )
813 time.sleep( 60 )
814 onosIds = onosCli.getAllIntentsId()
815 main.log.info( "Submitted intents: " + str( intentIds ) )
816 main.log.info( "Intents in ONOS: " + str( onosIds ) )
817 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700818 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700819 intentStates = []
820 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
821 count = 0
822 try:
823 for intent in json.loads( intents ):
824 # Iter through intents of a node
825 state = intent.get( 'state', None )
826 if "INSTALLED" not in state:
827 installedCheck = False
828 intentId = intent.get( 'id', None )
829 intentStates.append( ( intentId, state ) )
830 except ( ValueError, TypeError ):
831 main.log.exception( "Error parsing intents" )
832 # add submitted intents not in the store
833 tmplist = [ i for i, s in intentStates ]
834 for i in intentIds:
835 if i not in tmplist:
836 intentStates.append( ( i, " - " ) )
837 intentStates.sort()
838 for i, s in intentStates:
839 count += 1
840 main.log.info( "%-6s%-15s%-15s" %
841 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700842 self.topicsCheck( [ "org.onosproject.election" ] )
843 self.partitionsCheck()
844 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700845
Jon Hallca319892017-06-15 15:25:22 -0700846 def pingAcrossHostIntent( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -0700847 """
848 Ping across added host intents
849 """
850 import json
851 import time
Devin Lim58046fa2017-07-05 16:55:00 -0700852 assert main, "main not defined"
853 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700854 main.case( "Verify connectivity by sending traffic across Intents" )
855 main.caseExplanation = "Ping across added host intents to check " +\
856 "functionality and check the state of " +\
857 "the intent"
858
Jon Hallca319892017-06-15 15:25:22 -0700859 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700860 main.step( "Check Intent state" )
861 installedCheck = False
862 loopCount = 0
863 while not installedCheck and loopCount < 40:
864 installedCheck = True
865 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700866 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700867 intentStates = []
868 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
869 count = 0
870 # Iter through intents of a node
871 try:
872 for intent in json.loads( intents ):
873 state = intent.get( 'state', None )
874 if "INSTALLED" not in state:
875 installedCheck = False
Jon Hall8bafdc02017-09-05 11:36:26 -0700876 main.log.debug( "Failed intent: " + str( intent ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700877 intentId = intent.get( 'id', None )
878 intentStates.append( ( intentId, state ) )
879 except ( ValueError, TypeError ):
880 main.log.exception( "Error parsing intents." )
881 # Print states
882 intentStates.sort()
883 for i, s in intentStates:
884 count += 1
885 main.log.info( "%-6s%-15s%-15s" %
886 ( str( count ), str( i ), str( s ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700887 if not installedCheck:
888 time.sleep( 1 )
889 loopCount += 1
890 utilities.assert_equals( expect=True, actual=installedCheck,
891 onpass="Intents are all INSTALLED",
892 onfail="Intents are not all in " +
893 "INSTALLED state" )
894
895 main.step( "Ping across added host intents" )
896 PingResult = main.TRUE
897 for i in range( 8, 18 ):
898 ping = main.Mininet1.pingHost( src="h" + str( i ),
899 target="h" + str( i + 10 ) )
900 PingResult = PingResult and ping
901 if ping == main.FALSE:
902 main.log.warn( "Ping failed between h" + str( i ) +
903 " and h" + str( i + 10 ) )
904 elif ping == main.TRUE:
905 main.log.info( "Ping test passed!" )
906 # Don't set PingResult or you'd override failures
907 if PingResult == main.FALSE:
908 main.log.error(
909 "Intents have not been installed correctly, pings failed." )
910 # TODO: pretty print
Devin Lim58046fa2017-07-05 16:55:00 -0700911 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700912 tmpIntents = onosCli.CLI.intents()
Jon Hallca319892017-06-15 15:25:22 -0700913 output = json.dumps( json.loads( tmpIntents ),
914 sort_keys=True,
915 indent=4,
916 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700917 except ( ValueError, TypeError ):
Jon Hall4173b242017-09-12 17:04:38 -0700918 output = repr( tmpIntents )
Jon Hallca319892017-06-15 15:25:22 -0700919 main.log.debug( "ONOS1 intents: " + output )
Devin Lim58046fa2017-07-05 16:55:00 -0700920 utilities.assert_equals(
921 expect=main.TRUE,
922 actual=PingResult,
923 onpass="Intents have been installed correctly and pings work",
924 onfail="Intents have not been installed correctly, pings failed." )
925
926 main.step( "Check leadership of topics" )
Jon Hallca319892017-06-15 15:25:22 -0700927 topicsCheck = self.topicsCheck()
928 utilities.assert_equals( expect=False, actual=topicsCheck,
                                 onpass="All expected topics are in leaders",
Jon Hallca319892017-06-15 15:25:22 -0700930 onfail="Some topics were lost" )
931 self.partitionsCheck()
932 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700933
934 if not installedCheck:
935 main.log.info( "Waiting 60 seconds to see if the state of " +
936 "intents change" )
937 time.sleep( 60 )
938 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700939 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700940 intentStates = []
941 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
942 count = 0
943 # Iter through intents of a node
944 try:
945 for intent in json.loads( intents ):
946 state = intent.get( 'state', None )
947 if "INSTALLED" not in state:
948 installedCheck = False
949 intentId = intent.get( 'id', None )
950 intentStates.append( ( intentId, state ) )
951 except ( ValueError, TypeError ):
952 main.log.exception( "Error parsing intents." )
953 intentStates.sort()
954 for i, s in intentStates:
955 count += 1
956 main.log.info( "%-6s%-15s%-15s" %
957 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700958 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700959
Devin Lim58046fa2017-07-05 16:55:00 -0700960 # Print flowrules
Devin Lime9f0ccf2017-08-11 17:25:12 -0700961 main.log.debug( onosCli.CLI.flows() )
Devin Lim58046fa2017-07-05 16:55:00 -0700962 main.step( "Wait a minute then ping again" )
963 # the wait is above
964 PingResult = main.TRUE
965 for i in range( 8, 18 ):
966 ping = main.Mininet1.pingHost( src="h" + str( i ),
967 target="h" + str( i + 10 ) )
968 PingResult = PingResult and ping
969 if ping == main.FALSE:
970 main.log.warn( "Ping failed between h" + str( i ) +
971 " and h" + str( i + 10 ) )
972 elif ping == main.TRUE:
973 main.log.info( "Ping test passed!" )
974 # Don't set PingResult or you'd override failures
975 if PingResult == main.FALSE:
976 main.log.error(
977 "Intents have not been installed correctly, pings failed." )
978 # TODO: pretty print
Jon Hallca319892017-06-15 15:25:22 -0700979 main.log.warn( str( onosCli.name ) + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -0700980 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700981 tmpIntents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700982 main.log.warn( json.dumps( json.loads( tmpIntents ),
983 sort_keys=True,
984 indent=4,
985 separators=( ',', ': ' ) ) )
986 except ( ValueError, TypeError ):
987 main.log.warn( repr( tmpIntents ) )
988 utilities.assert_equals(
989 expect=main.TRUE,
990 actual=PingResult,
991 onpass="Intents have been installed correctly and pings work",
992 onfail="Intents have not been installed correctly, pings failed." )
993
Devin Lim142b5342017-07-20 15:22:39 -0700994 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700995 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700996 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700997 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700998 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700999 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07001000 actual=rolesNotNull,
1001 onpass="Each device has a master",
1002 onfail="Some devices don't have a master assigned" )
1003
Devin Lim142b5342017-07-20 15:22:39 -07001004 def checkTheRole( self ):
1005 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -07001006 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -07001007 consistentMastership = True
1008 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001009 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001010 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001011 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001012 main.log.error( "Error in getting " + node + " roles" )
1013 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001014 repr( ONOSMastership[ i ] ) )
1015 rolesResults = False
1016 utilities.assert_equals(
1017 expect=True,
1018 actual=rolesResults,
1019 onpass="No error in reading roles output",
1020 onfail="Error in reading roles from ONOS" )
1021
1022 main.step( "Check for consistency in roles from each controller" )
1023 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1024 main.log.info(
1025 "Switch roles are consistent across all ONOS nodes" )
1026 else:
1027 consistentMastership = False
1028 utilities.assert_equals(
1029 expect=True,
1030 actual=consistentMastership,
1031 onpass="Switch roles are consistent across all ONOS nodes",
1032 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -07001033 return ONOSMastership, rolesResults, consistentMastership
1034
1035 def checkingIntents( self ):
1036 main.step( "Get the intents from each controller" )
1037 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
1038 intentsResults = True
1039 for i in range( len( ONOSIntents ) ):
1040 node = str( main.Cluster.active( i ) )
1041 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1042 main.log.error( "Error in getting " + node + " intents" )
1043 main.log.warn( node + " intents response: " +
1044 repr( ONOSIntents[ i ] ) )
1045 intentsResults = False
1046 utilities.assert_equals(
1047 expect=True,
1048 actual=intentsResults,
1049 onpass="No error in reading intents output",
1050 onfail="Error in reading intents from ONOS" )
1051 return ONOSIntents, intentsResults
1052
1053 def readingState( self, main ):
1054 """
1055 Reading state of ONOS
1056 """
1057 import json
1058 import time
1059 assert main, "main not defined"
1060 assert utilities.assert_equals, "utilities.assert_equals not defined"
1061 try:
1062 from tests.dependencies.topology import Topology
1063 except ImportError:
            main.log.error( "Topology not found, exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001065 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001066 try:
1067 main.topoRelated
1068 except ( NameError, AttributeError ):
1069 main.topoRelated = Topology()
1070 main.case( "Setting up and gathering data for current state" )
1071 # The general idea for this test case is to pull the state of
1072 # ( intents,flows, topology,... ) from each ONOS node
1073 # We can then compare them with each other and also with past states
1074
1075 global mastershipState
1076 mastershipState = '[]'
1077
1078 self.checkRoleNotNull()
1079
1080 main.step( "Get the Mastership of each switch from each controller" )
1081 mastershipCheck = main.FALSE
1082
        ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001084
1085 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001086 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001087 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001088 try:
1089 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001090 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001091 json.dumps(
1092 json.loads( ONOSMastership[ i ] ),
1093 sort_keys=True,
1094 indent=4,
1095 separators=( ',', ': ' ) ) )
1096 except ( ValueError, TypeError ):
1097 main.log.warn( repr( ONOSMastership[ i ] ) )
1098 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001099 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001100 mastershipState = ONOSMastership[ 0 ]
1101
Devin Lim58046fa2017-07-05 16:55:00 -07001102 global intentState
1103 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001104 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001105 intentCheck = main.FALSE
1106 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001107
Devin Lim58046fa2017-07-05 16:55:00 -07001108 main.step( "Check for consistency in Intents from each controller" )
1109 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1110 main.log.info( "Intents are consistent across all ONOS " +
1111 "nodes" )
1112 else:
1113 consistentIntents = False
1114 main.log.error( "Intents not consistent" )
1115 utilities.assert_equals(
1116 expect=True,
1117 actual=consistentIntents,
1118 onpass="Intents are consistent across all ONOS nodes",
1119 onfail="ONOS nodes have different views of intents" )
1120
1121 if intentsResults:
1122 # Try to make it easy to figure out what is happening
1123 #
1124 # Intent ONOS1 ONOS2 ...
1125 # 0x01 INSTALLED INSTALLING
1126 # ... ... ...
1127 # ... ... ...
1128 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001129 for ctrl in main.Cluster.active():
1130 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001131 main.log.warn( title )
1132 # get all intent keys in the cluster
1133 keys = []
1134 try:
1135 # Get the set of all intent keys
1136 for nodeStr in ONOSIntents:
1137 node = json.loads( nodeStr )
1138 for intent in node:
1139 keys.append( intent.get( 'id' ) )
1140 keys = set( keys )
1141 # For each intent key, print the state on each node
1142 for key in keys:
1143 row = "%-13s" % key
1144 for nodeStr in ONOSIntents:
1145 node = json.loads( nodeStr )
1146 for intent in node:
1147 if intent.get( 'id', "Error" ) == key:
1148 row += "%-15s" % intent.get( 'state' )
1149 main.log.warn( row )
1150 # End of intent state table
1151 except ValueError as e:
1152 main.log.exception( e )
1153 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1154
1155 if intentsResults and not consistentIntents:
1156 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001157 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001158 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1159 sort_keys=True,
1160 indent=4,
1161 separators=( ',', ': ' ) ) )
1162 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001163 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001164 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001165 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001166 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1167 sort_keys=True,
1168 indent=4,
1169 separators=( ',', ': ' ) ) )
1170 else:
Jon Hallca319892017-06-15 15:25:22 -07001171 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001172 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001173 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001174 intentState = ONOSIntents[ 0 ]
1175
1176 main.step( "Get the flows from each controller" )
1177 global flowState
1178 flowState = []
Jon Hall4173b242017-09-12 17:04:38 -07001179 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001180 ONOSFlowsJson = []
1181 flowCheck = main.FALSE
1182 consistentFlows = True
1183 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001184 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001185 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001186 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001187 main.log.error( "Error in getting " + node + " flows" )
1188 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001189 repr( ONOSFlows[ i ] ) )
1190 flowsResults = False
1191 ONOSFlowsJson.append( None )
1192 else:
1193 try:
1194 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1195 except ( ValueError, TypeError ):
1196 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001197 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001198 " response as json." )
1199 main.log.error( repr( ONOSFlows[ i ] ) )
1200 ONOSFlowsJson.append( None )
1201 flowsResults = False
1202 utilities.assert_equals(
1203 expect=True,
1204 actual=flowsResults,
1205 onpass="No error in reading flows output",
1206 onfail="Error in reading flows from ONOS" )
1207
1208 main.step( "Check for consistency in Flows from each controller" )
1209 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1210 if all( tmp ):
1211 main.log.info( "Flow count is consistent across all ONOS nodes" )
1212 else:
1213 consistentFlows = False
1214 utilities.assert_equals(
1215 expect=True,
1216 actual=consistentFlows,
1217 onpass="The flow count is consistent across all ONOS nodes",
1218 onfail="ONOS nodes have different flow counts" )
1219
1220 if flowsResults and not consistentFlows:
1221 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001222 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001223 try:
1224 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001225 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001226 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1227 indent=4, separators=( ',', ': ' ) ) )
1228 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001229 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001230 repr( ONOSFlows[ i ] ) )
1231 elif flowsResults and consistentFlows:
1232 flowCheck = main.TRUE
1233 flowState = ONOSFlows[ 0 ]
1234
1235 main.step( "Get the OF Table entries" )
1236 global flows
1237 flows = []
1238 for i in range( 1, 29 ):
1239 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1240 if flowCheck == main.FALSE:
1241 for table in flows:
1242 main.log.warn( table )
1243 # TODO: Compare switch flow tables with ONOS flow tables
1244
1245 main.step( "Start continuous pings" )
        # The ten ping pairs are defined as sourceN/targetN in params, so
        # start them in a loop instead of ten copy/pasted calls.
        for i in range( 1, 11 ):
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source' + str( i ) ],
                target=main.params[ 'PING' ][ 'target' + str( i ) ],
                pingTime=500 )
1286
1287 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001288 devices = main.topoRelated.getAll( "devices" )
1289 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1290 ports = main.topoRelated.getAll( "ports" )
1291 links = main.topoRelated.getAll( "links" )
1292 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001293 # Compare json objects for hosts and dataplane clusters
1294
1295 # hosts
1296 main.step( "Host view is consistent across ONOS nodes" )
1297 consistentHostsResult = main.TRUE
1298 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001299 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001300 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1301 if hosts[ controller ] == hosts[ 0 ]:
1302 continue
1303 else: # hosts not consistent
                    main.log.error( "hosts from " + controllerStr +
                                    " are inconsistent with ONOS1" )
1307 main.log.warn( repr( hosts[ controller ] ) )
1308 consistentHostsResult = main.FALSE
1309
1310 else:
Jon Hallca319892017-06-15 15:25:22 -07001311 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001312 controllerStr )
1313 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001314 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001315 " hosts response: " +
1316 repr( hosts[ controller ] ) )
1317 utilities.assert_equals(
1318 expect=main.TRUE,
1319 actual=consistentHostsResult,
1320 onpass="Hosts view is consistent across all ONOS nodes",
1321 onfail="ONOS nodes have different views of hosts" )
1322
1323 main.step( "Each host has an IP address" )
1324 ipResult = main.TRUE
1325 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001326 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001327 if hosts[ controller ]:
1328 for host in hosts[ controller ]:
1329 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001330 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001331 controllerStr + ": " + str( host ) )
1332 ipResult = main.FALSE
1333 utilities.assert_equals(
1334 expect=main.TRUE,
1335 actual=ipResult,
            onpass="Every host has at least one IP address",
            onfail="At least one host is missing an IP address" )
1338
1339 # Strongly connected clusters of devices
1340 main.step( "Cluster view is consistent across ONOS nodes" )
1341 consistentClustersResult = main.TRUE
1342 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001343 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001344 if "Error" not in clusters[ controller ]:
1345 if clusters[ controller ] == clusters[ 0 ]:
1346 continue
1347 else: # clusters not consistent
                    main.log.error( "clusters from " + controllerStr +
                                    " are inconsistent with ONOS1" )
1350 consistentClustersResult = main.FALSE
1351
1352 else:
1353 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001354 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001355 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001356 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001357 " clusters response: " +
1358 repr( clusters[ controller ] ) )
1359 utilities.assert_equals(
1360 expect=main.TRUE,
1361 actual=consistentClustersResult,
1362 onpass="Clusters view is consistent across all ONOS nodes",
1363 onfail="ONOS nodes have different views of clusters" )
1364 if not consistentClustersResult:
1365 main.log.debug( clusters )
1366
1367 # there should always only be one cluster
1368 main.step( "Cluster view correct across ONOS nodes" )
1369 try:
1370 numClusters = len( json.loads( clusters[ 0 ] ) )
1371 except ( ValueError, TypeError ):
1372 main.log.exception( "Error parsing clusters[0]: " +
1373 repr( clusters[ 0 ] ) )
1374 numClusters = "ERROR"
1375 utilities.assert_equals(
1376 expect=1,
1377 actual=numClusters,
1378 onpass="ONOS shows 1 SCC",
1379 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
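        # When more than one SCC is reported, dumping the raw cluster output
        # per node makes the split easier to diagnose. Logging only; this is
        # an illustrative addition rather than part of the original check.
        if numClusters != 1:
            for controller in range( len( clusters ) ):
                main.log.debug( str( main.Cluster.active( controller ) ) +
                                " clusters: " + repr( clusters[ controller ] ) )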
1380
1381 main.step( "Comparing ONOS topology to MN" )
1382 devicesResults = main.TRUE
1383 linksResults = main.TRUE
1384 hostsResults = main.TRUE
1385 mnSwitches = main.Mininet1.getSwitches()
1386 mnLinks = main.Mininet1.getLinks()
1387 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001388 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001389 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001390 currentDevicesResult = main.topoRelated.compareDevicePort(
1391 main.Mininet1, controller,
1392 mnSwitches, devices, ports )
1393 utilities.assert_equals( expect=main.TRUE,
1394 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001395 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001396 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001397 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001398 " Switches view is incorrect" )
1399
1400 currentLinksResult = main.topoRelated.compareBase( links, controller,
1401 main.Mininet1.compareLinks,
1402 [ mnSwitches, mnLinks ] )
1403 utilities.assert_equals( expect=main.TRUE,
1404 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001405 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001406 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001407 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001408 " links view is incorrect" )
1409
1410 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1411 currentHostsResult = main.Mininet1.compareHosts(
1412 mnHosts,
1413 hosts[ controller ] )
1414 else:
1415 currentHostsResult = main.FALSE
1416 utilities.assert_equals( expect=main.TRUE,
1417 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001418 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001419 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001420 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001421 " hosts don't match Mininet" )
1422
1423 devicesResults = devicesResults and currentDevicesResult
1424 linksResults = linksResults and currentLinksResult
1425 hostsResults = hostsResults and currentHostsResult
1426
1427 main.step( "Device information is correct" )
1428 utilities.assert_equals(
1429 expect=main.TRUE,
1430 actual=devicesResults,
1431 onpass="Device information is correct",
1432 onfail="Device information is incorrect" )
1433
1434 main.step( "Links are correct" )
1435 utilities.assert_equals(
1436 expect=main.TRUE,
1437 actual=linksResults,
            onpass="Links are correct",
1439 onfail="Links are incorrect" )
1440
1441 main.step( "Hosts are correct" )
1442 utilities.assert_equals(
1443 expect=main.TRUE,
1444 actual=hostsResults,
1445 onpass="Hosts are correct",
1446 onfail="Hosts are incorrect" )
1447
1448 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001449 """
1450 Check for basic functionality with distributed primitives
1451 """
Jon Halle0f0b342017-04-18 11:43:47 -07001452 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001453 try:
1454 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001455 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001456 assert main.pCounterName, "main.pCounterName not defined"
1457 assert main.onosSetName, "main.onosSetName not defined"
1458 # NOTE: assert fails if value is 0/None/Empty/False
1459 try:
1460 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001461 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001462 main.log.error( "main.pCounterValue not defined, setting to 0" )
1463 main.pCounterValue = 0
1464 try:
1465 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001466 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001467 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001468 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001469 # Variables for the distributed primitives tests. These are local only
1470 addValue = "a"
1471 addAllValue = "a b c d e f"
1472 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001473 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001474 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001475 workQueueName = "TestON-Queue"
1476 workQueueCompleted = 0
1477 workQueueInProgress = 0
1478 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001479
1480 description = "Check for basic functionality with distributed " +\
1481 "primitives"
1482 main.case( description )
1483 main.caseExplanation = "Test the methods of the distributed " +\
                                  "primitives (counters and sets) through the CLI"
1485 # DISTRIBUTED ATOMIC COUNTERS
1486 # Partitioned counters
1487 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001488 pCounters = main.Cluster.command( "counterTestAddAndGet",
1489 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001490 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001491 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001492 main.pCounterValue += 1
1493 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001494 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001495 pCounterResults = True
1496 for i in addedPValues:
1497 tmpResult = i in pCounters
1498 pCounterResults = pCounterResults and tmpResult
1499 if not tmpResult:
1500 main.log.error( str( i ) + " is not in partitioned "
1501 "counter incremented results" )
1502 utilities.assert_equals( expect=True,
1503 actual=pCounterResults,
1504 onpass="Default counter incremented",
1505 onfail="Error incrementing default" +
1506 " counter" )
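            # A stricter variant of the membership check above (sketch only,
            # not used by the assert): comparing the sorted responses against
            # the expected values would also catch duplicate or extra replies.
            strictPCounterCheck = sorted( pCounters ) == sorted( addedPValues )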
1507
1508 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001509 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1510 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001511 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001512 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001513 addedPValues.append( main.pCounterValue )
1514 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001515 # Check that counter incremented numController times
1516 pCounterResults = True
1517 for i in addedPValues:
1518 tmpResult = i in pCounters
1519 pCounterResults = pCounterResults and tmpResult
1520 if not tmpResult:
1521 main.log.error( str( i ) + " is not in partitioned "
1522 "counter incremented results" )
1523 utilities.assert_equals( expect=True,
1524 actual=pCounterResults,
1525 onpass="Default counter incremented",
1526 onfail="Error incrementing default" +
1527 " counter" )
1528
1529 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001530 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001531 utilities.assert_equals( expect=main.TRUE,
1532 actual=incrementCheck,
1533 onpass="Added counters are correct",
1534 onfail="Added counters are incorrect" )
1535
1536 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001537 pCounters = main.Cluster.command( "counterTestAddAndGet",
1538 args=[ main.pCounterName ],
1539 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001540 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001541 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001542 main.pCounterValue += -8
1543 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001544 # Check that counter incremented numController times
1545 pCounterResults = True
1546 for i in addedPValues:
1547 tmpResult = i in pCounters
1548 pCounterResults = pCounterResults and tmpResult
1549 if not tmpResult:
1550 main.log.error( str( i ) + " is not in partitioned "
1551 "counter incremented results" )
1552 utilities.assert_equals( expect=True,
1553 actual=pCounterResults,
1554 onpass="Default counter incremented",
1555 onfail="Error incrementing default" +
1556 " counter" )
1557
1558 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001559 pCounters = main.Cluster.command( "counterTestAddAndGet",
1560 args=[ main.pCounterName ],
1561 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001562 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001563 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001564 main.pCounterValue += 5
1565 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001566
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001567 # Check that counter incremented numController times
1568 pCounterResults = True
1569 for i in addedPValues:
1570 tmpResult = i in pCounters
1571 pCounterResults = pCounterResults and tmpResult
1572 if not tmpResult:
1573 main.log.error( str( i ) + " is not in partitioned "
1574 "counter incremented results" )
1575 utilities.assert_equals( expect=True,
1576 actual=pCounterResults,
1577 onpass="Default counter incremented",
1578 onfail="Error incrementing default" +
1579 " counter" )
1580
1581 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001582 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1583 args=[ main.pCounterName ],
1584 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001585 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001586 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001587 addedPValues.append( main.pCounterValue )
1588 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001589 # Check that counter incremented numController times
1590 pCounterResults = True
1591 for i in addedPValues:
1592 tmpResult = i in pCounters
1593 pCounterResults = pCounterResults and tmpResult
1594 if not tmpResult:
1595 main.log.error( str( i ) + " is not in partitioned "
1596 "counter incremented results" )
1597 utilities.assert_equals( expect=True,
1598 actual=pCounterResults,
1599 onpass="Default counter incremented",
1600 onfail="Error incrementing default" +
1601 " counter" )
1602
1603 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001604 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001605 utilities.assert_equals( expect=main.TRUE,
1606 actual=incrementCheck,
1607 onpass="Added counters are correct",
1608 onfail="Added counters are incorrect" )
1609
1610 # DISTRIBUTED SETS
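            # The get/size verification below is repeated nearly verbatim after
            # every set mutation (see the cut/paste TODO above). A minimal
            # sketch of a local helper that could replace those blocks; it is
            # defined here for illustration only and is not called by the test
            # as written:
            def checkSetView( expectedSet, setName ):
                responses = main.Cluster.command( "setTestGet", args=[ setName ] )
                sizes = main.Cluster.command( "setTestSize", args=[ setName ] )
                result = main.TRUE
                for i in range( len( main.Cluster.active() ) ):
                    node = str( main.Cluster.active( i ) )
                    if isinstance( responses[ i ], list ):
                        current = set( responses[ i ] )
                        if len( current ) != len( responses[ i ] ):
                            main.log.error( node + " has repeat elements in set " + setName )
                            result = main.FALSE
                        elif current != expectedSet:
                            main.log.error( node + " has an incorrect view of set " + setName )
                            result = main.FALSE
                    else:
                        result = main.FALSE
                    if sizes[ i ] != len( expectedSet ):
                        main.log.error( node + " reports the wrong size for set " + setName )
                        result = main.FALSE
                return result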
1611 main.step( "Distributed Set get" )
1612 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001613 getResponses = main.Cluster.command( "setTestGet",
1614 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001615 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001616 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001617 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001618 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001619 current = set( getResponses[ i ] )
1620 if len( current ) == len( getResponses[ i ] ):
1621 # no repeats
1622 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001623 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001624 " has incorrect view" +
1625 " of set " + main.onosSetName + ":\n" +
1626 str( getResponses[ i ] ) )
1627 main.log.debug( "Expected: " + str( main.onosSet ) )
1628 main.log.debug( "Actual: " + str( current ) )
1629 getResults = main.FALSE
1630 else:
1631 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001632 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001633 " has repeat elements in" +
1634 " set " + main.onosSetName + ":\n" +
1635 str( getResponses[ i ] ) )
1636 getResults = main.FALSE
1637 elif getResponses[ i ] == main.ERROR:
1638 getResults = main.FALSE
1639 utilities.assert_equals( expect=main.TRUE,
1640 actual=getResults,
1641 onpass="Set elements are correct",
1642 onfail="Set elements are incorrect" )
1643
1644 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001645 sizeResponses = main.Cluster.command( "setTestSize",
1646 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001647 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001648 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001649 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001650 if size != sizeResponses[ i ]:
1651 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001652 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001653 " expected a size of " + str( size ) +
1654 " for set " + main.onosSetName +
1655 " but got " + str( sizeResponses[ i ] ) )
1656 utilities.assert_equals( expect=main.TRUE,
1657 actual=sizeResults,
1658 onpass="Set sizes are correct",
1659 onfail="Set sizes are incorrect" )
1660
1661 main.step( "Distributed Set add()" )
1662 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001663 addResponses = main.Cluster.command( "setTestAdd",
1664 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001665 # main.TRUE = successfully changed the set
1666 # main.FALSE = action resulted in no change in set
1667 # main.ERROR - Some error in executing the function
1668 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001669 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001670 if addResponses[ i ] == main.TRUE:
1671 # All is well
1672 pass
1673 elif addResponses[ i ] == main.FALSE:
1674 # Already in set, probably fine
1675 pass
1676 elif addResponses[ i ] == main.ERROR:
1677 # Error in execution
1678 addResults = main.FALSE
1679 else:
1680 # unexpected result
1681 addResults = main.FALSE
1682 if addResults != main.TRUE:
1683 main.log.error( "Error executing set add" )
1684
1685 # Check if set is still correct
1686 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001687 getResponses = main.Cluster.command( "setTestGet",
1688 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001689 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001690 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001691 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001692 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001693 current = set( getResponses[ i ] )
1694 if len( current ) == len( getResponses[ i ] ):
1695 # no repeats
1696 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001697 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001698 " of set " + main.onosSetName + ":\n" +
1699 str( getResponses[ i ] ) )
1700 main.log.debug( "Expected: " + str( main.onosSet ) )
1701 main.log.debug( "Actual: " + str( current ) )
1702 getResults = main.FALSE
1703 else:
1704 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001705 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001706 " set " + main.onosSetName + ":\n" +
1707 str( getResponses[ i ] ) )
1708 getResults = main.FALSE
1709 elif getResponses[ i ] == main.ERROR:
1710 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001711 sizeResponses = main.Cluster.command( "setTestSize",
1712 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001713 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001714 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001715 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001716 if size != sizeResponses[ i ]:
1717 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001718 main.log.error( node + " expected a size of " +
1719 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001720 " but got " + str( sizeResponses[ i ] ) )
1721 addResults = addResults and getResults and sizeResults
1722 utilities.assert_equals( expect=main.TRUE,
1723 actual=addResults,
1724 onpass="Set add correct",
1725 onfail="Set add was incorrect" )
1726
1727 main.step( "Distributed Set addAll()" )
1728 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001729 addResponses = main.Cluster.command( "setTestAdd",
1730 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001731 # main.TRUE = successfully changed the set
1732 # main.FALSE = action resulted in no change in set
1733 # main.ERROR - Some error in executing the function
1734 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001735 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001736 if addResponses[ i ] == main.TRUE:
1737 # All is well
1738 pass
1739 elif addResponses[ i ] == main.FALSE:
1740 # Already in set, probably fine
1741 pass
1742 elif addResponses[ i ] == main.ERROR:
1743 # Error in execution
1744 addAllResults = main.FALSE
1745 else:
1746 # unexpected result
1747 addAllResults = main.FALSE
1748 if addAllResults != main.TRUE:
1749 main.log.error( "Error executing set addAll" )
1750
1751 # Check if set is still correct
1752 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001753 getResponses = main.Cluster.command( "setTestGet",
1754 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001755 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001756 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001757 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001758 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001759 current = set( getResponses[ i ] )
1760 if len( current ) == len( getResponses[ i ] ):
1761 # no repeats
1762 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001763 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001764 " of set " + main.onosSetName + ":\n" +
1765 str( getResponses[ i ] ) )
1766 main.log.debug( "Expected: " + str( main.onosSet ) )
1767 main.log.debug( "Actual: " + str( current ) )
1768 getResults = main.FALSE
1769 else:
1770 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001771 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001772 " set " + main.onosSetName + ":\n" +
1773 str( getResponses[ i ] ) )
1774 getResults = main.FALSE
1775 elif getResponses[ i ] == main.ERROR:
1776 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001777 sizeResponses = main.Cluster.command( "setTestSize",
1778 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001779 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001780 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001781 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001782 if size != sizeResponses[ i ]:
1783 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001784 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001785 " for set " + main.onosSetName +
1786 " but got " + str( sizeResponses[ i ] ) )
1787 addAllResults = addAllResults and getResults and sizeResults
1788 utilities.assert_equals( expect=main.TRUE,
1789 actual=addAllResults,
1790 onpass="Set addAll correct",
1791 onfail="Set addAll was incorrect" )
1792
1793 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001794 containsResponses = main.Cluster.command( "setTestGet",
1795 args=[ main.onosSetName ],
1796 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001797 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001798 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001799 if containsResponses[ i ] == main.ERROR:
1800 containsResults = main.FALSE
1801 else:
1802 containsResults = containsResults and\
1803 containsResponses[ i ][ 1 ]
1804 utilities.assert_equals( expect=main.TRUE,
1805 actual=containsResults,
1806 onpass="Set contains is functional",
1807 onfail="Set contains failed" )
1808
1809 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001810 containsAllResponses = main.Cluster.command( "setTestGet",
1811 args=[ main.onosSetName ],
1812 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001813 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001814 for i in range( len( main.Cluster.active() ) ):
                if containsAllResponses[ i ] == main.ERROR:
                    containsAllResults = main.FALSE
                else:
                    containsAllResults = containsAllResults and\
                                         containsAllResponses[ i ][ 1 ]
1820 utilities.assert_equals( expect=main.TRUE,
1821 actual=containsAllResults,
1822 onpass="Set containsAll is functional",
1823 onfail="Set containsAll failed" )
1824
1825 main.step( "Distributed Set remove()" )
1826 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001827 removeResponses = main.Cluster.command( "setTestRemove",
1828 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001829 # main.TRUE = successfully changed the set
1830 # main.FALSE = action resulted in no change in set
1831 # main.ERROR - Some error in executing the function
1832 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001833 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001834 if removeResponses[ i ] == main.TRUE:
1835 # All is well
1836 pass
1837 elif removeResponses[ i ] == main.FALSE:
1838 # not in set, probably fine
1839 pass
1840 elif removeResponses[ i ] == main.ERROR:
1841 # Error in execution
1842 removeResults = main.FALSE
1843 else:
1844 # unexpected result
1845 removeResults = main.FALSE
1846 if removeResults != main.TRUE:
1847 main.log.error( "Error executing set remove" )
1848
1849 # Check if set is still correct
1850 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001851 getResponses = main.Cluster.command( "setTestGet",
1852 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001853 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001854 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001855 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001856 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001857 current = set( getResponses[ i ] )
1858 if len( current ) == len( getResponses[ i ] ):
1859 # no repeats
1860 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001861 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001862 " of set " + main.onosSetName + ":\n" +
1863 str( getResponses[ i ] ) )
1864 main.log.debug( "Expected: " + str( main.onosSet ) )
1865 main.log.debug( "Actual: " + str( current ) )
1866 getResults = main.FALSE
1867 else:
1868 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001869 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001870 " set " + main.onosSetName + ":\n" +
1871 str( getResponses[ i ] ) )
1872 getResults = main.FALSE
1873 elif getResponses[ i ] == main.ERROR:
1874 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001875 sizeResponses = main.Cluster.command( "setTestSize",
1876 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001877 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001878 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001879 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001880 if size != sizeResponses[ i ]:
1881 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001882 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001883 " for set " + main.onosSetName +
1884 " but got " + str( sizeResponses[ i ] ) )
1885 removeResults = removeResults and getResults and sizeResults
1886 utilities.assert_equals( expect=main.TRUE,
1887 actual=removeResults,
1888 onpass="Set remove correct",
1889 onfail="Set remove was incorrect" )
1890
1891 main.step( "Distributed Set removeAll()" )
1892 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001893 removeAllResponses = main.Cluster.command( "setTestRemove",
1894 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001895 # main.TRUE = successfully changed the set
1896 # main.FALSE = action resulted in no change in set
1897 # main.ERROR - Some error in executing the function
1898 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001899 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001900 if removeAllResponses[ i ] == main.TRUE:
1901 # All is well
1902 pass
1903 elif removeAllResponses[ i ] == main.FALSE:
1904 # not in set, probably fine
1905 pass
1906 elif removeAllResponses[ i ] == main.ERROR:
1907 # Error in execution
1908 removeAllResults = main.FALSE
1909 else:
1910 # unexpected result
1911 removeAllResults = main.FALSE
1912 if removeAllResults != main.TRUE:
1913 main.log.error( "Error executing set removeAll" )
1914
1915 # Check if set is still correct
1916 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001917 getResponses = main.Cluster.command( "setTestGet",
1918 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001919 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001920 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001921 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001922 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001923 current = set( getResponses[ i ] )
1924 if len( current ) == len( getResponses[ i ] ):
1925 # no repeats
1926 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001927 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001928 " of set " + main.onosSetName + ":\n" +
1929 str( getResponses[ i ] ) )
1930 main.log.debug( "Expected: " + str( main.onosSet ) )
1931 main.log.debug( "Actual: " + str( current ) )
1932 getResults = main.FALSE
1933 else:
1934 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001935 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001936 " set " + main.onosSetName + ":\n" +
1937 str( getResponses[ i ] ) )
1938 getResults = main.FALSE
1939 elif getResponses[ i ] == main.ERROR:
1940 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001941 sizeResponses = main.Cluster.command( "setTestSize",
1942 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001943 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001944 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001945 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001946 if size != sizeResponses[ i ]:
1947 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001948 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001949 " for set " + main.onosSetName +
1950 " but got " + str( sizeResponses[ i ] ) )
1951 removeAllResults = removeAllResults and getResults and sizeResults
1952 utilities.assert_equals( expect=main.TRUE,
1953 actual=removeAllResults,
1954 onpass="Set removeAll correct",
1955 onfail="Set removeAll was incorrect" )
1956
1957 main.step( "Distributed Set addAll()" )
1958 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001959 addResponses = main.Cluster.command( "setTestAdd",
1960 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001961 # main.TRUE = successfully changed the set
1962 # main.FALSE = action resulted in no change in set
1963 # main.ERROR - Some error in executing the function
1964 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001965 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001966 if addResponses[ i ] == main.TRUE:
1967 # All is well
1968 pass
1969 elif addResponses[ i ] == main.FALSE:
1970 # Already in set, probably fine
1971 pass
1972 elif addResponses[ i ] == main.ERROR:
1973 # Error in execution
1974 addAllResults = main.FALSE
1975 else:
1976 # unexpected result
1977 addAllResults = main.FALSE
1978 if addAllResults != main.TRUE:
1979 main.log.error( "Error executing set addAll" )
1980
1981 # Check if set is still correct
1982 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001983 getResponses = main.Cluster.command( "setTestGet",
1984 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001985 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001986 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001987 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001988 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001989 current = set( getResponses[ i ] )
1990 if len( current ) == len( getResponses[ i ] ):
1991 # no repeats
1992 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001993 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001994 " of set " + main.onosSetName + ":\n" +
1995 str( getResponses[ i ] ) )
1996 main.log.debug( "Expected: " + str( main.onosSet ) )
1997 main.log.debug( "Actual: " + str( current ) )
1998 getResults = main.FALSE
1999 else:
2000 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002001 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002002 " set " + main.onosSetName + ":\n" +
2003 str( getResponses[ i ] ) )
2004 getResults = main.FALSE
2005 elif getResponses[ i ] == main.ERROR:
2006 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002007 sizeResponses = main.Cluster.command( "setTestSize",
2008 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002009 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002010 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002011 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002012 if size != sizeResponses[ i ]:
2013 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002014 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002015 " for set " + main.onosSetName +
2016 " but got " + str( sizeResponses[ i ] ) )
2017 addAllResults = addAllResults and getResults and sizeResults
2018 utilities.assert_equals( expect=main.TRUE,
2019 actual=addAllResults,
2020 onpass="Set addAll correct",
2021 onfail="Set addAll was incorrect" )
2022
2023 main.step( "Distributed Set clear()" )
2024 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07002025 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07002026 args=[ main.onosSetName, " " ], # Values doesn't matter
2027 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002028 # main.TRUE = successfully changed the set
2029 # main.FALSE = action resulted in no change in set
2030 # main.ERROR - Some error in executing the function
2031 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002032 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002033 if clearResponses[ i ] == main.TRUE:
2034 # All is well
2035 pass
2036 elif clearResponses[ i ] == main.FALSE:
2037 # Nothing set, probably fine
2038 pass
2039 elif clearResponses[ i ] == main.ERROR:
2040 # Error in execution
2041 clearResults = main.FALSE
2042 else:
2043 # unexpected result
2044 clearResults = main.FALSE
2045 if clearResults != main.TRUE:
2046 main.log.error( "Error executing set clear" )
2047
2048 # Check if set is still correct
2049 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002050 getResponses = main.Cluster.command( "setTestGet",
2051 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002052 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002053 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002054 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002055 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002056 current = set( getResponses[ i ] )
2057 if len( current ) == len( getResponses[ i ] ):
2058 # no repeats
2059 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002060 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002061 " of set " + main.onosSetName + ":\n" +
2062 str( getResponses[ i ] ) )
2063 main.log.debug( "Expected: " + str( main.onosSet ) )
2064 main.log.debug( "Actual: " + str( current ) )
2065 getResults = main.FALSE
2066 else:
2067 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002068 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002069 " set " + main.onosSetName + ":\n" +
2070 str( getResponses[ i ] ) )
2071 getResults = main.FALSE
2072 elif getResponses[ i ] == main.ERROR:
2073 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002074 sizeResponses = main.Cluster.command( "setTestSize",
2075 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002076 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002077 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002078 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002079 if size != sizeResponses[ i ]:
2080 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002081 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002082 " for set " + main.onosSetName +
2083 " but got " + str( sizeResponses[ i ] ) )
2084 clearResults = clearResults and getResults and sizeResults
2085 utilities.assert_equals( expect=main.TRUE,
2086 actual=clearResults,
2087 onpass="Set clear correct",
2088 onfail="Set clear was incorrect" )
2089
2090 main.step( "Distributed Set addAll()" )
2091 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002092 addResponses = main.Cluster.command( "setTestAdd",
2093 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002094 # main.TRUE = successfully changed the set
2095 # main.FALSE = action resulted in no change in set
2096 # main.ERROR - Some error in executing the function
2097 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002098 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002099 if addResponses[ i ] == main.TRUE:
2100 # All is well
2101 pass
2102 elif addResponses[ i ] == main.FALSE:
2103 # Already in set, probably fine
2104 pass
2105 elif addResponses[ i ] == main.ERROR:
2106 # Error in execution
2107 addAllResults = main.FALSE
2108 else:
2109 # unexpected result
2110 addAllResults = main.FALSE
2111 if addAllResults != main.TRUE:
2112 main.log.error( "Error executing set addAll" )
2113
2114 # Check if set is still correct
2115 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002116 getResponses = main.Cluster.command( "setTestGet",
2117 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002118 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002119 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002120 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002121 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002122 current = set( getResponses[ i ] )
2123 if len( current ) == len( getResponses[ i ] ):
2124 # no repeats
2125 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002126 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002127 " of set " + main.onosSetName + ":\n" +
2128 str( getResponses[ i ] ) )
2129 main.log.debug( "Expected: " + str( main.onosSet ) )
2130 main.log.debug( "Actual: " + str( current ) )
2131 getResults = main.FALSE
2132 else:
2133 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002134 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002135 " set " + main.onosSetName + ":\n" +
2136 str( getResponses[ i ] ) )
2137 getResults = main.FALSE
2138 elif getResponses[ i ] == main.ERROR:
2139 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002140 sizeResponses = main.Cluster.command( "setTestSize",
2141 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002142 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002143 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002144 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002145 if size != sizeResponses[ i ]:
2146 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002147 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002148 " for set " + main.onosSetName +
2149 " but got " + str( sizeResponses[ i ] ) )
2150 addAllResults = addAllResults and getResults and sizeResults
2151 utilities.assert_equals( expect=main.TRUE,
2152 actual=addAllResults,
2153 onpass="Set addAll correct",
2154 onfail="Set addAll was incorrect" )
2155
2156 main.step( "Distributed Set retain()" )
2157 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002158 retainResponses = main.Cluster.command( "setTestRemove",
2159 args=[ main.onosSetName, retainValue ],
2160 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002161 # main.TRUE = successfully changed the set
2162 # main.FALSE = action resulted in no change in set
2163 # main.ERROR - Some error in executing the function
2164 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002165 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002166 if retainResponses[ i ] == main.TRUE:
2167 # All is well
2168 pass
2169 elif retainResponses[ i ] == main.FALSE:
2170 # Already in set, probably fine
2171 pass
2172 elif retainResponses[ i ] == main.ERROR:
2173 # Error in execution
2174 retainResults = main.FALSE
2175 else:
2176 # unexpected result
2177 retainResults = main.FALSE
2178 if retainResults != main.TRUE:
2179 main.log.error( "Error executing set retain" )
2180
2181 # Check if set is still correct
2182 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002183 getResponses = main.Cluster.command( "setTestGet",
2184 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002185 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002186 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002187 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002188 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002189 current = set( getResponses[ i ] )
2190 if len( current ) == len( getResponses[ i ] ):
2191 # no repeats
2192 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002193 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002194 " of set " + main.onosSetName + ":\n" +
2195 str( getResponses[ i ] ) )
2196 main.log.debug( "Expected: " + str( main.onosSet ) )
2197 main.log.debug( "Actual: " + str( current ) )
2198 getResults = main.FALSE
2199 else:
2200 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002201 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002202 " set " + main.onosSetName + ":\n" +
2203 str( getResponses[ i ] ) )
2204 getResults = main.FALSE
2205 elif getResponses[ i ] == main.ERROR:
2206 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002207 sizeResponses = main.Cluster.command( "setTestSize",
2208 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002209 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002210 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002211 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002212 if size != sizeResponses[ i ]:
2213 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002214 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002215 str( size ) + " for set " + main.onosSetName +
2216 " but got " + str( sizeResponses[ i ] ) )
2217 retainResults = retainResults and getResults and sizeResults
2218 utilities.assert_equals( expect=main.TRUE,
2219 actual=retainResults,
2220 onpass="Set retain correct",
2221 onfail="Set retain was incorrect" )
2222
2223 # Transactional maps
2224 main.step( "Partitioned Transactional maps put" )
2225 tMapValue = "Testing"
2226 numKeys = 100
2227 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002228 ctrl = main.Cluster.next()
2229 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
            if putResponses and len( putResponses ) == numKeys:
2231 for i in putResponses:
2232 if putResponses[ i ][ 'value' ] != tMapValue:
2233 putResult = False
2234 else:
2235 putResult = False
2236 if not putResult:
2237 main.log.debug( "Put response values: " + str( putResponses ) )
2238 utilities.assert_equals( expect=True,
2239 actual=putResult,
2240 onpass="Partitioned Transactional Map put successful",
2241 onfail="Partitioned Transactional Map put values are incorrect" )
2242
2243 main.step( "Partitioned Transactional maps get" )
2244 # FIXME: is this sleep needed?
2245 time.sleep( 5 )
2246
2247 getCheck = True
2248 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002249 getResponses = main.Cluster.command( "transactionalMapGet",
2250 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002251 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002252 for node in getResponses:
2253 if node != tMapValue:
2254 valueCheck = False
2255 if not valueCheck:
                    main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002257 main.log.warn( getResponses )
2258 getCheck = getCheck and valueCheck
2259 utilities.assert_equals( expect=True,
2260 actual=getCheck,
2261 onpass="Partitioned Transactional Map get values were correct",
2262 onfail="Partitioned Transactional Map values incorrect" )
2263
2264 # DISTRIBUTED ATOMIC VALUE
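            # The "get and verify" pattern below is repeated after every
            # mutation of the atomic Value. A sketch of a local helper that
            # would collapse those blocks; defined for illustration only and
            # not called by the test as written:
            def checkAtomicValue( name, expectedValue ):
                expected = expectedValue if expectedValue is not None else "null"
                values = main.Cluster.command( "valueTestGet", args=[ name ] )
                main.log.debug( "Expecting " + expected + ", got " + str( values ) )
                return all( v == expected for v in values )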
            main.step( "Get the value of a new atomic Value" )
Jon Hallca319892017-06-15 15:25:22 -07002266 getValues = main.Cluster.command( "valueTestGet",
2267 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002268 main.log.debug( getValues )
2269 # Check the results
2270 atomicValueGetResult = True
2271 expected = valueValue if valueValue is not None else "null"
2272 main.log.debug( "Checking for value of " + expected )
2273 for i in getValues:
2274 if i != expected:
2275 atomicValueGetResult = False
2276 utilities.assert_equals( expect=True,
2277 actual=atomicValueGetResult,
2278 onpass="Atomic Value get successful",
2279 onfail="Error getting atomic Value " +
2280 str( valueValue ) + ", found: " +
2281 str( getValues ) )
2282
2283 main.step( "Atomic Value set()" )
2284 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002285 setValues = main.Cluster.command( "valueTestSet",
2286 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002287 main.log.debug( setValues )
2288 # Check the results
2289 atomicValueSetResults = True
2290 for i in setValues:
2291 if i != main.TRUE:
2292 atomicValueSetResults = False
2293 utilities.assert_equals( expect=True,
2294 actual=atomicValueSetResults,
2295 onpass="Atomic Value set successful",
                                     onfail="Error setting atomic Value: " +
2297 str( setValues ) )
2298
2299 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002300 getValues = main.Cluster.command( "valueTestGet",
2301 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002302 main.log.debug( getValues )
2303 # Check the results
2304 atomicValueGetResult = True
2305 expected = valueValue if valueValue is not None else "null"
2306 main.log.debug( "Checking for value of " + expected )
2307 for i in getValues:
2308 if i != expected:
2309 atomicValueGetResult = False
2310 utilities.assert_equals( expect=True,
2311 actual=atomicValueGetResult,
2312 onpass="Atomic Value get successful",
2313 onfail="Error getting atomic Value " +
2314 str( valueValue ) + ", found: " +
2315 str( getValues ) )
2316
2317 main.step( "Atomic Value compareAndSet()" )
2318 oldValue = valueValue
2319 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002320 ctrl = main.Cluster.next()
2321 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002322 main.log.debug( CASValue )
2323 utilities.assert_equals( expect=main.TRUE,
2324 actual=CASValue,
                                     onpass="Atomic Value compareAndSet successful",
2326 onfail="Error setting atomic Value:" +
2327 str( CASValue ) )
2328
2329 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002330 getValues = main.Cluster.command( "valueTestGet",
2331 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002332 main.log.debug( getValues )
2333 # Check the results
2334 atomicValueGetResult = True
2335 expected = valueValue if valueValue is not None else "null"
2336 main.log.debug( "Checking for value of " + expected )
2337 for i in getValues:
2338 if i != expected:
2339 atomicValueGetResult = False
2340 utilities.assert_equals( expect=True,
2341 actual=atomicValueGetResult,
2342 onpass="Atomic Value get successful",
2343 onfail="Error getting atomic Value " +
2344 str( valueValue ) + ", found: " +
2345 str( getValues ) )
2346
2347 main.step( "Atomic Value getAndSet()" )
2348 oldValue = valueValue
2349 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002350 ctrl = main.Cluster.next()
2351 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002352 main.log.debug( GASValue )
2353 expected = oldValue if oldValue is not None else "null"
2354 utilities.assert_equals( expect=expected,
2355 actual=GASValue,
2356 onpass="Atomic Value GAS successful",
2357 onfail="Error with GetAndSet atomic Value: expected " +
2358 str( expected ) + ", found: " +
2359 str( GASValue ) )
2360
2361 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002362 getValues = main.Cluster.command( "valueTestGet",
2363 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002364 main.log.debug( getValues )
2365 # Check the results
2366 atomicValueGetResult = True
2367 expected = valueValue if valueValue is not None else "null"
2368 main.log.debug( "Checking for value of " + expected )
2369 for i in getValues:
2370 if i != expected:
2371 atomicValueGetResult = False
2372 utilities.assert_equals( expect=True,
2373 actual=atomicValueGetResult,
2374 onpass="Atomic Value get successful",
2375 onfail="Error getting atomic Value: expected " +
2376 str( valueValue ) + ", found: " +
2377 str( getValues ) )
2378
            main.step( "Atomic Value destroy()" )
2380 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002381 ctrl = main.Cluster.next()
2382 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002383 main.log.debug( destroyResult )
2384 # Check the results
2385 utilities.assert_equals( expect=main.TRUE,
2386 actual=destroyResult,
2387 onpass="Atomic Value destroy successful",
2388 onfail="Error destroying atomic Value" )
2389
2390 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002391 getValues = main.Cluster.command( "valueTestGet",
2392 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002393 main.log.debug( getValues )
2394 # Check the results
2395 atomicValueGetResult = True
2396 expected = valueValue if valueValue is not None else "null"
2397 main.log.debug( "Checking for value of " + expected )
2398 for i in getValues:
2399 if i != expected:
2400 atomicValueGetResult = False
2401 utilities.assert_equals( expect=True,
2402 actual=atomicValueGetResult,
2403 onpass="Atomic Value get successful",
2404 onfail="Error getting atomic Value " +
2405 str( valueValue ) + ", found: " +
2406 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002407
2408 # WORK QUEUES
2409 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002410 ctrl = main.Cluster.next()
2411 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002412 workQueuePending += 1
2413 main.log.debug( addResult )
2414 # Check the results
2415 utilities.assert_equals( expect=main.TRUE,
2416 actual=addResult,
2417 onpass="Work Queue add successful",
2418 onfail="Error adding to Work Queue" )
2419
2420 main.step( "Check the work queue stats" )
2421 statsResults = self.workQueueStatsCheck( workQueueName,
2422 workQueueCompleted,
2423 workQueueInProgress,
2424 workQueuePending )
2425 utilities.assert_equals( expect=True,
2426 actual=statsResults,
2427 onpass="Work Queue stats correct",
2428 onfail="Work Queue stats incorrect " )
2429
2430 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002431 ctrl = main.Cluster.next()
2432 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002433 workQueuePending += 2
2434 main.log.debug( addMultipleResult )
2435 # Check the results
2436 utilities.assert_equals( expect=main.TRUE,
2437 actual=addMultipleResult,
2438 onpass="Work Queue add multiple successful",
2439 onfail="Error adding multiple items to Work Queue" )
2440
2441 main.step( "Check the work queue stats" )
2442 statsResults = self.workQueueStatsCheck( workQueueName,
2443 workQueueCompleted,
2444 workQueueInProgress,
2445 workQueuePending )
2446 utilities.assert_equals( expect=True,
2447 actual=statsResults,
2448 onpass="Work Queue stats correct",
2449 onfail="Work Queue stats incorrect " )
2450
2451 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002452 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002453 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002454 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002455 workQueuePending -= number
2456 workQueueCompleted += number
2457 main.log.debug( take1Result )
2458 # Check the results
2459 utilities.assert_equals( expect=main.TRUE,
2460 actual=take1Result,
2461 onpass="Work Queue takeAndComplete 1 successful",
2462 onfail="Error taking 1 from Work Queue" )
2463
2464 main.step( "Check the work queue stats" )
2465 statsResults = self.workQueueStatsCheck( workQueueName,
2466 workQueueCompleted,
2467 workQueueInProgress,
2468 workQueuePending )
2469 utilities.assert_equals( expect=True,
2470 actual=statsResults,
2471 onpass="Work Queue stats correct",
2472 onfail="Work Queue stats incorrect " )
2473
2474 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002475 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002476 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002477 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002478 workQueuePending -= number
2479 workQueueCompleted += number
2480 main.log.debug( take2Result )
2481 # Check the results
2482 utilities.assert_equals( expect=main.TRUE,
2483 actual=take2Result,
2484 onpass="Work Queue takeAndComplete 2 successful",
2485 onfail="Error taking 2 from Work Queue" )
2486
2487 main.step( "Check the work queue stats" )
2488 statsResults = self.workQueueStatsCheck( workQueueName,
2489 workQueueCompleted,
2490 workQueueInProgress,
2491 workQueuePending )
2492 utilities.assert_equals( expect=True,
2493 actual=statsResults,
2494 onpass="Work Queue stats correct",
2495 onfail="Work Queue stats incorrect " )
2496
2497 main.step( "Work Queue destroy()" )
2498 valueValue = None
2499 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002500 ctrl = main.Cluster.next()
2501 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002502 workQueueCompleted = 0
2503 workQueueInProgress = 0
2504 workQueuePending = 0
2505 main.log.debug( destroyResult )
2506 # Check the results
2507 utilities.assert_equals( expect=main.TRUE,
2508 actual=destroyResult,
2509 onpass="Work Queue destroy successful",
2510 onfail="Error destroying Work Queue" )
2511
2512 main.step( "Check the work queue stats" )
2513 statsResults = self.workQueueStatsCheck( workQueueName,
2514 workQueueCompleted,
2515 workQueueInProgress,
2516 workQueuePending )
2517 utilities.assert_equals( expect=True,
2518 actual=statsResults,
2519 onpass="Work Queue stats correct",
2520 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002521 except Exception as e:
2522 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002523
2524 def cleanUp( self, main ):
2525 """
2526         Clean up: back up logs and pcaps, stop Mininet, and check ONOS logs for errors
2527 """
2528 import os
2529 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002530 assert main, "main not defined"
2531 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002532
2533 # printing colors to terminal
2534 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2535 'blue': '\033[94m', 'green': '\033[92m',
2536 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
Jon Hall4173b242017-09-12 17:04:38 -07002537
Devin Lim58046fa2017-07-05 16:55:00 -07002538 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002539
2540 main.step( "Checking raft log size" )
2541 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2542 # get compacted periodically
2543 logCheck = main.Cluster.checkPartitionSize()
2544 utilities.assert_equals( expect=True, actual=logCheck,
2545 onpass="Raft log size is not too big",
2546 onfail="Raft logs grew too big" )
2547
Devin Lim58046fa2017-07-05 16:55:00 -07002548 main.step( "Killing tcpdumps" )
2549 main.Mininet2.stopTcpdump()
2550
2551 testname = main.TEST
2552 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2553 main.step( "Copying MN pcap and ONOS log files to test station" )
2554 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2555 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2556 # NOTE: MN Pcap file is being saved to logdir.
2557 # We scp this file as MN and TestON aren't necessarily the same vm
2558
2559 # FIXME: To be replaced with a Jenkin's post script
2560 # TODO: Load these from params
2561 # NOTE: must end in /
2562 logFolder = "/opt/onos/log/"
2563 logFiles = [ "karaf.log", "karaf.log.1" ]
2564 # NOTE: must end in /
2565 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002566 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002567 dstName = main.logdir + "/" + ctrl.name + "-" + f
2568 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002569 logFolder + f, dstName )
2570 # std*.log's
2571 # NOTE: must end in /
2572 logFolder = "/opt/onos/var/"
2573 logFiles = [ "stderr.log", "stdout.log" ]
2574 # NOTE: must end in /
2575 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002576 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002577 dstName = main.logdir + "/" + ctrl.name + "-" + f
2578 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002579 logFolder + f, dstName )
2580 else:
2581 main.log.debug( "skipping saving log files" )
2582
2583 main.step( "Stopping Mininet" )
2584 mnResult = main.Mininet1.stopNet()
2585 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2586 onpass="Mininet stopped",
2587 onfail="MN cleanup NOT successful" )
2588
2589 main.step( "Checking ONOS Logs for errors" )
Devin Lim142b5342017-07-20 15:22:39 -07002590 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002591 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2592 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002593
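        # Dump the timing data collected during the run ( main.HAlabels /
        # main.HAdata ) to Timers.csv; NameError is caught in case these
        # were never populated.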
2594 try:
2595 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2596 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2597 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2598 timerLog.close()
2599 except NameError as e:
2600 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002601
Devin Lim58046fa2017-07-05 16:55:00 -07002602 def assignMastership( self, main ):
2603 """
2604 Assign mastership to controllers
2605 """
2606 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002607 assert main, "main not defined"
2608 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002609
2610 main.case( "Assigning Controller roles for switches" )
2611 main.caseExplanation = "Check that ONOS is connected to each " +\
2612 "device. Then manually assign" +\
2613 " mastership to specific ONOS nodes using" +\
2614 " 'device-role'"
2615 main.step( "Assign mastership of switches to specific controllers" )
2616 # Manually assign mastership to the controller we want
2617 roleCall = main.TRUE
2618
2619 ipList = []
2620 deviceList = []
Jon Hallca319892017-06-15 15:25:22 -07002621 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07002622 try:
2623 # Assign mastership to specific controllers. This assignment was
2624             # determined for a 7 node cluster, but will work with any sized
2625 # cluster
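            # Summary of the intended mapping ( indices are taken modulo the
            # cluster size ): s1, s28 -> ONOS1; s2, s3 -> ONOS2; s5, s6 -> ONOS3;
            # s4 -> ONOS4; s8-s17 -> ONOS5; s7 -> ONOS6; s18-s27 -> ONOS7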
2626 for i in range( 1, 29 ): # switches 1 through 28
2627 # set up correct variables:
2628 if i == 1:
2629 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002630 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002631 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
2632 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002633 c = 1 % main.Cluster.numCtrls
2634 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002635 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
2636 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002637 c = 1 % main.Cluster.numCtrls
2638 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002639 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
2640 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002641 c = 3 % main.Cluster.numCtrls
2642 ip = main.Cluster.active( c ).ip_address # ONOS4
Devin Lim58046fa2017-07-05 16:55:00 -07002643 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
2644 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002645 c = 2 % main.Cluster.numCtrls
2646 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002647 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
2648 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002649 c = 2 % main.Cluster.numCtrls
2650 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002651 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
2652 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002653 c = 5 % main.Cluster.numCtrls
2654 ip = main.Cluster.active( c ).ip_address # ONOS6
Devin Lim58046fa2017-07-05 16:55:00 -07002655 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
2656 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002657 c = 4 % main.Cluster.numCtrls
2658 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002659 dpid = '3' + str( i ).zfill( 3 )
2660 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2661 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002662 c = 6 % main.Cluster.numCtrls
2663 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002664 dpid = '6' + str( i ).zfill( 3 )
2665 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2666 elif i == 28:
2667 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002668 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002669 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
2670 else:
2671 main.log.error( "You didn't write an else statement for " +
2672 "switch s" + str( i ) )
2673 roleCall = main.FALSE
2674 # Assign switch
2675 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2676 # TODO: make this controller dynamic
2677 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
2678 ipList.append( ip )
2679 deviceList.append( deviceId )
2680 except ( AttributeError, AssertionError ):
2681 main.log.exception( "Something is wrong with ONOS device view" )
2682 main.log.info( onosCli.devices() )
2683 utilities.assert_equals(
2684 expect=main.TRUE,
2685 actual=roleCall,
2686 onpass="Re-assigned switch mastership to designated controller",
2687 onfail="Something wrong with deviceRole calls" )
2688
2689 main.step( "Check mastership was correctly assigned" )
2690 roleCheck = main.TRUE
2691 # NOTE: This is due to the fact that device mastership change is not
2692 # atomic and is actually a multi step process
2693 time.sleep( 5 )
2694 for i in range( len( ipList ) ):
2695 ip = ipList[ i ]
2696 deviceId = deviceList[ i ]
2697 # Check assignment
2698 master = onosCli.getRole( deviceId ).get( 'master' )
2699 if ip in master:
2700 roleCheck = roleCheck and main.TRUE
2701 else:
2702 roleCheck = roleCheck and main.FALSE
2703 main.log.error( "Error, controller " + ip + " is not" +
2704 " master " + "of device " +
2705 str( deviceId ) + ". Master is " +
2706 repr( master ) + "." )
2707 utilities.assert_equals(
2708 expect=main.TRUE,
2709 actual=roleCheck,
2710 onpass="Switches were successfully reassigned to designated " +
2711 "controller",
2712 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002713
Devin Lim58046fa2017-07-05 16:55:00 -07002714 def bringUpStoppedNode( self, main ):
2715 """
2716         Bring up the ONOS nodes that were previously stopped
2717 """
2718 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002719 assert main, "main not defined"
2720 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002721 assert main.kill, "main.kill not defined"
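        # NOTE: main.kill is assumed to hold the controller objects that were
        #       stopped/killed by the preceding failure case; they are restarted here.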
2722 main.case( "Restart minority of ONOS nodes" )
2723
2724 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2725 startResults = main.TRUE
2726 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002727 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002728 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002729 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002730 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2731 onpass="ONOS nodes started successfully",
2732 onfail="ONOS nodes NOT successfully started" )
2733
2734 main.step( "Checking if ONOS is up yet" )
2735 count = 0
2736 onosIsupResult = main.FALSE
2737 while onosIsupResult == main.FALSE and count < 10:
2738 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002739 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002740 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002741 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002742 count = count + 1
2743 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2744 onpass="ONOS restarted successfully",
2745 onfail="ONOS restart NOT successful" )
2746
Jon Hallca319892017-06-15 15:25:22 -07002747 main.step( "Restarting ONOS nodes" )
Devin Lim58046fa2017-07-05 16:55:00 -07002748 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002749 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002750 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002751 ctrl.startOnosCli( ctrl.ipAddress )
2752 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002753 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002754 onpass="ONOS node(s) restarted",
2755 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002756
2757 # Grab the time of restart so we chan check how long the gossip
2758 # protocol has had time to work
2759 main.restartTime = time.time() - restartTime
2760 main.log.debug( "Restart time: " + str( main.restartTime ) )
2761         # TODO: Make this configurable. Also, we are breaking the above timer
2762 main.step( "Checking ONOS nodes" )
2763 nodeResults = utilities.retry( self.nodesCheck,
2764 False,
Jon Hallca319892017-06-15 15:25:22 -07002765 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07002766 sleep=15,
2767 attempts=5 )
2768
2769 utilities.assert_equals( expect=True, actual=nodeResults,
2770 onpass="Nodes check successful",
2771 onfail="Nodes check NOT successful" )
2772
2773 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002774 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002775 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002776 ctrl.name,
2777 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002778 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -07002779 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002780
Jon Hallca319892017-06-15 15:25:22 -07002781 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002782
2783 main.step( "Rerun for election on the node(s) that were killed" )
2784 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002785 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002786 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002787 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002788 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2789 onpass="ONOS nodes reran for election topic",
2790                                  onfail="Error rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002791
Devin Lim142b5342017-07-20 15:22:39 -07002792 def tempCell( self, cellName, ipList ):
2793 main.step( "Create cell file" )
2794 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002795
Devin Lim142b5342017-07-20 15:22:39 -07002796 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2797 main.Mininet1.ip_address,
2798                                             cellAppString, ipList, main.ONOScli1.karafUser )
2799 main.step( "Applying cell variable to environment" )
2800 cellResult = main.ONOSbench.setCell( cellName )
2801 verifyResult = main.ONOSbench.verifyCell()
2802
Devin Lim142b5342017-07-20 15:22:39 -07002803 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002804         """
2805         Check state after ONOS failure/scaling.
2806 
2807         afterWhich :
2808             0: failure
2809             1: scaling
2810         """
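        # NOTE: mastershipState, intentState and flows are read as globals below;
        #       they are assumed to have been captured by an earlier test case
        #       ( see the "requires case 5" note further down ) before the
        #       failure/scaling event.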
2812 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002813 assert main, "main not defined"
2814 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002815 main.case( "Running ONOS Constant State Tests" )
2816
2817         OnosAfterWhich = [ "failure", "scaling" ]
2818
Devin Lim58046fa2017-07-05 16:55:00 -07002819 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002820 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002821
Devin Lim142b5342017-07-20 15:22:39 -07002822 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002823 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002824
2825 if rolesResults and not consistentMastership:
2826 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002827 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002828 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002829 json.dumps( json.loads( ONOSMastership[ i ] ),
2830 sort_keys=True,
2831 indent=4,
2832 separators=( ',', ': ' ) ) )
2833
2834 if compareSwitch:
2835 description2 = "Compare switch roles from before failure"
2836 main.step( description2 )
2837 try:
2838 currentJson = json.loads( ONOSMastership[ 0 ] )
2839 oldJson = json.loads( mastershipState )
2840 except ( ValueError, TypeError ):
2841 main.log.exception( "Something is wrong with parsing " +
2842 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002843 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2844 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002845 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002846 mastershipCheck = main.TRUE
2847 for i in range( 1, 29 ):
2848 switchDPID = str(
2849 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
2850 current = [ switch[ 'master' ] for switch in currentJson
2851 if switchDPID in switch[ 'id' ] ]
2852 old = [ switch[ 'master' ] for switch in oldJson
2853 if switchDPID in switch[ 'id' ] ]
2854 if current == old:
2855 mastershipCheck = mastershipCheck and main.TRUE
2856 else:
2857 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2858 mastershipCheck = main.FALSE
2859 utilities.assert_equals(
2860 expect=main.TRUE,
2861 actual=mastershipCheck,
2862 onpass="Mastership of Switches was not changed",
2863 onfail="Mastership of some switches changed" )
2864
2865 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002866 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002867 intentCheck = main.FALSE
2868 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002869
2870 main.step( "Check for consistency in Intents from each controller" )
2871 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2872 main.log.info( "Intents are consistent across all ONOS " +
2873 "nodes" )
2874 else:
2875 consistentIntents = False
2876
2877 # Try to make it easy to figure out what is happening
2878 #
2879 # Intent ONOS1 ONOS2 ...
2880 # 0x01 INSTALLED INSTALLING
2881 # ... ... ...
2882 # ... ... ...
2883 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002884 for ctrl in main.Cluster.active():
2885 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002886 main.log.warn( title )
2887 # get all intent keys in the cluster
2888 keys = []
2889 for nodeStr in ONOSIntents:
2890 node = json.loads( nodeStr )
2891 for intent in node:
2892 keys.append( intent.get( 'id' ) )
2893 keys = set( keys )
2894 for key in keys:
2895 row = "%-13s" % key
2896 for nodeStr in ONOSIntents:
2897 node = json.loads( nodeStr )
2898 for intent in node:
2899 if intent.get( 'id' ) == key:
2900 row += "%-15s" % intent.get( 'state' )
2901 main.log.warn( row )
2902 # End table view
2903
2904 utilities.assert_equals(
2905 expect=True,
2906 actual=consistentIntents,
2907 onpass="Intents are consistent across all ONOS nodes",
2908 onfail="ONOS nodes have different views of intents" )
2909 intentStates = []
2910 for node in ONOSIntents: # Iter through ONOS nodes
2911 nodeStates = []
2912 # Iter through intents of a node
2913 try:
2914 for intent in json.loads( node ):
2915 nodeStates.append( intent[ 'state' ] )
2916 except ( ValueError, TypeError ):
2917 main.log.exception( "Error in parsing intents" )
2918 main.log.error( repr( node ) )
2919 intentStates.append( nodeStates )
2920 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2921 main.log.info( dict( out ) )
2922
2923 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002924 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002925 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002926 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002927 main.log.warn( json.dumps(
2928 json.loads( ONOSIntents[ i ] ),
2929 sort_keys=True,
2930 indent=4,
2931 separators=( ',', ': ' ) ) )
2932 elif intentsResults and consistentIntents:
2933 intentCheck = main.TRUE
2934
2935 # NOTE: Store has no durability, so intents are lost across system
2936 # restarts
2937 if not isRestart:
2938 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2939 # NOTE: this requires case 5 to pass for intentState to be set.
2940 # maybe we should stop the test if that fails?
2941 sameIntents = main.FALSE
2942 try:
2943 intentState
2944 except NameError:
2945 main.log.warn( "No previous intent state was saved" )
2946 else:
2947 if intentState and intentState == ONOSIntents[ 0 ]:
2948 sameIntents = main.TRUE
2949 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2950 # TODO: possibly the states have changed? we may need to figure out
2951 # what the acceptable states are
2952 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2953 sameIntents = main.TRUE
2954 try:
2955 before = json.loads( intentState )
2956 after = json.loads( ONOSIntents[ 0 ] )
2957 for intent in before:
2958 if intent not in after:
2959 sameIntents = main.FALSE
2960 main.log.debug( "Intent is not currently in ONOS " +
2961 "(at least in the same form):" )
2962 main.log.debug( json.dumps( intent ) )
2963 except ( ValueError, TypeError ):
2964 main.log.exception( "Exception printing intents" )
2965 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2966 main.log.debug( repr( intentState ) )
2967 if sameIntents == main.FALSE:
2968 try:
2969 main.log.debug( "ONOS intents before: " )
2970 main.log.debug( json.dumps( json.loads( intentState ),
2971 sort_keys=True, indent=4,
2972 separators=( ',', ': ' ) ) )
2973 main.log.debug( "Current ONOS intents: " )
2974 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2975 sort_keys=True, indent=4,
2976 separators=( ',', ': ' ) ) )
2977 except ( ValueError, TypeError ):
2978 main.log.exception( "Exception printing intents" )
2979 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2980 main.log.debug( repr( intentState ) )
2981 utilities.assert_equals(
2982 expect=main.TRUE,
2983 actual=sameIntents,
2984 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ] ,
2985 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
2986 intentCheck = intentCheck and sameIntents
2987
2988 main.step( "Get the OF Table entries and compare to before " +
2989 "component " + OnosAfterWhich[ afterWhich ] )
2990 FlowTables = main.TRUE
2991 for i in range( 28 ):
2992 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2993 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2994 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
2995 FlowTables = FlowTables and curSwitch
2996 if curSwitch == main.FALSE:
2997 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2998 utilities.assert_equals(
2999 expect=main.TRUE,
3000 actual=FlowTables,
3001 onpass="No changes were found in the flow tables",
3002 onfail="Changes were found in the flow tables" )
3003
Jon Hallca319892017-06-15 15:25:22 -07003004 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07003005 """
3006 main.step( "Check the continuous pings to ensure that no packets " +
3007 "were dropped during component failure" )
3008 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
3009 main.params[ 'TESTONIP' ] )
3010 LossInPings = main.FALSE
3011 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3012 for i in range( 8, 18 ):
3013 main.log.info(
3014 "Checking for a loss in pings along flow from s" +
3015 str( i ) )
3016 LossInPings = main.Mininet2.checkForLoss(
3017 "/tmp/ping.h" +
3018 str( i ) ) or LossInPings
3019 if LossInPings == main.TRUE:
3020 main.log.info( "Loss in ping detected" )
3021 elif LossInPings == main.ERROR:
3022 main.log.info( "There are multiple mininet process running" )
3023 elif LossInPings == main.FALSE:
3024 main.log.info( "No Loss in the pings" )
3025 main.log.info( "No loss of dataplane connectivity" )
3026 utilities.assert_equals(
3027 expect=main.FALSE,
3028 actual=LossInPings,
3029 onpass="No Loss of connectivity",
3030 onfail="Loss of dataplane connectivity detected" )
3031         # NOTE: Since intents are not persisted with IntentStore,
3032 # we expect loss in dataplane connectivity
3033 LossInPings = main.FALSE
3034 """
3035
3036 def compareTopo( self, main ):
3037 """
3038 Compare topo
3039 """
3040 import json
3041 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003042 assert main, "main not defined"
3043 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003044 try:
3045 from tests.dependencies.topology import Topology
3046 except ImportError:
3047 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003048 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003049 try:
3050 main.topoRelated
3051 except ( NameError, AttributeError ):
3052 main.topoRelated = Topology()
3053 main.case( "Compare ONOS Topology view to Mininet topology" )
3054 main.caseExplanation = "Compare topology objects between Mininet" +\
3055 " and ONOS"
3056 topoResult = main.FALSE
3057         topoFailMsg = "ONOS topology doesn't match Mininet"
3058 elapsed = 0
3059 count = 0
3060 main.step( "Comparing ONOS topology to MN topology" )
3061 startTime = time.time()
3062 # Give time for Gossip to work
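        # Poll the topology from every active node until it matches Mininet;
        # give up only after both ~60 seconds have elapsed and 3 attempts have
        # been made, so the gossip-based stores have time to converge.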
3063 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3064 devicesResults = main.TRUE
3065 linksResults = main.TRUE
3066 hostsResults = main.TRUE
3067 hostAttachmentResults = True
3068 count += 1
3069 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003070 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003071 kwargs={ 'sleep': 5, 'attempts': 5,
3072 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003073 ipResult = main.TRUE
3074
Devin Lim142b5342017-07-20 15:22:39 -07003075 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003076 kwargs={ 'sleep': 5, 'attempts': 5,
3077 'randomTime': True },
3078 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003079
3080 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003081 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003082 if hosts[ controller ]:
3083 for host in hosts[ controller ]:
3084 if host is None or host.get( 'ipAddresses', [] ) == []:
3085 main.log.error(
3086 "Error with host ipAddresses on controller" +
3087 controllerStr + ": " + str( host ) )
3088 ipResult = main.FALSE
Devin Lim142b5342017-07-20 15:22:39 -07003089 ports = main.topoRelated.getAll( "ports" , True,
Jon Hallca319892017-06-15 15:25:22 -07003090 kwargs={ 'sleep': 5, 'attempts': 5,
3091 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003092 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003093 kwargs={ 'sleep': 5, 'attempts': 5,
3094 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003095 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003096 kwargs={ 'sleep': 5, 'attempts': 5,
3097 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003098
3099 elapsed = time.time() - startTime
3100 cliTime = time.time() - cliStart
3101             main.log.info( "Elapsed time: " + str( elapsed ) )
3102             main.log.info( "CLI time: " + str( cliTime ) )
3103
3104 if all( e is None for e in devices ) and\
3105 all( e is None for e in hosts ) and\
3106 all( e is None for e in ports ) and\
3107 all( e is None for e in links ) and\
3108 all( e is None for e in clusters ):
3109 topoFailMsg = "Could not get topology from ONOS"
3110 main.log.error( topoFailMsg )
3111 continue # Try again, No use trying to compare
3112
3113 mnSwitches = main.Mininet1.getSwitches()
3114 mnLinks = main.Mininet1.getLinks()
3115 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003116 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003117 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003118 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3119 controller,
3120 mnSwitches,
3121 devices,
3122 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003123 utilities.assert_equals( expect=main.TRUE,
3124 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003125 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003126 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003127 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003128 " Switches view is incorrect" )
3129
Devin Lim58046fa2017-07-05 16:55:00 -07003130 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003131 main.Mininet1.compareLinks,
3132 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003133 utilities.assert_equals( expect=main.TRUE,
3134 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003135 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003136 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003137 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003138 " links view is incorrect" )
3139 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3140 currentHostsResult = main.Mininet1.compareHosts(
3141 mnHosts,
3142 hosts[ controller ] )
3143 elif hosts[ controller ] == []:
3144 currentHostsResult = main.TRUE
3145 else:
3146 currentHostsResult = main.FALSE
3147 utilities.assert_equals( expect=main.TRUE,
3148 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003149 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003150 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003151 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003152 " hosts don't match Mininet" )
3153 # CHECKING HOST ATTACHMENT POINTS
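            # For the obelisk topology every host is expected to attach on
            # port 1 of its "home" switch; the mappings dict below ties each
            # host MAC to the DPID of that switch.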
3154 hostAttachment = True
3155 zeroHosts = False
3156 # FIXME: topo-HA/obelisk specific mappings:
3157 # key is mac and value is dpid
3158 mappings = {}
3159 for i in range( 1, 29 ): # hosts 1 through 28
3160 # set up correct variables:
3161 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
3162 if i == 1:
3163 deviceId = "1000".zfill( 16 )
3164 elif i == 2:
3165 deviceId = "2000".zfill( 16 )
3166 elif i == 3:
3167 deviceId = "3000".zfill( 16 )
3168 elif i == 4:
3169 deviceId = "3004".zfill( 16 )
3170 elif i == 5:
3171 deviceId = "5000".zfill( 16 )
3172 elif i == 6:
3173 deviceId = "6000".zfill( 16 )
3174 elif i == 7:
3175 deviceId = "6007".zfill( 16 )
3176 elif i >= 8 and i <= 17:
3177 dpid = '3' + str( i ).zfill( 3 )
3178 deviceId = dpid.zfill( 16 )
3179 elif i >= 18 and i <= 27:
3180 dpid = '6' + str( i ).zfill( 3 )
3181 deviceId = dpid.zfill( 16 )
3182 elif i == 28:
3183 deviceId = "2800".zfill( 16 )
3184 mappings[ macId ] = deviceId
3185 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3186 if hosts[ controller ] == []:
3187 main.log.warn( "There are no hosts discovered" )
3188 zeroHosts = True
3189 else:
3190 for host in hosts[ controller ]:
3191 mac = None
3192 location = None
3193 device = None
3194 port = None
3195 try:
3196 mac = host.get( 'mac' )
3197 assert mac, "mac field could not be found for this host object"
Devin Limefaf3062017-08-14 16:18:19 -07003198                             main.log.debug( repr( host ) )
3199 if 'locations' in host:
3200 location = host.get( 'locations' )[ 0 ]
3201 elif 'location' in host:
3202 location = host.get( 'location' )
Devin Lim58046fa2017-07-05 16:55:00 -07003203 assert location, "location field could not be found for this host object"
3204
3205 # Trim the protocol identifier off deviceId
3206 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
3207 assert device, "elementId field could not be found for this host location object"
3208
3209 port = location.get( 'port' )
3210 assert port, "port field could not be found for this host location object"
3211
3212 # Now check if this matches where they should be
3213 if mac and device and port:
3214 if str( port ) != "1":
3215 main.log.error( "The attachment port is incorrect for " +
3216 "host " + str( mac ) +
3217 ". Expected: 1 Actual: " + str( port ) )
3218 hostAttachment = False
3219 if device != mappings[ str( mac ) ]:
3220 main.log.error( "The attachment device is incorrect for " +
3221 "host " + str( mac ) +
3222 ". Expected: " + mappings[ str( mac ) ] +
3223 " Actual: " + device )
3224 hostAttachment = False
3225 else:
3226 hostAttachment = False
Devin Limefaf3062017-08-14 16:18:19 -07003227 except ( AssertionError, TypeError ):
Devin Lim58046fa2017-07-05 16:55:00 -07003228 main.log.exception( "Json object not as expected" )
3229 main.log.error( repr( host ) )
3230 hostAttachment = False
3231 else:
3232 main.log.error( "No hosts json output or \"Error\"" +
3233 " in output. hosts = " +
3234 repr( hosts[ controller ] ) )
3235 if zeroHosts is False:
3236 # TODO: Find a way to know if there should be hosts in a
3237 # given point of the test
3238 hostAttachment = True
3239
3240 # END CHECKING HOST ATTACHMENT POINTS
3241 devicesResults = devicesResults and currentDevicesResult
3242 linksResults = linksResults and currentLinksResult
3243 hostsResults = hostsResults and currentHostsResult
3244 hostAttachmentResults = hostAttachmentResults and\
3245 hostAttachment
3246 topoResult = ( devicesResults and linksResults
3247 and hostsResults and ipResult and
3248 hostAttachmentResults )
3249 utilities.assert_equals( expect=True,
3250 actual=topoResult,
3251 onpass="ONOS topology matches Mininet",
3252 onfail=topoFailMsg )
3253 # End of While loop to pull ONOS state
3254
3255 # Compare json objects for hosts and dataplane clusters
3256
3257 # hosts
3258 main.step( "Hosts view is consistent across all ONOS nodes" )
3259 consistentHostsResult = main.TRUE
3260 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003261 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003262 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3263 if hosts[ controller ] == hosts[ 0 ]:
3264 continue
3265 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003266 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003267 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003268 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003269 consistentHostsResult = main.FALSE
3270
3271 else:
Jon Hallca319892017-06-15 15:25:22 -07003272 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003273 controllerStr )
3274 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003275 main.log.debug( controllerStr +
3276 " hosts response: " +
3277 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003278 utilities.assert_equals(
3279 expect=main.TRUE,
3280 actual=consistentHostsResult,
3281 onpass="Hosts view is consistent across all ONOS nodes",
3282 onfail="ONOS nodes have different views of hosts" )
3283
3284 main.step( "Hosts information is correct" )
3285 hostsResults = hostsResults and ipResult
3286 utilities.assert_equals(
3287 expect=main.TRUE,
3288 actual=hostsResults,
3289 onpass="Host information is correct",
3290 onfail="Host information is incorrect" )
3291
3292 main.step( "Host attachment points to the network" )
3293 utilities.assert_equals(
3294 expect=True,
3295 actual=hostAttachmentResults,
3296 onpass="Hosts are correctly attached to the network",
3297 onfail="ONOS did not correctly attach hosts to the network" )
3298
3299 # Strongly connected clusters of devices
3300 main.step( "Clusters view is consistent across all ONOS nodes" )
3301 consistentClustersResult = main.TRUE
3302 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003303 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003304 if "Error" not in clusters[ controller ]:
3305 if clusters[ controller ] == clusters[ 0 ]:
3306 continue
3307 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003308 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003309 controllerStr +
3310 " is inconsistent with ONOS1" )
3311 consistentClustersResult = main.FALSE
3312 else:
3313 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003314 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003315 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003316 main.log.debug( controllerStr +
3317 " clusters response: " +
3318 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003319 utilities.assert_equals(
3320 expect=main.TRUE,
3321 actual=consistentClustersResult,
3322 onpass="Clusters view is consistent across all ONOS nodes",
3323 onfail="ONOS nodes have different views of clusters" )
3324 if not consistentClustersResult:
3325 main.log.debug( clusters )
3326 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003327 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003328
3329 main.step( "There is only one SCC" )
3330 # there should always only be one cluster
3331 try:
3332 numClusters = len( json.loads( clusters[ 0 ] ) )
3333 except ( ValueError, TypeError ):
3334 main.log.exception( "Error parsing clusters[0]: " +
3335 repr( clusters[ 0 ] ) )
3336 numClusters = "ERROR"
3337 clusterResults = main.FALSE
3338 if numClusters == 1:
3339 clusterResults = main.TRUE
3340 utilities.assert_equals(
3341 expect=1,
3342 actual=numClusters,
3343 onpass="ONOS shows 1 SCC",
3344 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3345
3346 topoResult = ( devicesResults and linksResults
3347 and hostsResults and consistentHostsResult
3348 and consistentClustersResult and clusterResults
3349 and ipResult and hostAttachmentResults )
3350
3351 topoResult = topoResult and int( count <= 2 )
3352 note = "note it takes about " + str( int( cliTime ) ) + \
3353 " seconds for the test to make all the cli calls to fetch " +\
3354 "the topology from each ONOS instance"
3355 main.log.info(
3356 "Very crass estimate for topology discovery/convergence( " +
3357 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3358 str( count ) + " tries" )
3359
3360 main.step( "Device information is correct" )
3361 utilities.assert_equals(
3362 expect=main.TRUE,
3363 actual=devicesResults,
3364 onpass="Device information is correct",
3365 onfail="Device information is incorrect" )
3366
3367 main.step( "Links are correct" )
3368 utilities.assert_equals(
3369 expect=main.TRUE,
3370 actual=linksResults,
3371 onpass="Link are correct",
3372             onpass="Links are correct",
3373
3374 main.step( "Hosts are correct" )
3375 utilities.assert_equals(
3376 expect=main.TRUE,
3377 actual=hostsResults,
3378 onpass="Hosts are correct",
3379 onfail="Hosts are incorrect" )
3380
3381 # FIXME: move this to an ONOS state case
3382 main.step( "Checking ONOS nodes" )
3383 nodeResults = utilities.retry( self.nodesCheck,
3384 False,
Jon Hallca319892017-06-15 15:25:22 -07003385 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07003386 attempts=5 )
3387 utilities.assert_equals( expect=True, actual=nodeResults,
3388 onpass="Nodes check successful",
3389 onfail="Nodes check NOT successful" )
3390 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003391 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003392 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003393 ctrl.name,
3394 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003395
3396 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003397 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003398
Devin Lim58046fa2017-07-05 16:55:00 -07003399 def linkDown( self, main, fromS="s3", toS="s28" ):
3400 """
3401 Link fromS-toS down
3402 """
3403 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003404 assert main, "main not defined"
3405 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003406 # NOTE: You should probably run a topology check after this
3407
3408 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
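        # linkSleep ( params: timers.LinkDiscovery ) gives ONOS time to detect
        # the link change before the caller runs any follow-up topology check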
3409
3410 description = "Turn off a link to ensure that Link Discovery " +\
3411 "is working properly"
3412 main.case( description )
3413
3414 main.step( "Kill Link between " + fromS + " and " + toS )
3415 LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
3416 main.log.info( "Waiting " + str( linkSleep ) +
3417 " seconds for link down to be discovered" )
3418 time.sleep( linkSleep )
3419 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3420 onpass="Link down successful",
3421 onfail="Failed to bring link down" )
3422 # TODO do some sort of check here
3423
3424 def linkUp( self, main, fromS="s3", toS="s28" ):
3425 """
3426 Link fromS-toS up
3427 """
3428 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003429 assert main, "main not defined"
3430 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003431 # NOTE: You should probably run a topology check after this
3432
3433 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3434
3435 description = "Restore a link to ensure that Link Discovery is " + \
3436 "working properly"
3437 main.case( description )
3438
Jon Hall4173b242017-09-12 17:04:38 -07003439 main.step( "Bring link between " + fromS + " and " + toS + " back up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003440 LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
3441 main.log.info( "Waiting " + str( linkSleep ) +
3442 " seconds for link up to be discovered" )
3443 time.sleep( linkSleep )
3444 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3445 onpass="Link up successful",
3446 onfail="Failed to bring link up" )
3447
3448 def switchDown( self, main ):
3449 """
3450 Switch Down
3451 """
3452 # NOTE: You should probably run a topology check after this
3453 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003454 assert main, "main not defined"
3455 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003456
3457 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3458
3459 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003460 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003461 main.case( description )
3462 switch = main.params[ 'kill' ][ 'switch' ]
3463 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3464
3465 # TODO: Make this switch parameterizable
3466 main.step( "Kill " + switch )
3467 main.log.info( "Deleting " + switch )
3468 main.Mininet1.delSwitch( switch )
3469 main.log.info( "Waiting " + str( switchSleep ) +
3470 " seconds for switch down to be discovered" )
3471 time.sleep( switchSleep )
3472 device = onosCli.getDevice( dpid=switchDPID )
3473 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003474 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003475 result = main.FALSE
3476 if device and device[ 'available' ] is False:
3477 result = main.TRUE
3478 utilities.assert_equals( expect=main.TRUE, actual=result,
3479 onpass="Kill switch successful",
3480 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003481
Devin Lim58046fa2017-07-05 16:55:00 -07003482 def switchUp( self, main ):
3483 """
3484 Switch Up
3485 """
3486 # NOTE: You should probably run a topology check after this
3487 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003488 assert main, "main not defined"
3489 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003490
3491 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3492 switch = main.params[ 'kill' ][ 'switch' ]
3493 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3494 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003495 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003496 description = "Adding a switch to ensure it is discovered correctly"
3497 main.case( description )
3498
3499 main.step( "Add back " + switch )
3500 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3501 for peer in links:
3502 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003503 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003504 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3505 main.log.info( "Waiting " + str( switchSleep ) +
3506 " seconds for switch up to be discovered" )
3507 time.sleep( switchSleep )
3508 device = onosCli.getDevice( dpid=switchDPID )
3509 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003510 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003511 result = main.FALSE
3512 if device and device[ 'available' ]:
3513 result = main.TRUE
3514 utilities.assert_equals( expect=main.TRUE, actual=result,
3515 onpass="add switch successful",
3516 onfail="Failed to add switch?" )
3517
3518 def startElectionApp( self, main ):
3519 """
3520 start election app on all onos nodes
3521 """
Devin Lim58046fa2017-07-05 16:55:00 -07003522 assert main, "main not defined"
3523 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003524
3525 main.case( "Start Leadership Election app" )
3526 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003527 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -07003528 appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003529 utilities.assert_equals(
3530 expect=main.TRUE,
3531 actual=appResult,
3532 onpass="Election app installed",
3533 onfail="Something went wrong with installing Leadership election" )
3534
3535 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003536 onosCli.electionTestRun()
3537 main.Cluster.command( "electionTestRun" )
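        # Run for leadership on every active node; the node returned by next()
        # may run twice, which should be harmless. The sleep below lets
        # leadership settle before the leaderboards are compared.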
Devin Lim58046fa2017-07-05 16:55:00 -07003538 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003539 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003540 utilities.assert_equals(
3541 expect=True,
3542 actual=sameResult,
3543 onpass="All nodes see the same leaderboards",
3544 onfail="Inconsistent leaderboards" )
3545
3546 if sameResult:
3547 leader = leaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003548 if onosCli.ipAddress in leader:
Devin Lim58046fa2017-07-05 16:55:00 -07003549 correctLeader = True
3550 else:
3551 correctLeader = False
3552 main.step( "First node was elected leader" )
3553 utilities.assert_equals(
3554 expect=True,
3555 actual=correctLeader,
3556 onpass="Correct leader was elected",
3557 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003558 main.Cluster.testLeader = leader
3559
Devin Lim58046fa2017-07-05 16:55:00 -07003560 def isElectionFunctional( self, main ):
3561 """
3562 Check that Leadership Election is still functional
3563 15.1 Run election on each node
3564 15.2 Check that each node has the same leaders and candidates
3565 15.3 Find current leader and withdraw
3566 15.4 Check that a new node was elected leader
3567 15.5 Check that that new leader was the candidate of old leader
3568 15.6 Run for election on old leader
3569 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3570 15.8 Make sure that the old leader was added to the candidate list
3571
3572 old and new variable prefixes refer to data from before vs after
3573         old and new variable prefixes refer to data from before vs. after the
3574         withdrawal, and later to before vs. after the re-election
3575 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003576 assert main, "main not defined"
3577 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003578
3579 description = "Check that Leadership Election is still functional"
3580 main.case( description )
3581         # NOTE: Need to re-run after restarts since being a candidate is not persistent
3582
3583 oldLeaders = [] # list of lists of each nodes' candidates before
3584 newLeaders = [] # list of lists of each nodes' candidates after
3585 oldLeader = '' # the old leader from oldLeaders, None if not same
3586         newLeader = ''  # the new leader from newLeaders, None if not same
3587 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3588 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003589 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003590 expectNoLeader = True
3591
3592 main.step( "Run for election on each node" )
Devin Lim142b5342017-07-20 15:22:39 -07003593 electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003594 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003595 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003596 actual=electionResult,
3597 onpass="All nodes successfully ran for leadership",
3598 onfail="At least one node failed to run for leadership" )
3599
3600 if electionResult == main.FALSE:
3601 main.log.error(
3602 "Skipping Test Case because Election Test App isn't loaded" )
3603 main.skipCase()
3604
3605 main.step( "Check that each node shows the same leader and candidates" )
3606 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003607 activeCLIs = main.Cluster.active()
3608 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Devin Lim58046fa2017-07-05 16:55:00 -07003609 if sameResult:
3610 oldLeader = oldLeaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003611 main.log.info( "Old leader: " + oldLeader )
Devin Lim58046fa2017-07-05 16:55:00 -07003612 else:
3613 oldLeader = None
3614 utilities.assert_equals(
3615 expect=True,
3616 actual=sameResult,
3617 onpass="Leaderboards are consistent for the election topic",
3618 onfail=failMessage )
3619
3620 main.step( "Find current leader and withdraw" )
3621 withdrawResult = main.TRUE
3622 # do some sanity checking on leader before using it
3623 if oldLeader is None:
3624 main.log.error( "Leadership isn't consistent." )
3625 withdrawResult = main.FALSE
3626 # Get the CLI of the oldLeader
Jon Hallca319892017-06-15 15:25:22 -07003627 for ctrl in main.Cluster.active():
3628 if oldLeader == ctrl.ipAddress:
3629 oldLeaderCLI = ctrl
Devin Lim58046fa2017-07-05 16:55:00 -07003630 break
3631 else: # FOR/ELSE statement
3632 main.log.error( "Leader election, could not find current leader" )
3633 if oldLeader:
3634 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3635 utilities.assert_equals(
3636 expect=main.TRUE,
3637 actual=withdrawResult,
3638 onpass="Node was withdrawn from election",
3639 onfail="Node was not withdrawn from election" )
3640
3641 main.step( "Check that a new node was elected leader" )
3642 failMessage = "Nodes have different leaders"
3643 # Get new leaders and candidates
3644 newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
3645 newLeader = None
3646 if newLeaderResult:
3647 if newLeaders[ 0 ][ 0 ] == 'none':
3648 main.log.error( "No leader was elected on at least 1 node" )
3649 if not expectNoLeader:
3650 newLeaderResult = False
3651 newLeader = newLeaders[ 0 ][ 0 ]
3652
3653 # Check that the new leader is not the older leader, which was withdrawn
3654 if newLeader == oldLeader:
3655 newLeaderResult = False
3656 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3657 " as the current leader" )
3658 utilities.assert_equals(
3659 expect=True,
3660 actual=newLeaderResult,
3661 onpass="Leadership election passed",
3662 onfail="Something went wrong with Leadership election" )
3663
3664 main.step( "Check that that new leader was the candidate of old leader" )
3665         # candidates[ 2 ] should become the top candidate after withdrawal
3666 correctCandidateResult = main.TRUE
3667 if expectNoLeader:
3668 if newLeader == 'none':
3669 main.log.info( "No leader expected. None found. Pass" )
3670 correctCandidateResult = main.TRUE
3671 else:
3672 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3673 correctCandidateResult = main.FALSE
3674 elif len( oldLeaders[ 0 ] ) >= 3:
3675 if newLeader == oldLeaders[ 0 ][ 2 ]:
3676 # correct leader was elected
3677 correctCandidateResult = main.TRUE
3678 else:
3679 correctCandidateResult = main.FALSE
3680 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3681 newLeader, oldLeaders[ 0 ][ 2 ] ) )
3682 else:
3683 main.log.warn( "Could not determine who should be the correct leader" )
3684 main.log.debug( oldLeaders[ 0 ] )
3685 correctCandidateResult = main.FALSE
3686 utilities.assert_equals(
3687 expect=main.TRUE,
3688 actual=correctCandidateResult,
3689 onpass="Correct Candidate Elected",
3690 onfail="Incorrect Candidate Elected" )
3691
3692         main.step( "Run for election on old leader (just so everyone " +
3693                    "is in the hat)" )
3694 if oldLeaderCLI is not None:
3695 runResult = oldLeaderCLI.electionTestRun()
3696 else:
3697 main.log.error( "No old leader to re-elect" )
3698 runResult = main.FALSE
3699 utilities.assert_equals(
3700 expect=main.TRUE,
3701 actual=runResult,
3702 onpass="App re-ran for election",
3703 onfail="App failed to run for election" )
3704
3705 main.step(
3706 "Check that oldLeader is a candidate, and leader if only 1 node" )
3707 # verify leader didn't just change
3708 # Get new leaders and candidates
3709 reRunLeaders = []
3710         time.sleep( 5 )  # Parameterize
3711 positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3712
3713 # Check that the re-elected node is last on the candidate List
3714 if not reRunLeaders[ 0 ]:
3715 positionResult = main.FALSE
3716 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
3717 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
3718 str( reRunLeaders[ 0 ] ) ) )
3719 positionResult = main.FALSE
3720 utilities.assert_equals(
3721 expect=True,
3722 actual=positionResult,
3723 onpass="Old leader successfully re-ran for election",
3724 onfail="Something went wrong with Leadership election after " +
3725 "the old leader re-ran for election" )
Jon Hallca319892017-06-15 15:25:22 -07003726
Devin Lim58046fa2017-07-05 16:55:00 -07003727 def installDistributedPrimitiveApp( self, main ):
3728 """
3729 Install Distributed Primitives app
3730 """
3731 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003732 assert main, "main not defined"
3733 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003734
3735 # Variables for the distributed primitives tests
3736 main.pCounterName = "TestON-Partitions"
3737 main.pCounterValue = 0
3738 main.onosSet = set( [] )
3739 main.onosSetName = "TestON-set"
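        # These names/values are shared state for the distributed primitives
        # test cases elsewhere in this class; they are assumed to be reset here
        # before those cases run.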
3740
3741 description = "Install Primitives app"
3742 main.case( description )
3743 main.step( "Install Primitives app" )
3744 appName = "org.onosproject.distributedprimitives"
Devin Lime9f0ccf2017-08-11 17:25:12 -07003745 appResults = main.Cluster.next().CLI.activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003746 utilities.assert_equals( expect=main.TRUE,
3747 actual=appResults,
3748 onpass="Primitives app activated",
3749 onfail="Primitives app not activated" )
3750 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003751 time.sleep( 5 ) # To allow all nodes to activate