blob: f1ddaef3ed3b25f0f4eacb5a478dc67546adeadf [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
2Copyright 2015 Open Networking Foundation (ONF)
3
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
11 (at your option) any later version.
12
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
21
Jon Halla440e872016-03-31 15:15:50 -070022import json
Jon Hall41d39f12016-04-11 22:54:35 -070023import time
Jon Halle1a3b752015-07-22 13:02:46 -070024
Jon Hallf37d44d2017-05-24 10:37:30 -070025
class HA():

    def __init__( self ):
        """
        Collection of utility functions shared by the ONOS HA test suites.
        """
        self.default = ''
Jon Hall57b50432015-10-22 10:20:10 -070030
Devin Lim58046fa2017-07-05 16:55:00 -070031 def customizeOnosGenPartitions( self ):
Devin Lim58046fa2017-07-05 16:55:00 -070032 # copy gen-partions file to ONOS
33 # NOTE: this assumes TestON and ONOS are on the same machine
34 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
35 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
36 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
37 main.ONOSbench.ip_address,
38 srcFile,
39 dstDir,
40 pwd=main.ONOSbench.pwd,
41 direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070042
Devin Lim58046fa2017-07-05 16:55:00 -070043 def cleanUpGenPartition( self ):
44 # clean up gen-partitions file
45 try:
46 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
47 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
48 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
49 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
50 main.log.info( " Cleaning custom gen partitions file, response was: \n" +
51 str( main.ONOSbench.handle.before ) )
52 except ( pexpect.TIMEOUT, pexpect.EOF ):
53 main.log.exception( "ONOSbench: pexpect exception found:" +
54 main.ONOSbench.handle.before )
Devin Lim44075962017-08-11 10:56:37 -070055 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070056
Devin Lim58046fa2017-07-05 16:55:00 -070057 def startingMininet( self ):
58 main.step( "Starting Mininet" )
59 # scp topo file to mininet
60 # TODO: move to params?
61 topoName = "obelisk.py"
62 filePath = main.ONOSbench.home + "/tools/test/topos/"
63 main.ONOSbench.scp( main.Mininet1,
64 filePath + topoName,
65 main.Mininet1.home,
66 direction="to" )
67 mnResult = main.Mininet1.startNet()
68 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
69 onpass="Mininet Started",
70 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070071
Devin Lim58046fa2017-07-05 16:55:00 -070072 def scalingMetadata( self ):
73 import re
Devin Lim142b5342017-07-20 15:22:39 -070074 main.step( "Generate initial metadata file" )
Devin Lim58046fa2017-07-05 16:55:00 -070075 main.scaling = main.params[ 'scaling' ].split( "," )
76 main.log.debug( main.scaling )
77 scale = main.scaling.pop( 0 )
78 main.log.debug( scale )
79 if "e" in scale:
80 equal = True
81 else:
82 equal = False
83 main.log.debug( equal )
Devin Lim142b5342017-07-20 15:22:39 -070084 main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
85 genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
Devin Lim58046fa2017-07-05 16:55:00 -070086 utilities.assert_equals( expect=main.TRUE, actual=genResult,
87 onpass="New cluster metadata file generated",
88 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070089
Devin Lim58046fa2017-07-05 16:55:00 -070090 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070091 main.step( "Generate initial metadata file" )
92 if main.Cluster.numCtrls >= 5:
93 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070094 else:
95 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim142b5342017-07-20 15:22:39 -070096 genResult = main.Server.generateFile( main.Cluster.numCtrls )
Devin Lim58046fa2017-07-05 16:55:00 -070097 utilities.assert_equals( expect=main.TRUE, actual=genResult,
98 onpass="New cluster metadata file generated",
99 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -0700100
Devin Lim142b5342017-07-20 15:22:39 -0700101 def setServerForCluster( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700102 import os
103 main.step( "Setup server for cluster metadata file" )
104 main.serverPort = main.params[ 'server' ][ 'port' ]
105 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
106 main.log.debug( "Root dir: {}".format( rootDir ) )
107 status = main.Server.start( main.ONOSbench,
108 rootDir,
109 port=main.serverPort,
110 logDir=main.logdir + "/server.log" )
111 utilities.assert_equals( expect=main.TRUE, actual=status,
112 onpass="Server started",
113 onfail="Failled to start SimpleHTTPServer" )
114
Jon Hall4f360bc2017-09-07 10:19:52 -0700115 def copyBackupConfig( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700116 main.step( "Copying backup config files" )
117 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
118 cp = main.ONOSbench.scp( main.ONOSbench,
119 main.onosServicepath,
120 main.onosServicepath + ".backup",
121 direction="to" )
122
123 utilities.assert_equals( expect=main.TRUE,
124 actual=cp,
125 onpass="Copy backup config file succeeded",
126 onfail="Copy backup config file failed" )
Jon Hall4f360bc2017-09-07 10:19:52 -0700127
128 def setMetadataUrl( self ):
129 # NOTE: You should probably backup the config before and reset the config after the test
Devin Lim58046fa2017-07-05 16:55:00 -0700130 # we need to modify the onos-service file to use remote metadata file
131 # url for cluster metadata file
132 iface = main.params[ 'server' ].get( 'interface' )
133 ip = main.ONOSbench.getIpAddr( iface=iface )
134 metaFile = "cluster.json"
135 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
136 main.log.warn( javaArgs )
137 main.log.warn( repr( javaArgs ) )
138 handle = main.ONOSbench.handle
139 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, main.onosServicepath )
140 main.log.warn( sed )
141 main.log.warn( repr( sed ) )
142 handle.sendline( sed )
143 handle.expect( metaFile )
144 output = handle.before
145 handle.expect( "\$" )
146 output += handle.before
147 main.log.debug( repr( output ) )
148
149 def cleanUpOnosService( self ):
150 # Cleanup custom onos-service file
151 main.ONOSbench.scp( main.ONOSbench,
152 main.onosServicepath + ".backup",
153 main.onosServicepath,
154 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700155
Jon Halla440e872016-03-31 15:15:50 -0700156 def consistentCheck( self ):
157 """
158 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700159
Jon Hallf37d44d2017-05-24 10:37:30 -0700160 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700161 - onosCounters is the parsed json output of the counters command on
162 all nodes
163 - consistent is main.TRUE if all "TestON" counters are consitent across
164 all nodes or main.FALSE
Jon Halla440e872016-03-31 15:15:50 -0700165 """
Jon Halle1a3b752015-07-22 13:02:46 -0700166 try:
Jon Halla440e872016-03-31 15:15:50 -0700167 # Get onos counters results
168 onosCountersRaw = []
169 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700170 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700171 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700172 name="counters-" + str( ctrl ),
173 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700174 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700175 'randomTime': True } )
176 threads.append( t )
177 t.start()
178 for t in threads:
179 t.join()
180 onosCountersRaw.append( t.result )
181 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700182 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700183 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700184 onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700185 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700186 main.log.error( "Could not parse counters response from " +
Devin Lim142b5342017-07-20 15:22:39 -0700187 str( main.Cluster.active( i ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700188 main.log.warn( repr( onosCountersRaw[ i ] ) )
189 onosCounters.append( [] )
190
191 testCounters = {}
192 # make a list of all the "TestON-*" counters in ONOS
Jon Hall41d39f12016-04-11 22:54:35 -0700193 # lookes like a dict whose keys are the name of the ONOS node and
194 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700195 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700196 # }
197 # NOTE: There is an assumtion that all nodes are active
198 # based on the above for loops
199 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700200 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700201 if 'TestON' in key:
Devin Lim142b5342017-07-20 15:22:39 -0700202 node = str( main.Cluster.active( controller[ 0 ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700203 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700204 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700205 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700206 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700207 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700208 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700209 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
210 if all( tmp ):
211 consistent = main.TRUE
212 else:
213 consistent = main.FALSE
214 main.log.error( "ONOS nodes have different values for counters:\n" +
215 testCounters )
216 return ( onosCounters, consistent )
217 except Exception:
218 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700219 main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700220
221 def counterCheck( self, counterName, counterValue ):
222 """
223 Checks that TestON counters are consistent across all nodes and that
224 specified counter is in ONOS with the given value
225 """
226 try:
227 correctResults = main.TRUE
228 # Get onos counters results and consistentCheck
229 onosCounters, consistent = self.consistentCheck()
230 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700231 for i in range( len( main.Cluster.active() ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700232 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700233 onosValue = None
234 try:
235 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700236 except AttributeError:
Devin Lim142b5342017-07-20 15:22:39 -0700237 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -0700238 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700239 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700240 correctResults = main.FALSE
241 if onosValue == counterValue:
242 main.log.info( counterName + " counter value is correct" )
243 else:
Jon Hall41d39f12016-04-11 22:54:35 -0700244 main.log.error( counterName +
245 " counter value is incorrect," +
246 " expected value: " + str( counterValue ) +
247 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700248 correctResults = main.FALSE
249 return consistent and correctResults
250 except Exception:
251 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700252 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700253
254 def consistentLeaderboards( self, nodes ):
255 TOPIC = 'org.onosproject.election'
256 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700257 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700258 for n in range( 5 ): # Retry in case election is still happening
259 leaderList = []
260 # Get all leaderboards
261 for cli in nodes:
262 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
263 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700264 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700265 leaderList is not None
266 main.log.debug( leaderList )
267 main.log.warn( result )
268 if result:
269 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700270 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700271 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
272 return ( result, leaderList )
273
274 def nodesCheck( self, nodes ):
275 nodesOutput = []
276 results = True
277 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700278 for node in nodes:
279 t = main.Thread( target=node.nodes,
280 name="nodes-" + str( node ),
Jon Hallf37d44d2017-05-24 10:37:30 -0700281 args=[] )
Jon Hall41d39f12016-04-11 22:54:35 -0700282 threads.append( t )
283 t.start()
284
285 for t in threads:
286 t.join()
287 nodesOutput.append( t.result )
Jon Hallca319892017-06-15 15:25:22 -0700288 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Jon Hall41d39f12016-04-11 22:54:35 -0700289 for i in nodesOutput:
290 try:
291 current = json.loads( i )
292 activeIps = []
293 currentResult = False
294 for node in current:
Jon Hallf37d44d2017-05-24 10:37:30 -0700295 if node[ 'state' ] == 'READY':
296 activeIps.append( node[ 'ip' ] )
Jon Hall41d39f12016-04-11 22:54:35 -0700297 activeIps.sort()
298 if ips == activeIps:
299 currentResult = True
300 except ( ValueError, TypeError ):
301 main.log.error( "Error parsing nodes output" )
302 main.log.warn( repr( i ) )
303 currentResult = False
304 results = results and currentResult
305 return results
Jon Hallca319892017-06-15 15:25:22 -0700306
Devin Lim58046fa2017-07-05 16:55:00 -0700307 def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
308 # GRAPHS
309 # NOTE: important params here:
310 # job = name of Jenkins job
311 # Plot Name = Plot-HA, only can be used if multiple plots
312 # index = The number of the graph under plot name
313 job = testName
314 graphs = '<ac:structured-macro ac:name="html">\n'
315 graphs += '<ac:plain-text-body><![CDATA[\n'
316 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
317 '/plot/' + plotName + '/getPlot?index=' + str( index ) +\
318 '&width=500&height=300"' +\
319 'noborder="0" width="500" height="300" scrolling="yes" ' +\
320 'seamless="seamless"></iframe>\n'
321 graphs += ']]></ac:plain-text-body>\n'
322 graphs += '</ac:structured-macro>\n'
323 main.log.wiki( graphs )
Jon Hallca319892017-06-15 15:25:22 -0700324
Devin Lim58046fa2017-07-05 16:55:00 -0700325 def initialSetUp( self, serviceClean=False ):
326 """
327 rest of initialSetup
328 """
329
Devin Lim58046fa2017-07-05 16:55:00 -0700330
331 if main.params[ 'tcpdump' ].lower() == "true":
332 main.step( "Start Packet Capture MN" )
333 main.Mininet2.startTcpdump(
334 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
335 + "-MN.pcap",
336 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
337 port=main.params[ 'MNtcpdump' ][ 'port' ] )
338
339 if serviceClean:
340 main.step( "Clean up ONOS service changes" )
Devin Lim142b5342017-07-20 15:22:39 -0700341 main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
342 main.ONOSbench.handle.expect( "\$" )
343 main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
344 main.ONOSbench.handle.expect( "\$" )
Devin Lim58046fa2017-07-05 16:55:00 -0700345
346 main.step( "Checking ONOS nodes" )
347 nodeResults = utilities.retry( self.nodesCheck,
348 False,
Jon Hallca319892017-06-15 15:25:22 -0700349 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -0700350 attempts=5 )
351
352 utilities.assert_equals( expect=True, actual=nodeResults,
353 onpass="Nodes check successful",
354 onfail="Nodes check NOT successful" )
355
356 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -0700357 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700358 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -0700359 ctrl.name,
360 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700361 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -0700362 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700363
364 main.step( "Activate apps defined in the params file" )
365 # get data from the params
366 apps = main.params.get( 'apps' )
367 if apps:
368 apps = apps.split( ',' )
Jon Hallca319892017-06-15 15:25:22 -0700369 main.log.debug( "Apps: " + str( apps ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700370 activateResult = True
371 for app in apps:
Devin Lim142b5342017-07-20 15:22:39 -0700372 main.Cluster.active( 0 ).app( app, "Activate" )
Devin Lim58046fa2017-07-05 16:55:00 -0700373 # TODO: check this worked
374 time.sleep( 10 ) # wait for apps to activate
375 for app in apps:
Devin Lim142b5342017-07-20 15:22:39 -0700376 state = main.Cluster.active( 0 ).appStatus( app )
Devin Lim58046fa2017-07-05 16:55:00 -0700377 if state == "ACTIVE":
378 activateResult = activateResult and True
379 else:
380 main.log.error( "{} is in {} state".format( app, state ) )
381 activateResult = False
382 utilities.assert_equals( expect=True,
383 actual=activateResult,
384 onpass="Successfully activated apps",
385 onfail="Failed to activate apps" )
386 else:
387 main.log.warn( "No apps were specified to be loaded after startup" )
388
389 main.step( "Set ONOS configurations" )
Jon Hallca319892017-06-15 15:25:22 -0700390 # FIXME: This shoudl be part of the general startup sequence
Devin Lim58046fa2017-07-05 16:55:00 -0700391 config = main.params.get( 'ONOS_Configuration' )
392 if config:
393 main.log.debug( config )
394 checkResult = main.TRUE
395 for component in config:
396 for setting in config[ component ]:
397 value = config[ component ][ setting ]
Jon Hallca319892017-06-15 15:25:22 -0700398 check = main.Cluster.next().setCfg( component, setting, value )
Devin Lim58046fa2017-07-05 16:55:00 -0700399 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
400 checkResult = check and checkResult
401 utilities.assert_equals( expect=main.TRUE,
402 actual=checkResult,
403 onpass="Successfully set config",
404 onfail="Failed to set config" )
405 else:
406 main.log.warn( "No configurations were specified to be changed after startup" )
407
Jon Hallca319892017-06-15 15:25:22 -0700408 main.step( "Check app ids" )
409 appCheck = self.appCheck()
410 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700411 onpass="App Ids seem to be correct",
412 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700413
Jon Hallca319892017-06-15 15:25:22 -0700414 def commonChecks( self ):
415 # TODO: make this assertable or assert in here?
416 self.topicsCheck()
417 self.partitionsCheck()
418 self.pendingMapCheck()
419 self.appCheck()
420
421 def topicsCheck( self, extraTopics=[] ):
422 """
423 Check for work partition topics in leaders output
424 """
425 leaders = main.Cluster.next().leaders()
426 missing = False
427 try:
428 if leaders:
429 parsedLeaders = json.loads( leaders )
430 output = json.dumps( parsedLeaders,
431 sort_keys=True,
432 indent=4,
433 separators=( ',', ': ' ) )
434 main.log.debug( "Leaders: " + output )
435 # check for all intent partitions
436 topics = []
437 for i in range( 14 ):
438 topics.append( "work-partition-" + str( i ) )
439 topics += extraTopics
440 main.log.debug( topics )
441 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
442 for topic in topics:
443 if topic not in ONOStopics:
444 main.log.error( "Error: " + topic +
445 " not in leaders" )
446 missing = True
447 else:
448 main.log.error( "leaders() returned None" )
449 except ( ValueError, TypeError ):
450 main.log.exception( "Error parsing leaders" )
451 main.log.error( repr( leaders ) )
452 if missing:
453 #NOTE Can we refactor this into the Cluster class? Maybe an option to print the output of a command from each node?
454 for ctrl in main.Cluster.active():
455 response = ctrl.CLI.leaders( jsonFormat=False )
456 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
457 str( response ) )
458 return missing
459
460 def partitionsCheck( self ):
461 # TODO: return something assertable
462 partitions = main.Cluster.next().partitions()
463 try:
464 if partitions:
465 parsedPartitions = json.loads( partitions )
466 output = json.dumps( parsedPartitions,
467 sort_keys=True,
468 indent=4,
469 separators=( ',', ': ' ) )
470 main.log.debug( "Partitions: " + output )
471 # TODO check for a leader in all paritions
472 # TODO check for consistency among nodes
473 else:
474 main.log.error( "partitions() returned None" )
475 except ( ValueError, TypeError ):
476 main.log.exception( "Error parsing partitions" )
477 main.log.error( repr( partitions ) )
478
479 def pendingMapCheck( self ):
480 pendingMap = main.Cluster.next().pendingMap()
481 try:
482 if pendingMap:
483 parsedPending = json.loads( pendingMap )
484 output = json.dumps( parsedPending,
485 sort_keys=True,
486 indent=4,
487 separators=( ',', ': ' ) )
488 main.log.debug( "Pending map: " + output )
489 # TODO check something here?
490 else:
491 main.log.error( "pendingMap() returned None" )
492 except ( ValueError, TypeError ):
493 main.log.exception( "Error parsing pending map" )
494 main.log.error( repr( pendingMap ) )
495
496 def appCheck( self ):
497 """
498 Check App IDs on all nodes
499 """
500 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
501 appResults = main.Cluster.command( "appToIDCheck" )
502 appCheck = all( i == main.TRUE for i in appResults )
503 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700504 ctrl = main.Cluster.active( 0 )
Jon Hallca319892017-06-15 15:25:22 -0700505 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
506 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
507 return appCheck
508
Jon Halle0f0b342017-04-18 11:43:47 -0700509 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
510 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700511 completedValues = main.Cluster.command( "workQueueTotalCompleted",
512 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700513 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700514 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700515 completedResult = all( completedResults )
516 if not completedResult:
517 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
518 workQueueName, completed, completedValues ) )
519
520 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700521 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
522 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700523 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700524 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700525 inProgressResult = all( inProgressResults )
526 if not inProgressResult:
527 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
528 workQueueName, inProgress, inProgressValues ) )
529
530 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700531 pendingValues = main.Cluster.command( "workQueueTotalPending",
532 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700533 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700534 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700535 pendingResult = all( pendingResults )
536 if not pendingResult:
537 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
538 workQueueName, pending, pendingValues ) )
539 return completedResult and inProgressResult and pendingResult
540
Devin Lim58046fa2017-07-05 16:55:00 -0700541 def assignDevices( self, main ):
542 """
543 Assign devices to controllers
544 """
545 import re
Devin Lim58046fa2017-07-05 16:55:00 -0700546 assert main, "main not defined"
547 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700548
549 main.case( "Assigning devices to controllers" )
550 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
551 "and check that an ONOS node becomes the " + \
552 "master of the device."
553 main.step( "Assign switches to controllers" )
554
Jon Hallca319892017-06-15 15:25:22 -0700555 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -0700556 swList = []
557 for i in range( 1, 29 ):
558 swList.append( "s" + str( i ) )
559 main.Mininet1.assignSwController( sw=swList, ip=ipList )
560
561 mastershipCheck = main.TRUE
562 for i in range( 1, 29 ):
563 response = main.Mininet1.getSwController( "s" + str( i ) )
564 try:
565 main.log.info( str( response ) )
566 except Exception:
567 main.log.info( repr( response ) )
Devin Lim142b5342017-07-20 15:22:39 -0700568 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -0700569 if re.search( "tcp:" + ctrl.ipAddress, response ):
Devin Lim58046fa2017-07-05 16:55:00 -0700570 mastershipCheck = mastershipCheck and main.TRUE
571 else:
Jon Hallca319892017-06-15 15:25:22 -0700572 main.log.error( "Error, node " + repr( ctrl )+ " is " +
Devin Lim58046fa2017-07-05 16:55:00 -0700573 "not in the list of controllers s" +
574 str( i ) + " is connecting to." )
575 mastershipCheck = main.FALSE
576 utilities.assert_equals(
577 expect=main.TRUE,
578 actual=mastershipCheck,
579 onpass="Switch mastership assigned correctly",
580 onfail="Switches not assigned correctly to controllers" )
Jon Hallca319892017-06-15 15:25:22 -0700581
Devin Lim58046fa2017-07-05 16:55:00 -0700582 def assignIntents( self, main ):
583 """
584 Assign intents
585 """
586 import time
587 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700588 assert main, "main not defined"
589 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700590 try:
591 main.HAlabels
592 except ( NameError, AttributeError ):
593 main.log.error( "main.HAlabels not defined, setting to []" )
594 main.HAlabels = []
595 try:
596 main.HAdata
597 except ( NameError, AttributeError ):
598 main.log.error( "data not defined, setting to []" )
599 main.HAdata = []
600 main.case( "Adding host Intents" )
601 main.caseExplanation = "Discover hosts by using pingall then " +\
602 "assign predetermined host-to-host intents." +\
603 " After installation, check that the intent" +\
604 " is distributed to all nodes and the state" +\
605 " is INSTALLED"
606
607 # install onos-app-fwd
608 main.step( "Install reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700609 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -0700610 installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700611 utilities.assert_equals( expect=main.TRUE, actual=installResults,
612 onpass="Install fwd successful",
613 onfail="Install fwd failed" )
614
615 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700616 appCheck = self.appCheck()
617 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700618 onpass="App Ids seem to be correct",
619 onfail="Something is wrong with app Ids" )
620
621 main.step( "Discovering Hosts( Via pingall for now )" )
622 # FIXME: Once we have a host discovery mechanism, use that instead
623 # REACTIVE FWD test
624 pingResult = main.FALSE
625 passMsg = "Reactive Pingall test passed"
626 time1 = time.time()
627 pingResult = main.Mininet1.pingall()
628 time2 = time.time()
629 if not pingResult:
630 main.log.warn( "First pingall failed. Trying again..." )
631 pingResult = main.Mininet1.pingall()
632 passMsg += " on the second try"
633 utilities.assert_equals(
634 expect=main.TRUE,
635 actual=pingResult,
636 onpass=passMsg,
637 onfail="Reactive Pingall failed, " +
638 "one or more ping pairs failed" )
639 main.log.info( "Time for pingall: %2f seconds" %
640 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700641 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700642 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700643 # timeout for fwd flows
644 time.sleep( 11 )
645 # uninstall onos-app-fwd
646 main.step( "Uninstall reactive forwarding app" )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700647 uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700648 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
649 onpass="Uninstall fwd successful",
650 onfail="Uninstall fwd failed" )
651
652 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700653 appCheck2 = self.appCheck()
654 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700655 onpass="App Ids seem to be correct",
656 onfail="Something is wrong with app Ids" )
657
658 main.step( "Add host intents via cli" )
659 intentIds = []
660 # TODO: move the host numbers to params
661 # Maybe look at all the paths we ping?
662 intentAddResult = True
663 hostResult = main.TRUE
664 for i in range( 8, 18 ):
665 main.log.info( "Adding host intent between h" + str( i ) +
666 " and h" + str( i + 10 ) )
667 host1 = "00:00:00:00:00:" + \
668 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
669 host2 = "00:00:00:00:00:" + \
670 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
671 # NOTE: getHost can return None
Jon Hallca319892017-06-15 15:25:22 -0700672 host1Dict = onosCli.CLI.getHost( host1 )
673 host2Dict = onosCli.CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700674 host1Id = None
675 host2Id = None
676 if host1Dict and host2Dict:
677 host1Id = host1Dict.get( 'id', None )
678 host2Id = host2Dict.get( 'id', None )
679 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700680 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700681 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700682 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700683 if tmpId:
684 main.log.info( "Added intent with id: " + tmpId )
685 intentIds.append( tmpId )
686 else:
687 main.log.error( "addHostIntent returned: " +
688 repr( tmpId ) )
689 else:
690 main.log.error( "Error, getHost() failed for h" + str( i ) +
691 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700692 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700693 try:
Jon Hallca319892017-06-15 15:25:22 -0700694 output = json.dumps( json.loads( hosts ),
695 sort_keys=True,
696 indent=4,
697 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700698 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700699 output = repr( hosts )
700 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700701 hostResult = main.FALSE
702 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
703 onpass="Found a host id for each host",
704 onfail="Error looking up host ids" )
705
706 intentStart = time.time()
707 onosIds = onosCli.getAllIntentsId()
708 main.log.info( "Submitted intents: " + str( intentIds ) )
709 main.log.info( "Intents in ONOS: " + str( onosIds ) )
710 for intent in intentIds:
711 if intent in onosIds:
712 pass # intent submitted is in onos
713 else:
714 intentAddResult = False
715 if intentAddResult:
716 intentStop = time.time()
717 else:
718 intentStop = None
719 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700720 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700721 intentStates = []
722 installedCheck = True
723 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
724 count = 0
725 try:
726 for intent in json.loads( intents ):
727 state = intent.get( 'state', None )
728 if "INSTALLED" not in state:
729 installedCheck = False
730 intentId = intent.get( 'id', None )
731 intentStates.append( ( intentId, state ) )
732 except ( ValueError, TypeError ):
733 main.log.exception( "Error parsing intents" )
734 # add submitted intents not in the store
735 tmplist = [ i for i, s in intentStates ]
736 missingIntents = False
737 for i in intentIds:
738 if i not in tmplist:
739 intentStates.append( ( i, " - " ) )
740 missingIntents = True
741 intentStates.sort()
742 for i, s in intentStates:
743 count += 1
744 main.log.info( "%-6s%-15s%-15s" %
745 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700746 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700747
748 intentAddResult = bool( intentAddResult and not missingIntents and
749 installedCheck )
750 if not intentAddResult:
751 main.log.error( "Error in pushing host intents to ONOS" )
752
753 main.step( "Intent Anti-Entropy dispersion" )
754 for j in range( 100 ):
755 correct = True
756 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700757 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700758 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700759 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700760 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700761 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700762 str( sorted( onosIds ) ) )
763 if sorted( ids ) != sorted( intentIds ):
764 main.log.warn( "Set of intent IDs doesn't match" )
765 correct = False
766 break
767 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700768 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700769 for intent in intents:
770 if intent[ 'state' ] != "INSTALLED":
771 main.log.warn( "Intent " + intent[ 'id' ] +
772 " is " + intent[ 'state' ] )
773 correct = False
774 break
775 if correct:
776 break
777 else:
778 time.sleep( 1 )
779 if not intentStop:
780 intentStop = time.time()
781 global gossipTime
782 gossipTime = intentStop - intentStart
783 main.log.info( "It took about " + str( gossipTime ) +
784 " seconds for all intents to appear in each node" )
785 append = False
786 title = "Gossip Intents"
787 count = 1
788 while append is False:
789 curTitle = title + str( count )
790 if curTitle not in main.HAlabels:
791 main.HAlabels.append( curTitle )
792 main.HAdata.append( str( gossipTime ) )
793 append = True
794 else:
795 count += 1
796 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700797 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700798 utilities.assert_greater_equals(
799 expect=maxGossipTime, actual=gossipTime,
800 onpass="ECM anti-entropy for intents worked within " +
801 "expected time",
802 onfail="Intent ECM anti-entropy took too long. " +
803 "Expected time:{}, Actual time:{}".format( maxGossipTime,
804 gossipTime ) )
805 if gossipTime <= maxGossipTime:
806 intentAddResult = True
807
Jon Hallca319892017-06-15 15:25:22 -0700808 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700809 if not intentAddResult or "key" in pendingMap:
810 import time
811 installedCheck = True
812 main.log.info( "Sleeping 60 seconds to see if intents are found" )
813 time.sleep( 60 )
814 onosIds = onosCli.getAllIntentsId()
815 main.log.info( "Submitted intents: " + str( intentIds ) )
816 main.log.info( "Intents in ONOS: " + str( onosIds ) )
817 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700818 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700819 intentStates = []
820 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
821 count = 0
822 try:
823 for intent in json.loads( intents ):
824 # Iter through intents of a node
825 state = intent.get( 'state', None )
826 if "INSTALLED" not in state:
827 installedCheck = False
828 intentId = intent.get( 'id', None )
829 intentStates.append( ( intentId, state ) )
830 except ( ValueError, TypeError ):
831 main.log.exception( "Error parsing intents" )
832 # add submitted intents not in the store
833 tmplist = [ i for i, s in intentStates ]
834 for i in intentIds:
835 if i not in tmplist:
836 intentStates.append( ( i, " - " ) )
837 intentStates.sort()
838 for i, s in intentStates:
839 count += 1
840 main.log.info( "%-6s%-15s%-15s" %
841 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700842 self.topicsCheck( [ "org.onosproject.election" ] )
843 self.partitionsCheck()
844 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700845
    def pingAcrossHostIntent( self, main ):
        """
        Verify connectivity across the host intents added earlier.

        Waits ( up to ~40s ) for all intents to reach INSTALLED, then pings
        between each host pair h<i> <-> h<i+10> for i in 8..17.  If pings
        fail, dumps the intent store for debugging, re-checks topic
        leadership / partitions / pending map, optionally waits 60s for
        intents to settle, and pings again.

        main - the TestON main object holding drivers, params and loggers.
        Results are reported through utilities.assert_equals steps rather
        than a return value.
        """
        import json
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        # Any active node can be queried; the stores are shared cluster-wide
        onosCli = main.Cluster.next()
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until every intent reports INSTALLED, or give up after 40 tries
        # ( ~40 seconds, one second sleep per failed pass )
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.CLI.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                        main.log.debug( "Failed intent: " + str( intent ) )
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                # Response was not valid JSON; leave installedCheck as-is
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Host pairing matches the intents added earlier: h8<->h18 ... h17<->h27
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            # Dump the raw intent store to aid debugging the ping failures
            try:
                tmpIntents = onosCli.CLI.intents()
                output = json.dumps( json.loads( tmpIntents ),
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
            except ( ValueError, TypeError ):
                output = repr( tmpIntents )
            main.log.debug( "ONOS1 intents: " + output )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        # NOTE(review): expect=False implies topicsCheck() returns a falsy
        # value on success ( e.g. no missing topics ) — confirm against the
        # helper's definition, which is outside this view.
        topicsCheck = self.topicsCheck()
        utilities.assert_equals( expect=False, actual=topicsCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost" )
        self.partitionsCheck()
        self.pendingMapCheck()

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.CLI.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            self.commonChecks()

        # Print flowrules
        main.log.debug( onosCli.CLI.flows() )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( str( onosCli.name ) + " intents: " )
            try:
                tmpIntents = onosCli.CLI.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )
993
Devin Lim142b5342017-07-20 15:22:39 -0700994 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700995 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700996 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700997 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700998 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700999 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07001000 actual=rolesNotNull,
1001 onpass="Each device has a master",
1002 onfail="Some devices don't have a master assigned" )
1003
Devin Lim142b5342017-07-20 15:22:39 -07001004 def checkTheRole( self ):
1005 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -07001006 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -07001007 consistentMastership = True
1008 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001009 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001010 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001011 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001012 main.log.error( "Error in getting " + node + " roles" )
1013 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001014 repr( ONOSMastership[ i ] ) )
1015 rolesResults = False
1016 utilities.assert_equals(
1017 expect=True,
1018 actual=rolesResults,
1019 onpass="No error in reading roles output",
1020 onfail="Error in reading roles from ONOS" )
1021
1022 main.step( "Check for consistency in roles from each controller" )
1023 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1024 main.log.info(
1025 "Switch roles are consistent across all ONOS nodes" )
1026 else:
1027 consistentMastership = False
1028 utilities.assert_equals(
1029 expect=True,
1030 actual=consistentMastership,
1031 onpass="Switch roles are consistent across all ONOS nodes",
1032 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -07001033 return ONOSMastership, rolesResults, consistentMastership
1034
1035 def checkingIntents( self ):
1036 main.step( "Get the intents from each controller" )
1037 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
1038 intentsResults = True
1039 for i in range( len( ONOSIntents ) ):
1040 node = str( main.Cluster.active( i ) )
1041 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1042 main.log.error( "Error in getting " + node + " intents" )
1043 main.log.warn( node + " intents response: " +
1044 repr( ONOSIntents[ i ] ) )
1045 intentsResults = False
1046 utilities.assert_equals(
1047 expect=True,
1048 actual=intentsResults,
1049 onpass="No error in reading intents output",
1050 onfail="Error in reading intents from ONOS" )
1051 return ONOSIntents, intentsResults
1052
1053 def readingState( self, main ):
1054 """
1055 Reading state of ONOS
1056 """
1057 import json
1058 import time
1059 assert main, "main not defined"
1060 assert utilities.assert_equals, "utilities.assert_equals not defined"
1061 try:
1062 from tests.dependencies.topology import Topology
1063 except ImportError:
1064 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001065 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001066 try:
1067 main.topoRelated
1068 except ( NameError, AttributeError ):
1069 main.topoRelated = Topology()
1070 main.case( "Setting up and gathering data for current state" )
1071 # The general idea for this test case is to pull the state of
1072 # ( intents,flows, topology,... ) from each ONOS node
1073 # We can then compare them with each other and also with past states
1074
1075 global mastershipState
1076 mastershipState = '[]'
1077
1078 self.checkRoleNotNull()
1079
1080 main.step( "Get the Mastership of each switch from each controller" )
1081 mastershipCheck = main.FALSE
1082
1083 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001084
1085 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001086 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001087 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001088 try:
1089 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001090 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001091 json.dumps(
1092 json.loads( ONOSMastership[ i ] ),
1093 sort_keys=True,
1094 indent=4,
1095 separators=( ',', ': ' ) ) )
1096 except ( ValueError, TypeError ):
1097 main.log.warn( repr( ONOSMastership[ i ] ) )
1098 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001099 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001100 mastershipState = ONOSMastership[ 0 ]
1101
Devin Lim142b5342017-07-20 15:22:39 -07001102
Devin Lim58046fa2017-07-05 16:55:00 -07001103 global intentState
1104 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001105 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001106 intentCheck = main.FALSE
1107 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001108
Devin Lim58046fa2017-07-05 16:55:00 -07001109
1110 main.step( "Check for consistency in Intents from each controller" )
1111 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1112 main.log.info( "Intents are consistent across all ONOS " +
1113 "nodes" )
1114 else:
1115 consistentIntents = False
1116 main.log.error( "Intents not consistent" )
1117 utilities.assert_equals(
1118 expect=True,
1119 actual=consistentIntents,
1120 onpass="Intents are consistent across all ONOS nodes",
1121 onfail="ONOS nodes have different views of intents" )
1122
1123 if intentsResults:
1124 # Try to make it easy to figure out what is happening
1125 #
1126 # Intent ONOS1 ONOS2 ...
1127 # 0x01 INSTALLED INSTALLING
1128 # ... ... ...
1129 # ... ... ...
1130 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001131 for ctrl in main.Cluster.active():
1132 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001133 main.log.warn( title )
1134 # get all intent keys in the cluster
1135 keys = []
1136 try:
1137 # Get the set of all intent keys
1138 for nodeStr in ONOSIntents:
1139 node = json.loads( nodeStr )
1140 for intent in node:
1141 keys.append( intent.get( 'id' ) )
1142 keys = set( keys )
1143 # For each intent key, print the state on each node
1144 for key in keys:
1145 row = "%-13s" % key
1146 for nodeStr in ONOSIntents:
1147 node = json.loads( nodeStr )
1148 for intent in node:
1149 if intent.get( 'id', "Error" ) == key:
1150 row += "%-15s" % intent.get( 'state' )
1151 main.log.warn( row )
1152 # End of intent state table
1153 except ValueError as e:
1154 main.log.exception( e )
1155 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1156
1157 if intentsResults and not consistentIntents:
1158 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001159 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001160 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1161 sort_keys=True,
1162 indent=4,
1163 separators=( ',', ': ' ) ) )
1164 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001165 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001166 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001167 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001168 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1169 sort_keys=True,
1170 indent=4,
1171 separators=( ',', ': ' ) ) )
1172 else:
Jon Hallca319892017-06-15 15:25:22 -07001173 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001174 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001175 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001176 intentState = ONOSIntents[ 0 ]
1177
1178 main.step( "Get the flows from each controller" )
1179 global flowState
1180 flowState = []
Devin Lim142b5342017-07-20 15:22:39 -07001181 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001182 ONOSFlowsJson = []
1183 flowCheck = main.FALSE
1184 consistentFlows = True
1185 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001186 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001187 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001188 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001189 main.log.error( "Error in getting " + node + " flows" )
1190 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001191 repr( ONOSFlows[ i ] ) )
1192 flowsResults = False
1193 ONOSFlowsJson.append( None )
1194 else:
1195 try:
1196 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1197 except ( ValueError, TypeError ):
1198 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001199 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001200 " response as json." )
1201 main.log.error( repr( ONOSFlows[ i ] ) )
1202 ONOSFlowsJson.append( None )
1203 flowsResults = False
1204 utilities.assert_equals(
1205 expect=True,
1206 actual=flowsResults,
1207 onpass="No error in reading flows output",
1208 onfail="Error in reading flows from ONOS" )
1209
1210 main.step( "Check for consistency in Flows from each controller" )
1211 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1212 if all( tmp ):
1213 main.log.info( "Flow count is consistent across all ONOS nodes" )
1214 else:
1215 consistentFlows = False
1216 utilities.assert_equals(
1217 expect=True,
1218 actual=consistentFlows,
1219 onpass="The flow count is consistent across all ONOS nodes",
1220 onfail="ONOS nodes have different flow counts" )
1221
1222 if flowsResults and not consistentFlows:
1223 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001224 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001225 try:
1226 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001227 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001228 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1229 indent=4, separators=( ',', ': ' ) ) )
1230 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001231 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001232 repr( ONOSFlows[ i ] ) )
1233 elif flowsResults and consistentFlows:
1234 flowCheck = main.TRUE
1235 flowState = ONOSFlows[ 0 ]
1236
1237 main.step( "Get the OF Table entries" )
1238 global flows
1239 flows = []
1240 for i in range( 1, 29 ):
1241 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1242 if flowCheck == main.FALSE:
1243 for table in flows:
1244 main.log.warn( table )
1245 # TODO: Compare switch flow tables with ONOS flow tables
1246
1247 main.step( "Start continuous pings" )
1248 main.Mininet2.pingLong(
1249 src=main.params[ 'PING' ][ 'source1' ],
1250 target=main.params[ 'PING' ][ 'target1' ],
1251 pingTime=500 )
1252 main.Mininet2.pingLong(
1253 src=main.params[ 'PING' ][ 'source2' ],
1254 target=main.params[ 'PING' ][ 'target2' ],
1255 pingTime=500 )
1256 main.Mininet2.pingLong(
1257 src=main.params[ 'PING' ][ 'source3' ],
1258 target=main.params[ 'PING' ][ 'target3' ],
1259 pingTime=500 )
1260 main.Mininet2.pingLong(
1261 src=main.params[ 'PING' ][ 'source4' ],
1262 target=main.params[ 'PING' ][ 'target4' ],
1263 pingTime=500 )
1264 main.Mininet2.pingLong(
1265 src=main.params[ 'PING' ][ 'source5' ],
1266 target=main.params[ 'PING' ][ 'target5' ],
1267 pingTime=500 )
1268 main.Mininet2.pingLong(
1269 src=main.params[ 'PING' ][ 'source6' ],
1270 target=main.params[ 'PING' ][ 'target6' ],
1271 pingTime=500 )
1272 main.Mininet2.pingLong(
1273 src=main.params[ 'PING' ][ 'source7' ],
1274 target=main.params[ 'PING' ][ 'target7' ],
1275 pingTime=500 )
1276 main.Mininet2.pingLong(
1277 src=main.params[ 'PING' ][ 'source8' ],
1278 target=main.params[ 'PING' ][ 'target8' ],
1279 pingTime=500 )
1280 main.Mininet2.pingLong(
1281 src=main.params[ 'PING' ][ 'source9' ],
1282 target=main.params[ 'PING' ][ 'target9' ],
1283 pingTime=500 )
1284 main.Mininet2.pingLong(
1285 src=main.params[ 'PING' ][ 'source10' ],
1286 target=main.params[ 'PING' ][ 'target10' ],
1287 pingTime=500 )
1288
1289 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001290 devices = main.topoRelated.getAll( "devices" )
1291 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1292 ports = main.topoRelated.getAll( "ports" )
1293 links = main.topoRelated.getAll( "links" )
1294 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001295 # Compare json objects for hosts and dataplane clusters
1296
1297 # hosts
1298 main.step( "Host view is consistent across ONOS nodes" )
1299 consistentHostsResult = main.TRUE
1300 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001301 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001302 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1303 if hosts[ controller ] == hosts[ 0 ]:
1304 continue
1305 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001306 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001307 controllerStr +
1308 " is inconsistent with ONOS1" )
1309 main.log.warn( repr( hosts[ controller ] ) )
1310 consistentHostsResult = main.FALSE
1311
1312 else:
Jon Hallca319892017-06-15 15:25:22 -07001313 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001314 controllerStr )
1315 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001316 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001317 " hosts response: " +
1318 repr( hosts[ controller ] ) )
1319 utilities.assert_equals(
1320 expect=main.TRUE,
1321 actual=consistentHostsResult,
1322 onpass="Hosts view is consistent across all ONOS nodes",
1323 onfail="ONOS nodes have different views of hosts" )
1324
1325 main.step( "Each host has an IP address" )
1326 ipResult = main.TRUE
1327 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001328 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001329 if hosts[ controller ]:
1330 for host in hosts[ controller ]:
1331 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001332 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001333 controllerStr + ": " + str( host ) )
1334 ipResult = main.FALSE
1335 utilities.assert_equals(
1336 expect=main.TRUE,
1337 actual=ipResult,
1338 onpass="The ips of the hosts aren't empty",
1339 onfail="The ip of at least one host is missing" )
1340
1341 # Strongly connected clusters of devices
1342 main.step( "Cluster view is consistent across ONOS nodes" )
1343 consistentClustersResult = main.TRUE
1344 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001345 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001346 if "Error" not in clusters[ controller ]:
1347 if clusters[ controller ] == clusters[ 0 ]:
1348 continue
1349 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001350 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001351 " is inconsistent with ONOS1" )
1352 consistentClustersResult = main.FALSE
1353
1354 else:
1355 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001356 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001357 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001358 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001359 " clusters response: " +
1360 repr( clusters[ controller ] ) )
1361 utilities.assert_equals(
1362 expect=main.TRUE,
1363 actual=consistentClustersResult,
1364 onpass="Clusters view is consistent across all ONOS nodes",
1365 onfail="ONOS nodes have different views of clusters" )
1366 if not consistentClustersResult:
1367 main.log.debug( clusters )
1368
1369 # there should always only be one cluster
1370 main.step( "Cluster view correct across ONOS nodes" )
1371 try:
1372 numClusters = len( json.loads( clusters[ 0 ] ) )
1373 except ( ValueError, TypeError ):
1374 main.log.exception( "Error parsing clusters[0]: " +
1375 repr( clusters[ 0 ] ) )
1376 numClusters = "ERROR"
1377 utilities.assert_equals(
1378 expect=1,
1379 actual=numClusters,
1380 onpass="ONOS shows 1 SCC",
1381 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1382
1383 main.step( "Comparing ONOS topology to MN" )
1384 devicesResults = main.TRUE
1385 linksResults = main.TRUE
1386 hostsResults = main.TRUE
1387 mnSwitches = main.Mininet1.getSwitches()
1388 mnLinks = main.Mininet1.getLinks()
1389 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001390 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001391 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001392 currentDevicesResult = main.topoRelated.compareDevicePort(
1393 main.Mininet1, controller,
1394 mnSwitches, devices, ports )
1395 utilities.assert_equals( expect=main.TRUE,
1396 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001397 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001398 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001399 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001400 " Switches view is incorrect" )
1401
1402 currentLinksResult = main.topoRelated.compareBase( links, controller,
1403 main.Mininet1.compareLinks,
1404 [ mnSwitches, mnLinks ] )
1405 utilities.assert_equals( expect=main.TRUE,
1406 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001407 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001408 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001409 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001410 " links view is incorrect" )
1411
1412 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1413 currentHostsResult = main.Mininet1.compareHosts(
1414 mnHosts,
1415 hosts[ controller ] )
1416 else:
1417 currentHostsResult = main.FALSE
1418 utilities.assert_equals( expect=main.TRUE,
1419 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001420 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001421 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001422 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001423 " hosts don't match Mininet" )
1424
1425 devicesResults = devicesResults and currentDevicesResult
1426 linksResults = linksResults and currentLinksResult
1427 hostsResults = hostsResults and currentHostsResult
1428
1429 main.step( "Device information is correct" )
1430 utilities.assert_equals(
1431 expect=main.TRUE,
1432 actual=devicesResults,
1433 onpass="Device information is correct",
1434 onfail="Device information is incorrect" )
1435
1436 main.step( "Links are correct" )
1437 utilities.assert_equals(
1438 expect=main.TRUE,
1439 actual=linksResults,
1440 onpass="Link are correct",
1441 onfail="Links are incorrect" )
1442
1443 main.step( "Hosts are correct" )
1444 utilities.assert_equals(
1445 expect=main.TRUE,
1446 actual=hostsResults,
1447 onpass="Hosts are correct",
1448 onfail="Hosts are incorrect" )
1449
1450 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001451 """
1452 Check for basic functionality with distributed primitives
1453 """
Jon Halle0f0b342017-04-18 11:43:47 -07001454 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001455 try:
1456 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001457 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001458 assert main.pCounterName, "main.pCounterName not defined"
1459 assert main.onosSetName, "main.onosSetName not defined"
1460 # NOTE: assert fails if value is 0/None/Empty/False
1461 try:
1462 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001463 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001464 main.log.error( "main.pCounterValue not defined, setting to 0" )
1465 main.pCounterValue = 0
1466 try:
1467 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001468 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001469 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001470 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001471 # Variables for the distributed primitives tests. These are local only
1472 addValue = "a"
1473 addAllValue = "a b c d e f"
1474 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001475 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001476 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001477 workQueueName = "TestON-Queue"
1478 workQueueCompleted = 0
1479 workQueueInProgress = 0
1480 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001481
1482 description = "Check for basic functionality with distributed " +\
1483 "primitives"
1484 main.case( description )
1485 main.caseExplanation = "Test the methods of the distributed " +\
1486 "primitives (counters and sets) throught the cli"
1487 # DISTRIBUTED ATOMIC COUNTERS
1488 # Partitioned counters
1489 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001490 pCounters = main.Cluster.command( "counterTestAddAndGet",
1491 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001492 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001493 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001494 main.pCounterValue += 1
1495 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001496 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001497 pCounterResults = True
1498 for i in addedPValues:
1499 tmpResult = i in pCounters
1500 pCounterResults = pCounterResults and tmpResult
1501 if not tmpResult:
1502 main.log.error( str( i ) + " is not in partitioned "
1503 "counter incremented results" )
1504 utilities.assert_equals( expect=True,
1505 actual=pCounterResults,
1506 onpass="Default counter incremented",
1507 onfail="Error incrementing default" +
1508 " counter" )
1509
1510 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001511 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1512 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001513 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001514 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001515 addedPValues.append( main.pCounterValue )
1516 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001517 # Check that counter incremented numController times
1518 pCounterResults = True
1519 for i in addedPValues:
1520 tmpResult = i in pCounters
1521 pCounterResults = pCounterResults and tmpResult
1522 if not tmpResult:
1523 main.log.error( str( i ) + " is not in partitioned "
1524 "counter incremented results" )
1525 utilities.assert_equals( expect=True,
1526 actual=pCounterResults,
1527 onpass="Default counter incremented",
1528 onfail="Error incrementing default" +
1529 " counter" )
1530
1531 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001532 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001533 utilities.assert_equals( expect=main.TRUE,
1534 actual=incrementCheck,
1535 onpass="Added counters are correct",
1536 onfail="Added counters are incorrect" )
1537
1538 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001539 pCounters = main.Cluster.command( "counterTestAddAndGet",
1540 args=[ main.pCounterName ],
1541 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001542 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001543 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001544 main.pCounterValue += -8
1545 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001546 # Check that counter incremented numController times
1547 pCounterResults = True
1548 for i in addedPValues:
1549 tmpResult = i in pCounters
1550 pCounterResults = pCounterResults and tmpResult
1551 if not tmpResult:
1552 main.log.error( str( i ) + " is not in partitioned "
1553 "counter incremented results" )
1554 utilities.assert_equals( expect=True,
1555 actual=pCounterResults,
1556 onpass="Default counter incremented",
1557 onfail="Error incrementing default" +
1558 " counter" )
1559
1560 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001561 pCounters = main.Cluster.command( "counterTestAddAndGet",
1562 args=[ main.pCounterName ],
1563 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001564 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001565 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001566 main.pCounterValue += 5
1567 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001568
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001569 # Check that counter incremented numController times
1570 pCounterResults = True
1571 for i in addedPValues:
1572 tmpResult = i in pCounters
1573 pCounterResults = pCounterResults and tmpResult
1574 if not tmpResult:
1575 main.log.error( str( i ) + " is not in partitioned "
1576 "counter incremented results" )
1577 utilities.assert_equals( expect=True,
1578 actual=pCounterResults,
1579 onpass="Default counter incremented",
1580 onfail="Error incrementing default" +
1581 " counter" )
1582
1583 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001584 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1585 args=[ main.pCounterName ],
1586 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001587 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001588 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001589 addedPValues.append( main.pCounterValue )
1590 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001591 # Check that counter incremented numController times
1592 pCounterResults = True
1593 for i in addedPValues:
1594 tmpResult = i in pCounters
1595 pCounterResults = pCounterResults and tmpResult
1596 if not tmpResult:
1597 main.log.error( str( i ) + " is not in partitioned "
1598 "counter incremented results" )
1599 utilities.assert_equals( expect=True,
1600 actual=pCounterResults,
1601 onpass="Default counter incremented",
1602 onfail="Error incrementing default" +
1603 " counter" )
1604
1605 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001606 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001607 utilities.assert_equals( expect=main.TRUE,
1608 actual=incrementCheck,
1609 onpass="Added counters are correct",
1610 onfail="Added counters are incorrect" )
1611
1612 # DISTRIBUTED SETS
1613 main.step( "Distributed Set get" )
1614 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001615 getResponses = main.Cluster.command( "setTestGet",
1616 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001617 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001618 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001619 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001620 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001621 current = set( getResponses[ i ] )
1622 if len( current ) == len( getResponses[ i ] ):
1623 # no repeats
1624 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001625 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001626 " has incorrect view" +
1627 " of set " + main.onosSetName + ":\n" +
1628 str( getResponses[ i ] ) )
1629 main.log.debug( "Expected: " + str( main.onosSet ) )
1630 main.log.debug( "Actual: " + str( current ) )
1631 getResults = main.FALSE
1632 else:
1633 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001634 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001635 " has repeat elements in" +
1636 " set " + main.onosSetName + ":\n" +
1637 str( getResponses[ i ] ) )
1638 getResults = main.FALSE
1639 elif getResponses[ i ] == main.ERROR:
1640 getResults = main.FALSE
1641 utilities.assert_equals( expect=main.TRUE,
1642 actual=getResults,
1643 onpass="Set elements are correct",
1644 onfail="Set elements are incorrect" )
1645
1646 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001647 sizeResponses = main.Cluster.command( "setTestSize",
1648 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001649 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001650 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001651 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001652 if size != sizeResponses[ i ]:
1653 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001654 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001655 " expected a size of " + str( size ) +
1656 " for set " + main.onosSetName +
1657 " but got " + str( sizeResponses[ i ] ) )
1658 utilities.assert_equals( expect=main.TRUE,
1659 actual=sizeResults,
1660 onpass="Set sizes are correct",
1661 onfail="Set sizes are incorrect" )
1662
1663 main.step( "Distributed Set add()" )
1664 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001665 addResponses = main.Cluster.command( "setTestAdd",
1666 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001667 # main.TRUE = successfully changed the set
1668 # main.FALSE = action resulted in no change in set
1669 # main.ERROR - Some error in executing the function
1670 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001671 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001672 if addResponses[ i ] == main.TRUE:
1673 # All is well
1674 pass
1675 elif addResponses[ i ] == main.FALSE:
1676 # Already in set, probably fine
1677 pass
1678 elif addResponses[ i ] == main.ERROR:
1679 # Error in execution
1680 addResults = main.FALSE
1681 else:
1682 # unexpected result
1683 addResults = main.FALSE
1684 if addResults != main.TRUE:
1685 main.log.error( "Error executing set add" )
1686
1687 # Check if set is still correct
1688 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001689 getResponses = main.Cluster.command( "setTestGet",
1690 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001691 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001692 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001693 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001694 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001695 current = set( getResponses[ i ] )
1696 if len( current ) == len( getResponses[ i ] ):
1697 # no repeats
1698 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001699 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001700 " of set " + main.onosSetName + ":\n" +
1701 str( getResponses[ i ] ) )
1702 main.log.debug( "Expected: " + str( main.onosSet ) )
1703 main.log.debug( "Actual: " + str( current ) )
1704 getResults = main.FALSE
1705 else:
1706 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001707 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001708 " set " + main.onosSetName + ":\n" +
1709 str( getResponses[ i ] ) )
1710 getResults = main.FALSE
1711 elif getResponses[ i ] == main.ERROR:
1712 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001713 sizeResponses = main.Cluster.command( "setTestSize",
1714 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001715 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001716 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001717 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001718 if size != sizeResponses[ i ]:
1719 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001720 main.log.error( node + " expected a size of " +
1721 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001722 " but got " + str( sizeResponses[ i ] ) )
1723 addResults = addResults and getResults and sizeResults
1724 utilities.assert_equals( expect=main.TRUE,
1725 actual=addResults,
1726 onpass="Set add correct",
1727 onfail="Set add was incorrect" )
1728
1729 main.step( "Distributed Set addAll()" )
1730 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001731 addResponses = main.Cluster.command( "setTestAdd",
1732 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001733 # main.TRUE = successfully changed the set
1734 # main.FALSE = action resulted in no change in set
1735 # main.ERROR - Some error in executing the function
1736 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001737 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001738 if addResponses[ i ] == main.TRUE:
1739 # All is well
1740 pass
1741 elif addResponses[ i ] == main.FALSE:
1742 # Already in set, probably fine
1743 pass
1744 elif addResponses[ i ] == main.ERROR:
1745 # Error in execution
1746 addAllResults = main.FALSE
1747 else:
1748 # unexpected result
1749 addAllResults = main.FALSE
1750 if addAllResults != main.TRUE:
1751 main.log.error( "Error executing set addAll" )
1752
1753 # Check if set is still correct
1754 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001755 getResponses = main.Cluster.command( "setTestGet",
1756 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001757 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001758 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001759 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001760 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001761 current = set( getResponses[ i ] )
1762 if len( current ) == len( getResponses[ i ] ):
1763 # no repeats
1764 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001765 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001766 " of set " + main.onosSetName + ":\n" +
1767 str( getResponses[ i ] ) )
1768 main.log.debug( "Expected: " + str( main.onosSet ) )
1769 main.log.debug( "Actual: " + str( current ) )
1770 getResults = main.FALSE
1771 else:
1772 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001773 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001774 " set " + main.onosSetName + ":\n" +
1775 str( getResponses[ i ] ) )
1776 getResults = main.FALSE
1777 elif getResponses[ i ] == main.ERROR:
1778 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001779 sizeResponses = main.Cluster.command( "setTestSize",
1780 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001781 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001782 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001783 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001784 if size != sizeResponses[ i ]:
1785 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001786 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001787 " for set " + main.onosSetName +
1788 " but got " + str( sizeResponses[ i ] ) )
1789 addAllResults = addAllResults and getResults and sizeResults
1790 utilities.assert_equals( expect=main.TRUE,
1791 actual=addAllResults,
1792 onpass="Set addAll correct",
1793 onfail="Set addAll was incorrect" )
1794
1795 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001796 containsResponses = main.Cluster.command( "setTestGet",
1797 args=[ main.onosSetName ],
1798 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001799 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001800 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001801 if containsResponses[ i ] == main.ERROR:
1802 containsResults = main.FALSE
1803 else:
1804 containsResults = containsResults and\
1805 containsResponses[ i ][ 1 ]
1806 utilities.assert_equals( expect=main.TRUE,
1807 actual=containsResults,
1808 onpass="Set contains is functional",
1809 onfail="Set contains failed" )
1810
1811 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001812 containsAllResponses = main.Cluster.command( "setTestGet",
1813 args=[ main.onosSetName ],
1814 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001815 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001816 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001817 if containsResponses[ i ] == main.ERROR:
1818 containsResults = main.FALSE
1819 else:
1820 containsResults = containsResults and\
1821 containsResponses[ i ][ 1 ]
1822 utilities.assert_equals( expect=main.TRUE,
1823 actual=containsAllResults,
1824 onpass="Set containsAll is functional",
1825 onfail="Set containsAll failed" )
1826
1827 main.step( "Distributed Set remove()" )
1828 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001829 removeResponses = main.Cluster.command( "setTestRemove",
1830 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001831 # main.TRUE = successfully changed the set
1832 # main.FALSE = action resulted in no change in set
1833 # main.ERROR - Some error in executing the function
1834 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001835 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001836 if removeResponses[ i ] == main.TRUE:
1837 # All is well
1838 pass
1839 elif removeResponses[ i ] == main.FALSE:
1840 # not in set, probably fine
1841 pass
1842 elif removeResponses[ i ] == main.ERROR:
1843 # Error in execution
1844 removeResults = main.FALSE
1845 else:
1846 # unexpected result
1847 removeResults = main.FALSE
1848 if removeResults != main.TRUE:
1849 main.log.error( "Error executing set remove" )
1850
1851 # Check if set is still correct
1852 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001853 getResponses = main.Cluster.command( "setTestGet",
1854 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001855 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001856 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001857 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001858 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001859 current = set( getResponses[ i ] )
1860 if len( current ) == len( getResponses[ i ] ):
1861 # no repeats
1862 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001863 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001864 " of set " + main.onosSetName + ":\n" +
1865 str( getResponses[ i ] ) )
1866 main.log.debug( "Expected: " + str( main.onosSet ) )
1867 main.log.debug( "Actual: " + str( current ) )
1868 getResults = main.FALSE
1869 else:
1870 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001871 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001872 " set " + main.onosSetName + ":\n" +
1873 str( getResponses[ i ] ) )
1874 getResults = main.FALSE
1875 elif getResponses[ i ] == main.ERROR:
1876 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001877 sizeResponses = main.Cluster.command( "setTestSize",
1878 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001879 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001880 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001881 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001882 if size != sizeResponses[ i ]:
1883 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001884 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001885 " for set " + main.onosSetName +
1886 " but got " + str( sizeResponses[ i ] ) )
1887 removeResults = removeResults and getResults and sizeResults
1888 utilities.assert_equals( expect=main.TRUE,
1889 actual=removeResults,
1890 onpass="Set remove correct",
1891 onfail="Set remove was incorrect" )
1892
1893 main.step( "Distributed Set removeAll()" )
1894 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001895 removeAllResponses = main.Cluster.command( "setTestRemove",
1896 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001897 # main.TRUE = successfully changed the set
1898 # main.FALSE = action resulted in no change in set
1899 # main.ERROR - Some error in executing the function
1900 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001901 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001902 if removeAllResponses[ i ] == main.TRUE:
1903 # All is well
1904 pass
1905 elif removeAllResponses[ i ] == main.FALSE:
1906 # not in set, probably fine
1907 pass
1908 elif removeAllResponses[ i ] == main.ERROR:
1909 # Error in execution
1910 removeAllResults = main.FALSE
1911 else:
1912 # unexpected result
1913 removeAllResults = main.FALSE
1914 if removeAllResults != main.TRUE:
1915 main.log.error( "Error executing set removeAll" )
1916
1917 # Check if set is still correct
1918 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001919 getResponses = main.Cluster.command( "setTestGet",
1920 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001921 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001922 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001923 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001924 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001925 current = set( getResponses[ i ] )
1926 if len( current ) == len( getResponses[ i ] ):
1927 # no repeats
1928 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001929 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001930 " of set " + main.onosSetName + ":\n" +
1931 str( getResponses[ i ] ) )
1932 main.log.debug( "Expected: " + str( main.onosSet ) )
1933 main.log.debug( "Actual: " + str( current ) )
1934 getResults = main.FALSE
1935 else:
1936 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001937 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001938 " set " + main.onosSetName + ":\n" +
1939 str( getResponses[ i ] ) )
1940 getResults = main.FALSE
1941 elif getResponses[ i ] == main.ERROR:
1942 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001943 sizeResponses = main.Cluster.command( "setTestSize",
1944 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001945 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001946 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001947 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001948 if size != sizeResponses[ i ]:
1949 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001950 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001951 " for set " + main.onosSetName +
1952 " but got " + str( sizeResponses[ i ] ) )
1953 removeAllResults = removeAllResults and getResults and sizeResults
1954 utilities.assert_equals( expect=main.TRUE,
1955 actual=removeAllResults,
1956 onpass="Set removeAll correct",
1957 onfail="Set removeAll was incorrect" )
1958
1959 main.step( "Distributed Set addAll()" )
1960 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001961 addResponses = main.Cluster.command( "setTestAdd",
1962 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001963 # main.TRUE = successfully changed the set
1964 # main.FALSE = action resulted in no change in set
1965 # main.ERROR - Some error in executing the function
1966 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001967 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001968 if addResponses[ i ] == main.TRUE:
1969 # All is well
1970 pass
1971 elif addResponses[ i ] == main.FALSE:
1972 # Already in set, probably fine
1973 pass
1974 elif addResponses[ i ] == main.ERROR:
1975 # Error in execution
1976 addAllResults = main.FALSE
1977 else:
1978 # unexpected result
1979 addAllResults = main.FALSE
1980 if addAllResults != main.TRUE:
1981 main.log.error( "Error executing set addAll" )
1982
1983 # Check if set is still correct
1984 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001985 getResponses = main.Cluster.command( "setTestGet",
1986 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001987 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001988 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001989 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001990 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001991 current = set( getResponses[ i ] )
1992 if len( current ) == len( getResponses[ i ] ):
1993 # no repeats
1994 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001995 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001996 " of set " + main.onosSetName + ":\n" +
1997 str( getResponses[ i ] ) )
1998 main.log.debug( "Expected: " + str( main.onosSet ) )
1999 main.log.debug( "Actual: " + str( current ) )
2000 getResults = main.FALSE
2001 else:
2002 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002003 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002004 " set " + main.onosSetName + ":\n" +
2005 str( getResponses[ i ] ) )
2006 getResults = main.FALSE
2007 elif getResponses[ i ] == main.ERROR:
2008 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002009 sizeResponses = main.Cluster.command( "setTestSize",
2010 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002011 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002012 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002013 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002014 if size != sizeResponses[ i ]:
2015 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002016 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002017 " for set " + main.onosSetName +
2018 " but got " + str( sizeResponses[ i ] ) )
2019 addAllResults = addAllResults and getResults and sizeResults
2020 utilities.assert_equals( expect=main.TRUE,
2021 actual=addAllResults,
2022 onpass="Set addAll correct",
2023 onfail="Set addAll was incorrect" )
2024
2025 main.step( "Distributed Set clear()" )
2026 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07002027 clearResponses = main.Cluster.command( "setTestRemove",
2028 args=[ main.onosSetName, " " ], # Values doesn't matter
2029 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002030 # main.TRUE = successfully changed the set
2031 # main.FALSE = action resulted in no change in set
2032 # main.ERROR - Some error in executing the function
2033 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002034 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002035 if clearResponses[ i ] == main.TRUE:
2036 # All is well
2037 pass
2038 elif clearResponses[ i ] == main.FALSE:
2039 # Nothing set, probably fine
2040 pass
2041 elif clearResponses[ i ] == main.ERROR:
2042 # Error in execution
2043 clearResults = main.FALSE
2044 else:
2045 # unexpected result
2046 clearResults = main.FALSE
2047 if clearResults != main.TRUE:
2048 main.log.error( "Error executing set clear" )
2049
2050 # Check if set is still correct
2051 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002052 getResponses = main.Cluster.command( "setTestGet",
2053 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002054 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002055 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002056 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002057 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002058 current = set( getResponses[ i ] )
2059 if len( current ) == len( getResponses[ i ] ):
2060 # no repeats
2061 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002062 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002063 " of set " + main.onosSetName + ":\n" +
2064 str( getResponses[ i ] ) )
2065 main.log.debug( "Expected: " + str( main.onosSet ) )
2066 main.log.debug( "Actual: " + str( current ) )
2067 getResults = main.FALSE
2068 else:
2069 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002070 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002071 " set " + main.onosSetName + ":\n" +
2072 str( getResponses[ i ] ) )
2073 getResults = main.FALSE
2074 elif getResponses[ i ] == main.ERROR:
2075 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002076 sizeResponses = main.Cluster.command( "setTestSize",
2077 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002078 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002079 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002080 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002081 if size != sizeResponses[ i ]:
2082 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002083 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002084 " for set " + main.onosSetName +
2085 " but got " + str( sizeResponses[ i ] ) )
2086 clearResults = clearResults and getResults and sizeResults
2087 utilities.assert_equals( expect=main.TRUE,
2088 actual=clearResults,
2089 onpass="Set clear correct",
2090 onfail="Set clear was incorrect" )
2091
2092 main.step( "Distributed Set addAll()" )
2093 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002094 addResponses = main.Cluster.command( "setTestAdd",
2095 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002096 # main.TRUE = successfully changed the set
2097 # main.FALSE = action resulted in no change in set
2098 # main.ERROR - Some error in executing the function
2099 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002100 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002101 if addResponses[ i ] == main.TRUE:
2102 # All is well
2103 pass
2104 elif addResponses[ i ] == main.FALSE:
2105 # Already in set, probably fine
2106 pass
2107 elif addResponses[ i ] == main.ERROR:
2108 # Error in execution
2109 addAllResults = main.FALSE
2110 else:
2111 # unexpected result
2112 addAllResults = main.FALSE
2113 if addAllResults != main.TRUE:
2114 main.log.error( "Error executing set addAll" )
2115
2116 # Check if set is still correct
2117 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002118 getResponses = main.Cluster.command( "setTestGet",
2119 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002120 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002121 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002122 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002123 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002124 current = set( getResponses[ i ] )
2125 if len( current ) == len( getResponses[ i ] ):
2126 # no repeats
2127 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002128 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002129 " of set " + main.onosSetName + ":\n" +
2130 str( getResponses[ i ] ) )
2131 main.log.debug( "Expected: " + str( main.onosSet ) )
2132 main.log.debug( "Actual: " + str( current ) )
2133 getResults = main.FALSE
2134 else:
2135 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002136 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002137 " set " + main.onosSetName + ":\n" +
2138 str( getResponses[ i ] ) )
2139 getResults = main.FALSE
2140 elif getResponses[ i ] == main.ERROR:
2141 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002142 sizeResponses = main.Cluster.command( "setTestSize",
2143 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002144 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002145 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002146 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002147 if size != sizeResponses[ i ]:
2148 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002149 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002150 " for set " + main.onosSetName +
2151 " but got " + str( sizeResponses[ i ] ) )
2152 addAllResults = addAllResults and getResults and sizeResults
2153 utilities.assert_equals( expect=main.TRUE,
2154 actual=addAllResults,
2155 onpass="Set addAll correct",
2156 onfail="Set addAll was incorrect" )
2157
2158 main.step( "Distributed Set retain()" )
2159 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002160 retainResponses = main.Cluster.command( "setTestRemove",
2161 args=[ main.onosSetName, retainValue ],
2162 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002163 # main.TRUE = successfully changed the set
2164 # main.FALSE = action resulted in no change in set
2165 # main.ERROR - Some error in executing the function
2166 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002167 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002168 if retainResponses[ i ] == main.TRUE:
2169 # All is well
2170 pass
2171 elif retainResponses[ i ] == main.FALSE:
2172 # Already in set, probably fine
2173 pass
2174 elif retainResponses[ i ] == main.ERROR:
2175 # Error in execution
2176 retainResults = main.FALSE
2177 else:
2178 # unexpected result
2179 retainResults = main.FALSE
2180 if retainResults != main.TRUE:
2181 main.log.error( "Error executing set retain" )
2182
2183 # Check if set is still correct
2184 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002185 getResponses = main.Cluster.command( "setTestGet",
2186 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002187 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002188 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002189 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002190 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002191 current = set( getResponses[ i ] )
2192 if len( current ) == len( getResponses[ i ] ):
2193 # no repeats
2194 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002195 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002196 " of set " + main.onosSetName + ":\n" +
2197 str( getResponses[ i ] ) )
2198 main.log.debug( "Expected: " + str( main.onosSet ) )
2199 main.log.debug( "Actual: " + str( current ) )
2200 getResults = main.FALSE
2201 else:
2202 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002203 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002204 " set " + main.onosSetName + ":\n" +
2205 str( getResponses[ i ] ) )
2206 getResults = main.FALSE
2207 elif getResponses[ i ] == main.ERROR:
2208 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002209 sizeResponses = main.Cluster.command( "setTestSize",
2210 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002211 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002212 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002213 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002214 if size != sizeResponses[ i ]:
2215 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002216 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002217 str( size ) + " for set " + main.onosSetName +
2218 " but got " + str( sizeResponses[ i ] ) )
2219 retainResults = retainResults and getResults and sizeResults
2220 utilities.assert_equals( expect=main.TRUE,
2221 actual=retainResults,
2222 onpass="Set retain correct",
2223 onfail="Set retain was incorrect" )
2224
2225 # Transactional maps
2226 main.step( "Partitioned Transactional maps put" )
2227 tMapValue = "Testing"
2228 numKeys = 100
2229 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002230 ctrl = main.Cluster.next()
2231 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002232 if putResponses and len( putResponses ) == 100:
2233 for i in putResponses:
2234 if putResponses[ i ][ 'value' ] != tMapValue:
2235 putResult = False
2236 else:
2237 putResult = False
2238 if not putResult:
2239 main.log.debug( "Put response values: " + str( putResponses ) )
2240 utilities.assert_equals( expect=True,
2241 actual=putResult,
2242 onpass="Partitioned Transactional Map put successful",
2243 onfail="Partitioned Transactional Map put values are incorrect" )
2244
2245 main.step( "Partitioned Transactional maps get" )
2246 # FIXME: is this sleep needed?
2247 time.sleep( 5 )
2248
2249 getCheck = True
2250 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002251 getResponses = main.Cluster.command( "transactionalMapGet",
2252 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002253 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002254 for node in getResponses:
2255 if node != tMapValue:
2256 valueCheck = False
2257 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002258 main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002259 main.log.warn( getResponses )
2260 getCheck = getCheck and valueCheck
2261 utilities.assert_equals( expect=True,
2262 actual=getCheck,
2263 onpass="Partitioned Transactional Map get values were correct",
2264 onfail="Partitioned Transactional Map values incorrect" )
2265
2266 # DISTRIBUTED ATOMIC VALUE
2267 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002268 getValues = main.Cluster.command( "valueTestGet",
2269 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002270 main.log.debug( getValues )
2271 # Check the results
2272 atomicValueGetResult = True
2273 expected = valueValue if valueValue is not None else "null"
2274 main.log.debug( "Checking for value of " + expected )
2275 for i in getValues:
2276 if i != expected:
2277 atomicValueGetResult = False
2278 utilities.assert_equals( expect=True,
2279 actual=atomicValueGetResult,
2280 onpass="Atomic Value get successful",
2281 onfail="Error getting atomic Value " +
2282 str( valueValue ) + ", found: " +
2283 str( getValues ) )
2284
2285 main.step( "Atomic Value set()" )
2286 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002287 setValues = main.Cluster.command( "valueTestSet",
2288 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002289 main.log.debug( setValues )
2290 # Check the results
2291 atomicValueSetResults = True
2292 for i in setValues:
2293 if i != main.TRUE:
2294 atomicValueSetResults = False
2295 utilities.assert_equals( expect=True,
2296 actual=atomicValueSetResults,
2297 onpass="Atomic Value set successful",
2298 onfail="Error setting atomic Value" +
2299 str( setValues ) )
2300
2301 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002302 getValues = main.Cluster.command( "valueTestGet",
2303 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002304 main.log.debug( getValues )
2305 # Check the results
2306 atomicValueGetResult = True
2307 expected = valueValue if valueValue is not None else "null"
2308 main.log.debug( "Checking for value of " + expected )
2309 for i in getValues:
2310 if i != expected:
2311 atomicValueGetResult = False
2312 utilities.assert_equals( expect=True,
2313 actual=atomicValueGetResult,
2314 onpass="Atomic Value get successful",
2315 onfail="Error getting atomic Value " +
2316 str( valueValue ) + ", found: " +
2317 str( getValues ) )
2318
2319 main.step( "Atomic Value compareAndSet()" )
2320 oldValue = valueValue
2321 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002322 ctrl = main.Cluster.next()
2323 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002324 main.log.debug( CASValue )
2325 utilities.assert_equals( expect=main.TRUE,
2326 actual=CASValue,
2327 onpass="Atomic Value comapreAndSet successful",
2328 onfail="Error setting atomic Value:" +
2329 str( CASValue ) )
2330
2331 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002332 getValues = main.Cluster.command( "valueTestGet",
2333 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002334 main.log.debug( getValues )
2335 # Check the results
2336 atomicValueGetResult = True
2337 expected = valueValue if valueValue is not None else "null"
2338 main.log.debug( "Checking for value of " + expected )
2339 for i in getValues:
2340 if i != expected:
2341 atomicValueGetResult = False
2342 utilities.assert_equals( expect=True,
2343 actual=atomicValueGetResult,
2344 onpass="Atomic Value get successful",
2345 onfail="Error getting atomic Value " +
2346 str( valueValue ) + ", found: " +
2347 str( getValues ) )
2348
2349 main.step( "Atomic Value getAndSet()" )
2350 oldValue = valueValue
2351 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002352 ctrl = main.Cluster.next()
2353 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002354 main.log.debug( GASValue )
2355 expected = oldValue if oldValue is not None else "null"
2356 utilities.assert_equals( expect=expected,
2357 actual=GASValue,
2358 onpass="Atomic Value GAS successful",
2359 onfail="Error with GetAndSet atomic Value: expected " +
2360 str( expected ) + ", found: " +
2361 str( GASValue ) )
2362
2363 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002364 getValues = main.Cluster.command( "valueTestGet",
2365 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002366 main.log.debug( getValues )
2367 # Check the results
2368 atomicValueGetResult = True
2369 expected = valueValue if valueValue is not None else "null"
2370 main.log.debug( "Checking for value of " + expected )
2371 for i in getValues:
2372 if i != expected:
2373 atomicValueGetResult = False
2374 utilities.assert_equals( expect=True,
2375 actual=atomicValueGetResult,
2376 onpass="Atomic Value get successful",
2377 onfail="Error getting atomic Value: expected " +
2378 str( valueValue ) + ", found: " +
2379 str( getValues ) )
2380
2381 main.step( "Atomic Value destory()" )
2382 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002383 ctrl = main.Cluster.next()
2384 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002385 main.log.debug( destroyResult )
2386 # Check the results
2387 utilities.assert_equals( expect=main.TRUE,
2388 actual=destroyResult,
2389 onpass="Atomic Value destroy successful",
2390 onfail="Error destroying atomic Value" )
2391
2392 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002393 getValues = main.Cluster.command( "valueTestGet",
2394 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002395 main.log.debug( getValues )
2396 # Check the results
2397 atomicValueGetResult = True
2398 expected = valueValue if valueValue is not None else "null"
2399 main.log.debug( "Checking for value of " + expected )
2400 for i in getValues:
2401 if i != expected:
2402 atomicValueGetResult = False
2403 utilities.assert_equals( expect=True,
2404 actual=atomicValueGetResult,
2405 onpass="Atomic Value get successful",
2406 onfail="Error getting atomic Value " +
2407 str( valueValue ) + ", found: " +
2408 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002409
2410 # WORK QUEUES
2411 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002412 ctrl = main.Cluster.next()
2413 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002414 workQueuePending += 1
2415 main.log.debug( addResult )
2416 # Check the results
2417 utilities.assert_equals( expect=main.TRUE,
2418 actual=addResult,
2419 onpass="Work Queue add successful",
2420 onfail="Error adding to Work Queue" )
2421
2422 main.step( "Check the work queue stats" )
2423 statsResults = self.workQueueStatsCheck( workQueueName,
2424 workQueueCompleted,
2425 workQueueInProgress,
2426 workQueuePending )
2427 utilities.assert_equals( expect=True,
2428 actual=statsResults,
2429 onpass="Work Queue stats correct",
2430 onfail="Work Queue stats incorrect " )
2431
2432 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002433 ctrl = main.Cluster.next()
2434 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002435 workQueuePending += 2
2436 main.log.debug( addMultipleResult )
2437 # Check the results
2438 utilities.assert_equals( expect=main.TRUE,
2439 actual=addMultipleResult,
2440 onpass="Work Queue add multiple successful",
2441 onfail="Error adding multiple items to Work Queue" )
2442
2443 main.step( "Check the work queue stats" )
2444 statsResults = self.workQueueStatsCheck( workQueueName,
2445 workQueueCompleted,
2446 workQueueInProgress,
2447 workQueuePending )
2448 utilities.assert_equals( expect=True,
2449 actual=statsResults,
2450 onpass="Work Queue stats correct",
2451 onfail="Work Queue stats incorrect " )
2452
2453 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002454 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002455 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002456 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002457 workQueuePending -= number
2458 workQueueCompleted += number
2459 main.log.debug( take1Result )
2460 # Check the results
2461 utilities.assert_equals( expect=main.TRUE,
2462 actual=take1Result,
2463 onpass="Work Queue takeAndComplete 1 successful",
2464 onfail="Error taking 1 from Work Queue" )
2465
2466 main.step( "Check the work queue stats" )
2467 statsResults = self.workQueueStatsCheck( workQueueName,
2468 workQueueCompleted,
2469 workQueueInProgress,
2470 workQueuePending )
2471 utilities.assert_equals( expect=True,
2472 actual=statsResults,
2473 onpass="Work Queue stats correct",
2474 onfail="Work Queue stats incorrect " )
2475
2476 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002477 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002478 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002479 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002480 workQueuePending -= number
2481 workQueueCompleted += number
2482 main.log.debug( take2Result )
2483 # Check the results
2484 utilities.assert_equals( expect=main.TRUE,
2485 actual=take2Result,
2486 onpass="Work Queue takeAndComplete 2 successful",
2487 onfail="Error taking 2 from Work Queue" )
2488
2489 main.step( "Check the work queue stats" )
2490 statsResults = self.workQueueStatsCheck( workQueueName,
2491 workQueueCompleted,
2492 workQueueInProgress,
2493 workQueuePending )
2494 utilities.assert_equals( expect=True,
2495 actual=statsResults,
2496 onpass="Work Queue stats correct",
2497 onfail="Work Queue stats incorrect " )
2498
2499 main.step( "Work Queue destroy()" )
2500 valueValue = None
2501 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002502 ctrl = main.Cluster.next()
2503 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002504 workQueueCompleted = 0
2505 workQueueInProgress = 0
2506 workQueuePending = 0
2507 main.log.debug( destroyResult )
2508 # Check the results
2509 utilities.assert_equals( expect=main.TRUE,
2510 actual=destroyResult,
2511 onpass="Work Queue destroy successful",
2512 onfail="Error destroying Work Queue" )
2513
2514 main.step( "Check the work queue stats" )
2515 statsResults = self.workQueueStatsCheck( workQueueName,
2516 workQueueCompleted,
2517 workQueueInProgress,
2518 workQueuePending )
2519 utilities.assert_equals( expect=True,
2520 actual=statsResults,
2521 onpass="Work Queue stats correct",
2522 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002523 except Exception as e:
2524 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002525
2526 def cleanUp( self, main ):
2527 """
2528 Clean up
2529 """
2530 import os
2531 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002532 assert main, "main not defined"
2533 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002534
2535 # printing colors to terminal
2536 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2537 'blue': '\033[94m', 'green': '\033[92m',
2538 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2539 main.case( "Test Cleanup" )
2540 main.step( "Killing tcpdumps" )
2541 main.Mininet2.stopTcpdump()
2542
2543 testname = main.TEST
2544 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2545 main.step( "Copying MN pcap and ONOS log files to test station" )
2546 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2547 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2548 # NOTE: MN Pcap file is being saved to logdir.
2549 # We scp this file as MN and TestON aren't necessarily the same vm
2550
2551 # FIXME: To be replaced with a Jenkin's post script
2552 # TODO: Load these from params
2553 # NOTE: must end in /
2554 logFolder = "/opt/onos/log/"
2555 logFiles = [ "karaf.log", "karaf.log.1" ]
2556 # NOTE: must end in /
2557 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002558 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002559 dstName = main.logdir + "/" + ctrl.name + "-" + f
2560 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002561 logFolder + f, dstName )
2562 # std*.log's
2563 # NOTE: must end in /
2564 logFolder = "/opt/onos/var/"
2565 logFiles = [ "stderr.log", "stdout.log" ]
2566 # NOTE: must end in /
2567 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002568 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002569 dstName = main.logdir + "/" + ctrl.name + "-" + f
2570 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002571 logFolder + f, dstName )
2572 else:
2573 main.log.debug( "skipping saving log files" )
2574
2575 main.step( "Stopping Mininet" )
2576 mnResult = main.Mininet1.stopNet()
2577 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2578 onpass="Mininet stopped",
2579 onfail="MN cleanup NOT successful" )
2580
2581 main.step( "Checking ONOS Logs for errors" )
Devin Lim142b5342017-07-20 15:22:39 -07002582 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002583 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2584 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002585
2586 try:
2587 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2588 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2589 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2590 timerLog.close()
2591 except NameError as e:
2592 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002593
Devin Lim58046fa2017-07-05 16:55:00 -07002594 def assignMastership( self, main ):
2595 """
2596 Assign mastership to controllers
2597 """
2598 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002599 assert main, "main not defined"
2600 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002601
2602 main.case( "Assigning Controller roles for switches" )
2603 main.caseExplanation = "Check that ONOS is connected to each " +\
2604 "device. Then manually assign" +\
2605 " mastership to specific ONOS nodes using" +\
2606 " 'device-role'"
2607 main.step( "Assign mastership of switches to specific controllers" )
2608 # Manually assign mastership to the controller we want
2609 roleCall = main.TRUE
2610
2611 ipList = []
2612 deviceList = []
Jon Hallca319892017-06-15 15:25:22 -07002613 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07002614 try:
2615 # Assign mastership to specific controllers. This assignment was
2616 # determined for a 7 node cluser, but will work with any sized
2617 # cluster
2618 for i in range( 1, 29 ): # switches 1 through 28
2619 # set up correct variables:
2620 if i == 1:
2621 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002622 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002623 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
2624 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002625 c = 1 % main.Cluster.numCtrls
2626 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002627 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
2628 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002629 c = 1 % main.Cluster.numCtrls
2630 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002631 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
2632 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002633 c = 3 % main.Cluster.numCtrls
2634 ip = main.Cluster.active( c ).ip_address # ONOS4
Devin Lim58046fa2017-07-05 16:55:00 -07002635 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
2636 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002637 c = 2 % main.Cluster.numCtrls
2638 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002639 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
2640 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002641 c = 2 % main.Cluster.numCtrls
2642 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002643 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
2644 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002645 c = 5 % main.Cluster.numCtrls
2646 ip = main.Cluster.active( c ).ip_address # ONOS6
Devin Lim58046fa2017-07-05 16:55:00 -07002647 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
2648 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002649 c = 4 % main.Cluster.numCtrls
2650 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002651 dpid = '3' + str( i ).zfill( 3 )
2652 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2653 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002654 c = 6 % main.Cluster.numCtrls
2655 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002656 dpid = '6' + str( i ).zfill( 3 )
2657 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2658 elif i == 28:
2659 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002660 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002661 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
2662 else:
2663 main.log.error( "You didn't write an else statement for " +
2664 "switch s" + str( i ) )
2665 roleCall = main.FALSE
2666 # Assign switch
2667 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2668 # TODO: make this controller dynamic
2669 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
2670 ipList.append( ip )
2671 deviceList.append( deviceId )
2672 except ( AttributeError, AssertionError ):
2673 main.log.exception( "Something is wrong with ONOS device view" )
2674 main.log.info( onosCli.devices() )
2675 utilities.assert_equals(
2676 expect=main.TRUE,
2677 actual=roleCall,
2678 onpass="Re-assigned switch mastership to designated controller",
2679 onfail="Something wrong with deviceRole calls" )
2680
2681 main.step( "Check mastership was correctly assigned" )
2682 roleCheck = main.TRUE
2683 # NOTE: This is due to the fact that device mastership change is not
2684 # atomic and is actually a multi step process
2685 time.sleep( 5 )
2686 for i in range( len( ipList ) ):
2687 ip = ipList[ i ]
2688 deviceId = deviceList[ i ]
2689 # Check assignment
2690 master = onosCli.getRole( deviceId ).get( 'master' )
2691 if ip in master:
2692 roleCheck = roleCheck and main.TRUE
2693 else:
2694 roleCheck = roleCheck and main.FALSE
2695 main.log.error( "Error, controller " + ip + " is not" +
2696 " master " + "of device " +
2697 str( deviceId ) + ". Master is " +
2698 repr( master ) + "." )
2699 utilities.assert_equals(
2700 expect=main.TRUE,
2701 actual=roleCheck,
2702 onpass="Switches were successfully reassigned to designated " +
2703 "controller",
2704 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002705
Devin Lim58046fa2017-07-05 16:55:00 -07002706 def bringUpStoppedNode( self, main ):
2707 """
2708 The bring up stopped nodes
2709 """
2710 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002711 assert main, "main not defined"
2712 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002713 assert main.kill, "main.kill not defined"
2714 main.case( "Restart minority of ONOS nodes" )
2715
2716 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2717 startResults = main.TRUE
2718 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002719 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002720 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002721 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002722 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2723 onpass="ONOS nodes started successfully",
2724 onfail="ONOS nodes NOT successfully started" )
2725
2726 main.step( "Checking if ONOS is up yet" )
2727 count = 0
2728 onosIsupResult = main.FALSE
2729 while onosIsupResult == main.FALSE and count < 10:
2730 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002731 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002732 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002733 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002734 count = count + 1
2735 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2736 onpass="ONOS restarted successfully",
2737 onfail="ONOS restart NOT successful" )
2738
Jon Hallca319892017-06-15 15:25:22 -07002739 main.step( "Restarting ONOS nodes" )
Devin Lim58046fa2017-07-05 16:55:00 -07002740 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002741 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002742 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002743 ctrl.startOnosCli( ctrl.ipAddress )
2744 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002745 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002746 onpass="ONOS node(s) restarted",
2747 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002748
2749 # Grab the time of restart so we chan check how long the gossip
2750 # protocol has had time to work
2751 main.restartTime = time.time() - restartTime
2752 main.log.debug( "Restart time: " + str( main.restartTime ) )
2753 # TODO: MAke this configurable. Also, we are breaking the above timer
2754 main.step( "Checking ONOS nodes" )
2755 nodeResults = utilities.retry( self.nodesCheck,
2756 False,
Jon Hallca319892017-06-15 15:25:22 -07002757 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07002758 sleep=15,
2759 attempts=5 )
2760
2761 utilities.assert_equals( expect=True, actual=nodeResults,
2762 onpass="Nodes check successful",
2763 onfail="Nodes check NOT successful" )
2764
2765 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002766 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002767 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002768 ctrl.name,
2769 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002770 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -07002771 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002772
Jon Hallca319892017-06-15 15:25:22 -07002773 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002774
2775 main.step( "Rerun for election on the node(s) that were killed" )
2776 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002777 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002778 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002779 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002780 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2781 onpass="ONOS nodes reran for election topic",
2782 onfail="Errror rerunning for election" )
Devin Lim142b5342017-07-20 15:22:39 -07002783 def tempCell( self, cellName, ipList ):
2784 main.step( "Create cell file" )
2785 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002786
Devin Lim142b5342017-07-20 15:22:39 -07002787
2788 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2789 main.Mininet1.ip_address,
2790 cellAppString, ipList , main.ONOScli1.karafUser )
2791 main.step( "Applying cell variable to environment" )
2792 cellResult = main.ONOSbench.setCell( cellName )
2793 verifyResult = main.ONOSbench.verifyCell()
2794
2795
2796 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002797 """
2798 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002799 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002800 1: scaling
2801 """
2802 """
2803 Check state after ONOS failure/scaling
2804 """
2805 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002806 assert main, "main not defined"
2807 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002808 main.case( "Running ONOS Constant State Tests" )
2809
2810 OnosAfterWhich = [ "failure" , "scaliing" ]
2811
Devin Lim58046fa2017-07-05 16:55:00 -07002812 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002813 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002814
Devin Lim142b5342017-07-20 15:22:39 -07002815 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002816 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002817
2818 if rolesResults and not consistentMastership:
2819 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002820 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002821 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002822 json.dumps( json.loads( ONOSMastership[ i ] ),
2823 sort_keys=True,
2824 indent=4,
2825 separators=( ',', ': ' ) ) )
2826
2827 if compareSwitch:
2828 description2 = "Compare switch roles from before failure"
2829 main.step( description2 )
2830 try:
2831 currentJson = json.loads( ONOSMastership[ 0 ] )
2832 oldJson = json.loads( mastershipState )
2833 except ( ValueError, TypeError ):
2834 main.log.exception( "Something is wrong with parsing " +
2835 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002836 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2837 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002838 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002839 mastershipCheck = main.TRUE
2840 for i in range( 1, 29 ):
2841 switchDPID = str(
2842 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
2843 current = [ switch[ 'master' ] for switch in currentJson
2844 if switchDPID in switch[ 'id' ] ]
2845 old = [ switch[ 'master' ] for switch in oldJson
2846 if switchDPID in switch[ 'id' ] ]
2847 if current == old:
2848 mastershipCheck = mastershipCheck and main.TRUE
2849 else:
2850 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2851 mastershipCheck = main.FALSE
2852 utilities.assert_equals(
2853 expect=main.TRUE,
2854 actual=mastershipCheck,
2855 onpass="Mastership of Switches was not changed",
2856 onfail="Mastership of some switches changed" )
2857
2858 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002859 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002860 intentCheck = main.FALSE
2861 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002862
2863 main.step( "Check for consistency in Intents from each controller" )
2864 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2865 main.log.info( "Intents are consistent across all ONOS " +
2866 "nodes" )
2867 else:
2868 consistentIntents = False
2869
2870 # Try to make it easy to figure out what is happening
2871 #
2872 # Intent ONOS1 ONOS2 ...
2873 # 0x01 INSTALLED INSTALLING
2874 # ... ... ...
2875 # ... ... ...
2876 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002877 for ctrl in main.Cluster.active():
2878 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002879 main.log.warn( title )
2880 # get all intent keys in the cluster
2881 keys = []
2882 for nodeStr in ONOSIntents:
2883 node = json.loads( nodeStr )
2884 for intent in node:
2885 keys.append( intent.get( 'id' ) )
2886 keys = set( keys )
2887 for key in keys:
2888 row = "%-13s" % key
2889 for nodeStr in ONOSIntents:
2890 node = json.loads( nodeStr )
2891 for intent in node:
2892 if intent.get( 'id' ) == key:
2893 row += "%-15s" % intent.get( 'state' )
2894 main.log.warn( row )
2895 # End table view
2896
2897 utilities.assert_equals(
2898 expect=True,
2899 actual=consistentIntents,
2900 onpass="Intents are consistent across all ONOS nodes",
2901 onfail="ONOS nodes have different views of intents" )
2902 intentStates = []
2903 for node in ONOSIntents: # Iter through ONOS nodes
2904 nodeStates = []
2905 # Iter through intents of a node
2906 try:
2907 for intent in json.loads( node ):
2908 nodeStates.append( intent[ 'state' ] )
2909 except ( ValueError, TypeError ):
2910 main.log.exception( "Error in parsing intents" )
2911 main.log.error( repr( node ) )
2912 intentStates.append( nodeStates )
2913 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2914 main.log.info( dict( out ) )
2915
2916 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002917 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002918 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002919 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002920 main.log.warn( json.dumps(
2921 json.loads( ONOSIntents[ i ] ),
2922 sort_keys=True,
2923 indent=4,
2924 separators=( ',', ': ' ) ) )
2925 elif intentsResults and consistentIntents:
2926 intentCheck = main.TRUE
2927
2928 # NOTE: Store has no durability, so intents are lost across system
2929 # restarts
2930 if not isRestart:
2931 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2932 # NOTE: this requires case 5 to pass for intentState to be set.
2933 # maybe we should stop the test if that fails?
2934 sameIntents = main.FALSE
2935 try:
2936 intentState
2937 except NameError:
2938 main.log.warn( "No previous intent state was saved" )
2939 else:
2940 if intentState and intentState == ONOSIntents[ 0 ]:
2941 sameIntents = main.TRUE
2942 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2943 # TODO: possibly the states have changed? we may need to figure out
2944 # what the acceptable states are
2945 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2946 sameIntents = main.TRUE
2947 try:
2948 before = json.loads( intentState )
2949 after = json.loads( ONOSIntents[ 0 ] )
2950 for intent in before:
2951 if intent not in after:
2952 sameIntents = main.FALSE
2953 main.log.debug( "Intent is not currently in ONOS " +
2954 "(at least in the same form):" )
2955 main.log.debug( json.dumps( intent ) )
2956 except ( ValueError, TypeError ):
2957 main.log.exception( "Exception printing intents" )
2958 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2959 main.log.debug( repr( intentState ) )
2960 if sameIntents == main.FALSE:
2961 try:
2962 main.log.debug( "ONOS intents before: " )
2963 main.log.debug( json.dumps( json.loads( intentState ),
2964 sort_keys=True, indent=4,
2965 separators=( ',', ': ' ) ) )
2966 main.log.debug( "Current ONOS intents: " )
2967 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2968 sort_keys=True, indent=4,
2969 separators=( ',', ': ' ) ) )
2970 except ( ValueError, TypeError ):
2971 main.log.exception( "Exception printing intents" )
2972 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2973 main.log.debug( repr( intentState ) )
2974 utilities.assert_equals(
2975 expect=main.TRUE,
2976 actual=sameIntents,
2977 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ] ,
2978 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
2979 intentCheck = intentCheck and sameIntents
2980
2981 main.step( "Get the OF Table entries and compare to before " +
2982 "component " + OnosAfterWhich[ afterWhich ] )
2983 FlowTables = main.TRUE
2984 for i in range( 28 ):
2985 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2986 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2987 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
2988 FlowTables = FlowTables and curSwitch
2989 if curSwitch == main.FALSE:
2990 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2991 utilities.assert_equals(
2992 expect=main.TRUE,
2993 actual=FlowTables,
2994 onpass="No changes were found in the flow tables",
2995 onfail="Changes were found in the flow tables" )
2996
Jon Hallca319892017-06-15 15:25:22 -07002997 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07002998 """
2999 main.step( "Check the continuous pings to ensure that no packets " +
3000 "were dropped during component failure" )
3001 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
3002 main.params[ 'TESTONIP' ] )
3003 LossInPings = main.FALSE
3004 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3005 for i in range( 8, 18 ):
3006 main.log.info(
3007 "Checking for a loss in pings along flow from s" +
3008 str( i ) )
3009 LossInPings = main.Mininet2.checkForLoss(
3010 "/tmp/ping.h" +
3011 str( i ) ) or LossInPings
3012 if LossInPings == main.TRUE:
3013 main.log.info( "Loss in ping detected" )
3014 elif LossInPings == main.ERROR:
3015 main.log.info( "There are multiple mininet process running" )
3016 elif LossInPings == main.FALSE:
3017 main.log.info( "No Loss in the pings" )
3018 main.log.info( "No loss of dataplane connectivity" )
3019 utilities.assert_equals(
3020 expect=main.FALSE,
3021 actual=LossInPings,
3022 onpass="No Loss of connectivity",
3023 onfail="Loss of dataplane connectivity detected" )
3024 # NOTE: Since intents are not persisted with IntnentStore,
3025 # we expect loss in dataplane connectivity
3026 LossInPings = main.FALSE
3027 """
3028
3029 def compareTopo( self, main ):
3030 """
3031 Compare topo
3032 """
3033 import json
3034 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003035 assert main, "main not defined"
3036 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003037 try:
3038 from tests.dependencies.topology import Topology
3039 except ImportError:
3040 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003041 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003042 try:
3043 main.topoRelated
3044 except ( NameError, AttributeError ):
3045 main.topoRelated = Topology()
3046 main.case( "Compare ONOS Topology view to Mininet topology" )
3047 main.caseExplanation = "Compare topology objects between Mininet" +\
3048 " and ONOS"
3049 topoResult = main.FALSE
3050 topoFailMsg = "ONOS topology don't match Mininet"
3051 elapsed = 0
3052 count = 0
3053 main.step( "Comparing ONOS topology to MN topology" )
3054 startTime = time.time()
3055 # Give time for Gossip to work
3056 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3057 devicesResults = main.TRUE
3058 linksResults = main.TRUE
3059 hostsResults = main.TRUE
3060 hostAttachmentResults = True
3061 count += 1
3062 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003063 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003064 kwargs={ 'sleep': 5, 'attempts': 5,
3065 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003066 ipResult = main.TRUE
3067
Devin Lim142b5342017-07-20 15:22:39 -07003068 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003069 kwargs={ 'sleep': 5, 'attempts': 5,
3070 'randomTime': True },
3071 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003072
3073 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003074 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003075 if hosts[ controller ]:
3076 for host in hosts[ controller ]:
3077 if host is None or host.get( 'ipAddresses', [] ) == []:
3078 main.log.error(
3079 "Error with host ipAddresses on controller" +
3080 controllerStr + ": " + str( host ) )
3081 ipResult = main.FALSE
Devin Lim142b5342017-07-20 15:22:39 -07003082 ports = main.topoRelated.getAll( "ports" , True,
Jon Hallca319892017-06-15 15:25:22 -07003083 kwargs={ 'sleep': 5, 'attempts': 5,
3084 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003085 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003086 kwargs={ 'sleep': 5, 'attempts': 5,
3087 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003088 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003089 kwargs={ 'sleep': 5, 'attempts': 5,
3090 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003091
3092 elapsed = time.time() - startTime
3093 cliTime = time.time() - cliStart
3094 print "Elapsed time: " + str( elapsed )
3095 print "CLI time: " + str( cliTime )
3096
3097 if all( e is None for e in devices ) and\
3098 all( e is None for e in hosts ) and\
3099 all( e is None for e in ports ) and\
3100 all( e is None for e in links ) and\
3101 all( e is None for e in clusters ):
3102 topoFailMsg = "Could not get topology from ONOS"
3103 main.log.error( topoFailMsg )
3104 continue # Try again, No use trying to compare
3105
3106 mnSwitches = main.Mininet1.getSwitches()
3107 mnLinks = main.Mininet1.getLinks()
3108 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003109 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003110 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003111 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1, controller,
3112 mnSwitches,
3113 devices, ports )
3114 utilities.assert_equals( expect=main.TRUE,
3115 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003116 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003117 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003118 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003119 " Switches view is incorrect" )
3120
3121
3122 currentLinksResult = main.topoRelated.compareBase( links, controller,
3123 main.Mininet1.compareLinks,
3124 [mnSwitches, mnLinks] )
3125 utilities.assert_equals( expect=main.TRUE,
3126 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003127 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003128 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003129 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003130 " links view is incorrect" )
3131 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3132 currentHostsResult = main.Mininet1.compareHosts(
3133 mnHosts,
3134 hosts[ controller ] )
3135 elif hosts[ controller ] == []:
3136 currentHostsResult = main.TRUE
3137 else:
3138 currentHostsResult = main.FALSE
3139 utilities.assert_equals( expect=main.TRUE,
3140 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003141 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003142 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003143 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003144 " hosts don't match Mininet" )
3145 # CHECKING HOST ATTACHMENT POINTS
3146 hostAttachment = True
3147 zeroHosts = False
3148 # FIXME: topo-HA/obelisk specific mappings:
3149 # key is mac and value is dpid
3150 mappings = {}
3151 for i in range( 1, 29 ): # hosts 1 through 28
3152 # set up correct variables:
3153 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
3154 if i == 1:
3155 deviceId = "1000".zfill( 16 )
3156 elif i == 2:
3157 deviceId = "2000".zfill( 16 )
3158 elif i == 3:
3159 deviceId = "3000".zfill( 16 )
3160 elif i == 4:
3161 deviceId = "3004".zfill( 16 )
3162 elif i == 5:
3163 deviceId = "5000".zfill( 16 )
3164 elif i == 6:
3165 deviceId = "6000".zfill( 16 )
3166 elif i == 7:
3167 deviceId = "6007".zfill( 16 )
3168 elif i >= 8 and i <= 17:
3169 dpid = '3' + str( i ).zfill( 3 )
3170 deviceId = dpid.zfill( 16 )
3171 elif i >= 18 and i <= 27:
3172 dpid = '6' + str( i ).zfill( 3 )
3173 deviceId = dpid.zfill( 16 )
3174 elif i == 28:
3175 deviceId = "2800".zfill( 16 )
3176 mappings[ macId ] = deviceId
3177 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3178 if hosts[ controller ] == []:
3179 main.log.warn( "There are no hosts discovered" )
3180 zeroHosts = True
3181 else:
3182 for host in hosts[ controller ]:
3183 mac = None
3184 location = None
3185 device = None
3186 port = None
3187 try:
3188 mac = host.get( 'mac' )
3189 assert mac, "mac field could not be found for this host object"
Devin Limefaf3062017-08-14 16:18:19 -07003190 print host
3191 if 'locations' in host:
3192 location = host.get( 'locations' )[ 0 ]
3193 elif 'location' in host:
3194 location = host.get( 'location' )
Devin Lim58046fa2017-07-05 16:55:00 -07003195 assert location, "location field could not be found for this host object"
3196
3197 # Trim the protocol identifier off deviceId
3198 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
3199 assert device, "elementId field could not be found for this host location object"
3200
3201 port = location.get( 'port' )
3202 assert port, "port field could not be found for this host location object"
3203
3204 # Now check if this matches where they should be
3205 if mac and device and port:
3206 if str( port ) != "1":
3207 main.log.error( "The attachment port is incorrect for " +
3208 "host " + str( mac ) +
3209 ". Expected: 1 Actual: " + str( port ) )
3210 hostAttachment = False
3211 if device != mappings[ str( mac ) ]:
3212 main.log.error( "The attachment device is incorrect for " +
3213 "host " + str( mac ) +
3214 ". Expected: " + mappings[ str( mac ) ] +
3215 " Actual: " + device )
3216 hostAttachment = False
3217 else:
3218 hostAttachment = False
Devin Limefaf3062017-08-14 16:18:19 -07003219 except ( AssertionError, TypeError ):
Devin Lim58046fa2017-07-05 16:55:00 -07003220 main.log.exception( "Json object not as expected" )
3221 main.log.error( repr( host ) )
3222 hostAttachment = False
3223 else:
3224 main.log.error( "No hosts json output or \"Error\"" +
3225 " in output. hosts = " +
3226 repr( hosts[ controller ] ) )
3227 if zeroHosts is False:
3228 # TODO: Find a way to know if there should be hosts in a
3229 # given point of the test
3230 hostAttachment = True
3231
3232 # END CHECKING HOST ATTACHMENT POINTS
3233 devicesResults = devicesResults and currentDevicesResult
3234 linksResults = linksResults and currentLinksResult
3235 hostsResults = hostsResults and currentHostsResult
3236 hostAttachmentResults = hostAttachmentResults and\
3237 hostAttachment
3238 topoResult = ( devicesResults and linksResults
3239 and hostsResults and ipResult and
3240 hostAttachmentResults )
3241 utilities.assert_equals( expect=True,
3242 actual=topoResult,
3243 onpass="ONOS topology matches Mininet",
3244 onfail=topoFailMsg )
3245 # End of While loop to pull ONOS state
3246
3247 # Compare json objects for hosts and dataplane clusters
3248
3249 # hosts
3250 main.step( "Hosts view is consistent across all ONOS nodes" )
3251 consistentHostsResult = main.TRUE
3252 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003253 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003254 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3255 if hosts[ controller ] == hosts[ 0 ]:
3256 continue
3257 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003258 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003259 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003260 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003261 consistentHostsResult = main.FALSE
3262
3263 else:
Jon Hallca319892017-06-15 15:25:22 -07003264 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003265 controllerStr )
3266 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003267 main.log.debug( controllerStr +
3268 " hosts response: " +
3269 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003270 utilities.assert_equals(
3271 expect=main.TRUE,
3272 actual=consistentHostsResult,
3273 onpass="Hosts view is consistent across all ONOS nodes",
3274 onfail="ONOS nodes have different views of hosts" )
3275
3276 main.step( "Hosts information is correct" )
3277 hostsResults = hostsResults and ipResult
3278 utilities.assert_equals(
3279 expect=main.TRUE,
3280 actual=hostsResults,
3281 onpass="Host information is correct",
3282 onfail="Host information is incorrect" )
3283
3284 main.step( "Host attachment points to the network" )
3285 utilities.assert_equals(
3286 expect=True,
3287 actual=hostAttachmentResults,
3288 onpass="Hosts are correctly attached to the network",
3289 onfail="ONOS did not correctly attach hosts to the network" )
3290
3291 # Strongly connected clusters of devices
3292 main.step( "Clusters view is consistent across all ONOS nodes" )
3293 consistentClustersResult = main.TRUE
3294 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003295 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003296 if "Error" not in clusters[ controller ]:
3297 if clusters[ controller ] == clusters[ 0 ]:
3298 continue
3299 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003300 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003301 controllerStr +
3302 " is inconsistent with ONOS1" )
3303 consistentClustersResult = main.FALSE
3304 else:
3305 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003306 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003307 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003308 main.log.debug( controllerStr +
3309 " clusters response: " +
3310 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003311 utilities.assert_equals(
3312 expect=main.TRUE,
3313 actual=consistentClustersResult,
3314 onpass="Clusters view is consistent across all ONOS nodes",
3315 onfail="ONOS nodes have different views of clusters" )
3316 if not consistentClustersResult:
3317 main.log.debug( clusters )
3318 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003319 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003320
3321 main.step( "There is only one SCC" )
3322 # there should always only be one cluster
3323 try:
3324 numClusters = len( json.loads( clusters[ 0 ] ) )
3325 except ( ValueError, TypeError ):
3326 main.log.exception( "Error parsing clusters[0]: " +
3327 repr( clusters[ 0 ] ) )
3328 numClusters = "ERROR"
3329 clusterResults = main.FALSE
3330 if numClusters == 1:
3331 clusterResults = main.TRUE
3332 utilities.assert_equals(
3333 expect=1,
3334 actual=numClusters,
3335 onpass="ONOS shows 1 SCC",
3336 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3337
3338 topoResult = ( devicesResults and linksResults
3339 and hostsResults and consistentHostsResult
3340 and consistentClustersResult and clusterResults
3341 and ipResult and hostAttachmentResults )
3342
3343 topoResult = topoResult and int( count <= 2 )
3344 note = "note it takes about " + str( int( cliTime ) ) + \
3345 " seconds for the test to make all the cli calls to fetch " +\
3346 "the topology from each ONOS instance"
3347 main.log.info(
3348 "Very crass estimate for topology discovery/convergence( " +
3349 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3350 str( count ) + " tries" )
3351
3352 main.step( "Device information is correct" )
3353 utilities.assert_equals(
3354 expect=main.TRUE,
3355 actual=devicesResults,
3356 onpass="Device information is correct",
3357 onfail="Device information is incorrect" )
3358
3359 main.step( "Links are correct" )
3360 utilities.assert_equals(
3361 expect=main.TRUE,
3362 actual=linksResults,
3363 onpass="Link are correct",
3364 onfail="Links are incorrect" )
3365
3366 main.step( "Hosts are correct" )
3367 utilities.assert_equals(
3368 expect=main.TRUE,
3369 actual=hostsResults,
3370 onpass="Hosts are correct",
3371 onfail="Hosts are incorrect" )
3372
3373 # FIXME: move this to an ONOS state case
3374 main.step( "Checking ONOS nodes" )
3375 nodeResults = utilities.retry( self.nodesCheck,
3376 False,
Jon Hallca319892017-06-15 15:25:22 -07003377 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07003378 attempts=5 )
3379 utilities.assert_equals( expect=True, actual=nodeResults,
3380 onpass="Nodes check successful",
3381 onfail="Nodes check NOT successful" )
3382 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003383 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003384 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003385 ctrl.name,
3386 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003387
3388 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003389 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003390
Devin Lim58046fa2017-07-05 16:55:00 -07003391 def linkDown( self, main, fromS="s3", toS="s28" ):
3392 """
3393 Link fromS-toS down
3394 """
3395 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003396 assert main, "main not defined"
3397 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003398 # NOTE: You should probably run a topology check after this
3399
3400 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3401
3402 description = "Turn off a link to ensure that Link Discovery " +\
3403 "is working properly"
3404 main.case( description )
3405
3406 main.step( "Kill Link between " + fromS + " and " + toS )
3407 LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
3408 main.log.info( "Waiting " + str( linkSleep ) +
3409 " seconds for link down to be discovered" )
3410 time.sleep( linkSleep )
3411 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3412 onpass="Link down successful",
3413 onfail="Failed to bring link down" )
3414 # TODO do some sort of check here
3415
3416 def linkUp( self, main, fromS="s3", toS="s28" ):
3417 """
3418 Link fromS-toS up
3419 """
3420 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003421 assert main, "main not defined"
3422 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003423 # NOTE: You should probably run a topology check after this
3424
3425 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3426
3427 description = "Restore a link to ensure that Link Discovery is " + \
3428 "working properly"
3429 main.case( description )
3430
3431 main.step( "Bring link between " + fromS + " and " + toS +" back up" )
3432 LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
3433 main.log.info( "Waiting " + str( linkSleep ) +
3434 " seconds for link up to be discovered" )
3435 time.sleep( linkSleep )
3436 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3437 onpass="Link up successful",
3438 onfail="Failed to bring link up" )
3439
3440 def switchDown( self, main ):
3441 """
3442 Switch Down
3443 """
3444 # NOTE: You should probably run a topology check after this
3445 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003446 assert main, "main not defined"
3447 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003448
3449 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3450
3451 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003452 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003453 main.case( description )
3454 switch = main.params[ 'kill' ][ 'switch' ]
3455 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3456
3457 # TODO: Make this switch parameterizable
3458 main.step( "Kill " + switch )
3459 main.log.info( "Deleting " + switch )
3460 main.Mininet1.delSwitch( switch )
3461 main.log.info( "Waiting " + str( switchSleep ) +
3462 " seconds for switch down to be discovered" )
3463 time.sleep( switchSleep )
3464 device = onosCli.getDevice( dpid=switchDPID )
3465 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003466 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003467 result = main.FALSE
3468 if device and device[ 'available' ] is False:
3469 result = main.TRUE
3470 utilities.assert_equals( expect=main.TRUE, actual=result,
3471 onpass="Kill switch successful",
3472 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003473
Devin Lim58046fa2017-07-05 16:55:00 -07003474 def switchUp( self, main ):
3475 """
3476 Switch Up
3477 """
3478 # NOTE: You should probably run a topology check after this
3479 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003480 assert main, "main not defined"
3481 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003482
3483 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3484 switch = main.params[ 'kill' ][ 'switch' ]
3485 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3486 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003487 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003488 description = "Adding a switch to ensure it is discovered correctly"
3489 main.case( description )
3490
3491 main.step( "Add back " + switch )
3492 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3493 for peer in links:
3494 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003495 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003496 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3497 main.log.info( "Waiting " + str( switchSleep ) +
3498 " seconds for switch up to be discovered" )
3499 time.sleep( switchSleep )
3500 device = onosCli.getDevice( dpid=switchDPID )
3501 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003502 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003503 result = main.FALSE
3504 if device and device[ 'available' ]:
3505 result = main.TRUE
3506 utilities.assert_equals( expect=main.TRUE, actual=result,
3507 onpass="add switch successful",
3508 onfail="Failed to add switch?" )
3509
3510 def startElectionApp( self, main ):
3511 """
3512 start election app on all onos nodes
3513 """
Devin Lim58046fa2017-07-05 16:55:00 -07003514 assert main, "main not defined"
3515 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003516
3517 main.case( "Start Leadership Election app" )
3518 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003519 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -07003520 appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003521 utilities.assert_equals(
3522 expect=main.TRUE,
3523 actual=appResult,
3524 onpass="Election app installed",
3525 onfail="Something went wrong with installing Leadership election" )
3526
3527 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003528 onosCli.electionTestRun()
3529 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003530 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003531 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003532 utilities.assert_equals(
3533 expect=True,
3534 actual=sameResult,
3535 onpass="All nodes see the same leaderboards",
3536 onfail="Inconsistent leaderboards" )
3537
3538 if sameResult:
3539 leader = leaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003540 if onosCli.ipAddress in leader:
Devin Lim58046fa2017-07-05 16:55:00 -07003541 correctLeader = True
3542 else:
3543 correctLeader = False
3544 main.step( "First node was elected leader" )
3545 utilities.assert_equals(
3546 expect=True,
3547 actual=correctLeader,
3548 onpass="Correct leader was elected",
3549 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003550 main.Cluster.testLeader = leader
3551
    def isElectionFunctional( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        old and new variable prefixes refer to data from before vs after
        withdrawal and later before withdrawal vs after re-election
        """
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each nodes' candidates before
        newLeaders = []  # list of lists of each nodes' candidates after
        oldLeader = ''  # the old leader from oldLeaders, None if not same
        newLeader = ''  # the new leader from newLeaders, None if not same
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one leader
        # With a single node, withdrawing the leader leaves nobody to elect
        if len( main.Cluster.runningNodes ) == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
        utilities.assert_equals(
            expect=True,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        # If the election app isn't loaded the rest of this case is meaningless
        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = main.Cluster.active()
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # Boards agree; the leader is the first entry of the first board
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.info( "Old leader: " + oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        # NOTE(review): if oldLeader is truthy but matches no active node's
        # ipAddress, oldLeaderCLI stays None and the withdraw call below would
        # raise AttributeError — confirm the leader is always an active node
        for ctrl in main.Cluster.active():
            if oldLeader == ctrl.ipAddress:
                oldLeaderCLI = ctrl
                break
        else:  # FOR/ELSE statement
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            # 'none' means no node currently holds leadership
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        elif len( oldLeaders[ 0 ] ) >= 3:
            # Board layout appears to be [ leader, cand1, cand2, ... ]; after
            # the leader and first candidate change, index 2 should win
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            # Fewer than 3 entries: not enough information to predict the winner
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Paremterize
        positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate List
        # NOTE(review): if consistentLeaderboards returned an empty list this
        # indexing would raise IndexError — confirm it always returns at least
        # one (possibly empty) board
        if not reRunLeaders[ 0 ]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                      str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
Jon Hallca319892017-06-15 15:25:22 -07003718
    def installDistributedPrimitiveApp( self, main ):
        """
        Install the Distributed Primitives test app and initialize the
        shared state (counter name/value, set name/contents) used by the
        distributed-primitives test cases.
        """
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        # Variables for the distributed primitives tests
        main.pCounterName = "TestON-Partitions"  # name of the distributed counter under test
        main.pCounterValue = 0  # locally tracked expected value of the counter
        main.onosSet = set( [] )  # locally tracked expected contents of the distributed set
        main.onosSetName = "TestON-set"  # name of the distributed set under test

        description = "Install Primitives app"
        main.case( description )
        main.step( "Install Primitives app" )
        appName = "org.onosproject.distributedprimitives"
        # Activate via one node; ONOS propagates app activation cluster-wide
        appResults = main.Cluster.next().CLI.activateApp( appName )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=appResults,
                                 onpass="Primitives app activated",
                                 onfail="Primitives app not activated" )
        # TODO check on all nodes instead of sleeping
        time.sleep( 5 )  # To allow all nodes to activate