blob: 11bc0392b9b32727fe04c9bbba6abb4b6e42c640 [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
2Copyright 2015 Open Networking Foundation (ONF)
3
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
11 (at your option) any later version.
12
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
21
Jon Halla440e872016-03-31 15:15:50 -070022import json
Jon Hall41d39f12016-04-11 22:54:35 -070023import time
Jon Halle1a3b752015-07-22 13:02:46 -070024
Jon Hallf37d44d2017-05-24 10:37:30 -070025
Jon Hall41d39f12016-04-11 22:54:35 -070026class HA():
Jon Hall57b50432015-10-22 10:20:10 -070027
Jon Halla440e872016-03-31 15:15:50 -070028 def __init__( self ):
29 self.default = ''
Jon Hall57b50432015-10-22 10:20:10 -070030
    def customizeOnosGenPartitions( self ):
        """
        Install the test's custom onos-gen-partitions script into the ONOS
        tools directory on the bench node via secure copy.
        """
        # copy gen-partions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        # NOTE(review): direction="from" only works here because TestON and
        #               ONOS share a filesystem (see note above) -- confirm
        #               before running with a remote ONOS bench
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070042
    def cleanUpGenPartition( self ):
        """
        Revert the customized onos-gen-partitions file on the ONOS bench by
        running "git checkout" in the ONOS home directory, logging the shell
        response. Exits the whole test on a pexpect timeout/EOF.
        """
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        # NOTE(review): pexpect is not imported in the visible part of this
        #               file -- confirm it is in scope, otherwise this except
        #               clause itself raises a NameError when triggered
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanup()
            main.exit()
Jon Hallca319892017-06-15 15:25:22 -070057
Devin Lim58046fa2017-07-05 16:55:00 -070058 def startingMininet( self ):
59 main.step( "Starting Mininet" )
60 # scp topo file to mininet
61 # TODO: move to params?
62 topoName = "obelisk.py"
63 filePath = main.ONOSbench.home + "/tools/test/topos/"
64 main.ONOSbench.scp( main.Mininet1,
65 filePath + topoName,
66 main.Mininet1.home,
67 direction="to" )
68 mnResult = main.Mininet1.startNet()
69 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
70 onpass="Mininet Started",
71 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070072
Devin Lim58046fa2017-07-05 16:55:00 -070073 def scalingMetadata( self ):
74 import re
Devin Lim142b5342017-07-20 15:22:39 -070075 main.step( "Generate initial metadata file" )
Devin Lim58046fa2017-07-05 16:55:00 -070076 main.scaling = main.params[ 'scaling' ].split( "," )
77 main.log.debug( main.scaling )
78 scale = main.scaling.pop( 0 )
79 main.log.debug( scale )
80 if "e" in scale:
81 equal = True
82 else:
83 equal = False
84 main.log.debug( equal )
Devin Lim142b5342017-07-20 15:22:39 -070085 main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
86 genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
Devin Lim58046fa2017-07-05 16:55:00 -070087 utilities.assert_equals( expect=main.TRUE, actual=genResult,
88 onpass="New cluster metadata file generated",
89 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070090
Devin Lim58046fa2017-07-05 16:55:00 -070091 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070092 main.step( "Generate initial metadata file" )
93 if main.Cluster.numCtrls >= 5:
94 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070095 else:
96 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim142b5342017-07-20 15:22:39 -070097 genResult = main.Server.generateFile( main.Cluster.numCtrls )
Devin Lim58046fa2017-07-05 16:55:00 -070098 utilities.assert_equals( expect=main.TRUE, actual=genResult,
99 onpass="New cluster metadata file generated",
100 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -0700101
Devin Lim142b5342017-07-20 15:22:39 -0700102 def setServerForCluster( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700103 import os
104 main.step( "Setup server for cluster metadata file" )
105 main.serverPort = main.params[ 'server' ][ 'port' ]
106 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
107 main.log.debug( "Root dir: {}".format( rootDir ) )
108 status = main.Server.start( main.ONOSbench,
109 rootDir,
110 port=main.serverPort,
111 logDir=main.logdir + "/server.log" )
112 utilities.assert_equals( expect=main.TRUE, actual=status,
113 onpass="Server started",
114 onfail="Failled to start SimpleHTTPServer" )
115
    def copyingBackupConfig( self ):
        """
        Back up the onos-service file, then patch it in place (via sed on
        the bench) so packaged ONOS nodes fetch their cluster metadata from
        the HTTP server started by setServerForCluster.

        Stores the service file path in main.onosServicepath for later
        restoration by cleanUpOnosService.
        """
        main.step( "Copying backup config files" )
        main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
        # keep a pristine copy so cleanUpOnosService can restore it
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 main.onosServicepath,
                                 main.onosServicepath + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # slashes are escaped here because the string is substituted into a
        # sed expression below
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # insert an export of JAVA_OPTS right after the shebang line;
        # double braces survive .format() as literal ${JAVA_OPTS:-...}
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, main.onosServicepath )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        # first expect consumes the echoed command (which contains metaFile),
        # second waits for the shell prompt
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )
146
147 def cleanUpOnosService( self ):
148 # Cleanup custom onos-service file
149 main.ONOSbench.scp( main.ONOSbench,
150 main.onosServicepath + ".backup",
151 main.onosServicepath,
152 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700153
Jon Halla440e872016-03-31 15:15:50 -0700154 def consistentCheck( self ):
155 """
156 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700157
Jon Hallf37d44d2017-05-24 10:37:30 -0700158 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700159 - onosCounters is the parsed json output of the counters command on
160 all nodes
161 - consistent is main.TRUE if all "TestON" counters are consitent across
162 all nodes or main.FALSE
Jon Halla440e872016-03-31 15:15:50 -0700163 """
Jon Halle1a3b752015-07-22 13:02:46 -0700164 try:
Jon Halla440e872016-03-31 15:15:50 -0700165 # Get onos counters results
166 onosCountersRaw = []
167 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700168 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700169 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700170 name="counters-" + str( ctrl ),
171 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700172 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700173 'randomTime': True } )
174 threads.append( t )
175 t.start()
176 for t in threads:
177 t.join()
178 onosCountersRaw.append( t.result )
179 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700180 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700181 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700182 onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700183 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700184 main.log.error( "Could not parse counters response from " +
Devin Lim142b5342017-07-20 15:22:39 -0700185 str( main.Cluster.active( i ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700186 main.log.warn( repr( onosCountersRaw[ i ] ) )
187 onosCounters.append( [] )
188
189 testCounters = {}
190 # make a list of all the "TestON-*" counters in ONOS
Jon Hall41d39f12016-04-11 22:54:35 -0700191 # lookes like a dict whose keys are the name of the ONOS node and
192 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700193 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700194 # }
195 # NOTE: There is an assumtion that all nodes are active
196 # based on the above for loops
197 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700198 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700199 if 'TestON' in key:
Devin Lim142b5342017-07-20 15:22:39 -0700200 node = str( main.Cluster.active( controller[ 0 ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700201 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700202 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700203 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700204 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700205 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700206 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700207 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
208 if all( tmp ):
209 consistent = main.TRUE
210 else:
211 consistent = main.FALSE
212 main.log.error( "ONOS nodes have different values for counters:\n" +
213 testCounters )
214 return ( onosCounters, consistent )
215 except Exception:
216 main.log.exception( "" )
217 main.cleanup()
218 main.exit()
219
220 def counterCheck( self, counterName, counterValue ):
221 """
222 Checks that TestON counters are consistent across all nodes and that
223 specified counter is in ONOS with the given value
224 """
225 try:
226 correctResults = main.TRUE
227 # Get onos counters results and consistentCheck
228 onosCounters, consistent = self.consistentCheck()
229 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700230 for i in range( len( main.Cluster.active() ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700231 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700232 onosValue = None
233 try:
234 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700235 except AttributeError:
Devin Lim142b5342017-07-20 15:22:39 -0700236 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -0700237 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700238 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700239 correctResults = main.FALSE
240 if onosValue == counterValue:
241 main.log.info( counterName + " counter value is correct" )
242 else:
Jon Hall41d39f12016-04-11 22:54:35 -0700243 main.log.error( counterName +
244 " counter value is incorrect," +
245 " expected value: " + str( counterValue ) +
246 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700247 correctResults = main.FALSE
248 return consistent and correctResults
249 except Exception:
250 main.log.exception( "" )
251 main.cleanup()
252 main.exit()
Jon Hall41d39f12016-04-11 22:54:35 -0700253
254 def consistentLeaderboards( self, nodes ):
255 TOPIC = 'org.onosproject.election'
256 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700257 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700258 for n in range( 5 ): # Retry in case election is still happening
259 leaderList = []
260 # Get all leaderboards
261 for cli in nodes:
262 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
263 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700264 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700265 leaderList is not None
266 main.log.debug( leaderList )
267 main.log.warn( result )
268 if result:
269 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700270 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700271 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
272 return ( result, leaderList )
273
274 def nodesCheck( self, nodes ):
275 nodesOutput = []
276 results = True
277 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700278 for node in nodes:
279 t = main.Thread( target=node.nodes,
280 name="nodes-" + str( node ),
Jon Hallf37d44d2017-05-24 10:37:30 -0700281 args=[] )
Jon Hall41d39f12016-04-11 22:54:35 -0700282 threads.append( t )
283 t.start()
284
285 for t in threads:
286 t.join()
287 nodesOutput.append( t.result )
Jon Hallca319892017-06-15 15:25:22 -0700288 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Jon Hall41d39f12016-04-11 22:54:35 -0700289 for i in nodesOutput:
290 try:
291 current = json.loads( i )
292 activeIps = []
293 currentResult = False
294 for node in current:
Jon Hallf37d44d2017-05-24 10:37:30 -0700295 if node[ 'state' ] == 'READY':
296 activeIps.append( node[ 'ip' ] )
Jon Hall41d39f12016-04-11 22:54:35 -0700297 activeIps.sort()
298 if ips == activeIps:
299 currentResult = True
300 except ( ValueError, TypeError ):
301 main.log.error( "Error parsing nodes output" )
302 main.log.warn( repr( i ) )
303 currentResult = False
304 results = results and currentResult
305 return results
Jon Hallca319892017-06-15 15:25:22 -0700306
Devin Lim58046fa2017-07-05 16:55:00 -0700307 def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
308 # GRAPHS
309 # NOTE: important params here:
310 # job = name of Jenkins job
311 # Plot Name = Plot-HA, only can be used if multiple plots
312 # index = The number of the graph under plot name
313 job = testName
314 graphs = '<ac:structured-macro ac:name="html">\n'
315 graphs += '<ac:plain-text-body><![CDATA[\n'
316 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
317 '/plot/' + plotName + '/getPlot?index=' + str( index ) +\
318 '&width=500&height=300"' +\
319 'noborder="0" width="500" height="300" scrolling="yes" ' +\
320 'seamless="seamless"></iframe>\n'
321 graphs += ']]></ac:plain-text-body>\n'
322 graphs += '</ac:structured-macro>\n'
323 main.log.wiki( graphs )
Jon Hallca319892017-06-15 15:25:22 -0700324
    def initialSetUp( self, serviceClean=False ):
        """
        Finish test setup after ONOS is installed: optionally start a
        tcpdump capture on Mininet, optionally revert onos.conf/onos.service
        changes, verify all ONOS nodes are READY, activate the apps listed
        in the params file, push ONOS_Configuration settings, and check app
        IDs. Exits the test if the nodes check fails.

        serviceClean - when True, "git checkout" the onos init files that
                       earlier setup steps may have modified
        """

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        # retry because nodes may still be coming up
        nodeResults = utilities.retry( self.nodesCheck,
                                       False,
                                       args=[ main.Cluster.active() ],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # dump each node's inactive components before aborting the test
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanup()
            main.exit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This should be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700414
Jon Hallca319892017-06-15 15:25:22 -0700415 def commonChecks( self ):
416 # TODO: make this assertable or assert in here?
417 self.topicsCheck()
418 self.partitionsCheck()
419 self.pendingMapCheck()
420 self.appCheck()
421
    def topicsCheck( self, extraTopics=[] ):
        """
        Check for work partition topics in leaders output.

        extraTopics - optional list of extra topic names to require in
                      addition to the 14 "work-partition-N" topics
                      ( NOTE: the mutable default is only ever read here,
                        never mutated, so sharing it is harmless )

        Returns True when at least one expected topic is MISSING from the
        leaders output ( i.e. True means failure ), False otherwise.
        """
        leaders = main.Cluster.next().leaders()
        missing = False
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                output = json.dumps( parsedLeaders,
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
                main.log.debug( "Leaders: " + output )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                topics += extraTopics
                main.log.debug( topics )
                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        if missing:
            # NOTE: Can we refactor this into the Cluster class? Maybe an
            #       option to print the output of a command from each node?
            # dump the raw leaders output from every node for debugging
            for ctrl in main.Cluster.active():
                response = ctrl.CLI.leaders( jsonFormat=False )
                main.log.debug( str( ctrl.name ) + " leaders output: \n" +
                                str( response ) )
        return missing
460
461 def partitionsCheck( self ):
462 # TODO: return something assertable
463 partitions = main.Cluster.next().partitions()
464 try:
465 if partitions:
466 parsedPartitions = json.loads( partitions )
467 output = json.dumps( parsedPartitions,
468 sort_keys=True,
469 indent=4,
470 separators=( ',', ': ' ) )
471 main.log.debug( "Partitions: " + output )
472 # TODO check for a leader in all paritions
473 # TODO check for consistency among nodes
474 else:
475 main.log.error( "partitions() returned None" )
476 except ( ValueError, TypeError ):
477 main.log.exception( "Error parsing partitions" )
478 main.log.error( repr( partitions ) )
479
480 def pendingMapCheck( self ):
481 pendingMap = main.Cluster.next().pendingMap()
482 try:
483 if pendingMap:
484 parsedPending = json.loads( pendingMap )
485 output = json.dumps( parsedPending,
486 sort_keys=True,
487 indent=4,
488 separators=( ',', ': ' ) )
489 main.log.debug( "Pending map: " + output )
490 # TODO check something here?
491 else:
492 main.log.error( "pendingMap() returned None" )
493 except ( ValueError, TypeError ):
494 main.log.exception( "Error parsing pending map" )
495 main.log.error( repr( pendingMap ) )
496
497 def appCheck( self ):
498 """
499 Check App IDs on all nodes
500 """
501 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
502 appResults = main.Cluster.command( "appToIDCheck" )
503 appCheck = all( i == main.TRUE for i in appResults )
504 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700505 ctrl = main.Cluster.active( 0 )
Jon Hallca319892017-06-15 15:25:22 -0700506 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
507 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
508 return appCheck
509
Jon Halle0f0b342017-04-18 11:43:47 -0700510 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
511 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700512 completedValues = main.Cluster.command( "workQueueTotalCompleted",
513 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700514 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700515 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700516 completedResult = all( completedResults )
517 if not completedResult:
518 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
519 workQueueName, completed, completedValues ) )
520
521 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700522 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
523 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700524 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700525 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700526 inProgressResult = all( inProgressResults )
527 if not inProgressResult:
528 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
529 workQueueName, inProgress, inProgressValues ) )
530
531 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700532 pendingValues = main.Cluster.command( "workQueueTotalPending",
533 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700534 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700535 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700536 pendingResult = all( pendingResults )
537 if not pendingResult:
538 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
539 workQueueName, pending, pendingValues ) )
540 return completedResult and inProgressResult and pendingResult
541
    def assignDevices( self, main ):
        """
        Assign devices to controllers

        Assigns all 28 obelisk switches to the full list of cluster
        controllers via ovs-vsctl, then verifies that every running
        controller appears in each switch's controller list.

        main - the TestON main object ( shadows the module-level name )
        """
        import re
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
                               "and check that an ONOS node becomes the " + \
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = main.Cluster.getIps()
        swList = []
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            # every running controller must show up in the ovs-vsctl output
            # for this switch
            for ctrl in main.Cluster.runningNodes:
                if re.search( "tcp:" + ctrl.ipAddress, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + repr( ctrl )+ " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )
Jon Hallca319892017-06-15 15:25:22 -0700582
Devin Lim58046fa2017-07-05 16:55:00 -0700583 def assignIntents( self, main ):
584 """
585 Assign intents
586 """
587 import time
588 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700589 assert main, "main not defined"
590 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700591 try:
592 main.HAlabels
593 except ( NameError, AttributeError ):
594 main.log.error( "main.HAlabels not defined, setting to []" )
595 main.HAlabels = []
596 try:
597 main.HAdata
598 except ( NameError, AttributeError ):
599 main.log.error( "data not defined, setting to []" )
600 main.HAdata = []
601 main.case( "Adding host Intents" )
602 main.caseExplanation = "Discover hosts by using pingall then " +\
603 "assign predetermined host-to-host intents." +\
604 " After installation, check that the intent" +\
605 " is distributed to all nodes and the state" +\
606 " is INSTALLED"
607
608 # install onos-app-fwd
609 main.step( "Install reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700610 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700611 installResults = onosCli.activateApp( "org.onosproject.fwd" )
612 utilities.assert_equals( expect=main.TRUE, actual=installResults,
613 onpass="Install fwd successful",
614 onfail="Install fwd failed" )
615
616 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700617 appCheck = self.appCheck()
618 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700619 onpass="App Ids seem to be correct",
620 onfail="Something is wrong with app Ids" )
621
622 main.step( "Discovering Hosts( Via pingall for now )" )
623 # FIXME: Once we have a host discovery mechanism, use that instead
624 # REACTIVE FWD test
625 pingResult = main.FALSE
626 passMsg = "Reactive Pingall test passed"
627 time1 = time.time()
628 pingResult = main.Mininet1.pingall()
629 time2 = time.time()
630 if not pingResult:
631 main.log.warn( "First pingall failed. Trying again..." )
632 pingResult = main.Mininet1.pingall()
633 passMsg += " on the second try"
634 utilities.assert_equals(
635 expect=main.TRUE,
636 actual=pingResult,
637 onpass=passMsg,
638 onfail="Reactive Pingall failed, " +
639 "one or more ping pairs failed" )
640 main.log.info( "Time for pingall: %2f seconds" %
641 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700642 if not pingResult:
643 main.cleanup()
644 main.exit()
Devin Lim58046fa2017-07-05 16:55:00 -0700645 # timeout for fwd flows
646 time.sleep( 11 )
647 # uninstall onos-app-fwd
648 main.step( "Uninstall reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700649 uninstallResult = onosCli.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700650 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
651 onpass="Uninstall fwd successful",
652 onfail="Uninstall fwd failed" )
653
654 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700655 appCheck2 = self.appCheck()
656 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700657 onpass="App Ids seem to be correct",
658 onfail="Something is wrong with app Ids" )
659
660 main.step( "Add host intents via cli" )
661 intentIds = []
662 # TODO: move the host numbers to params
663 # Maybe look at all the paths we ping?
664 intentAddResult = True
665 hostResult = main.TRUE
666 for i in range( 8, 18 ):
667 main.log.info( "Adding host intent between h" + str( i ) +
668 " and h" + str( i + 10 ) )
669 host1 = "00:00:00:00:00:" + \
670 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
671 host2 = "00:00:00:00:00:" + \
672 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
673 # NOTE: getHost can return None
Jon Hallca319892017-06-15 15:25:22 -0700674 host1Dict = onosCli.CLI.getHost( host1 )
675 host2Dict = onosCli.CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700676 host1Id = None
677 host2Id = None
678 if host1Dict and host2Dict:
679 host1Id = host1Dict.get( 'id', None )
680 host2Id = host2Dict.get( 'id', None )
681 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700682 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700683 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700684 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700685 if tmpId:
686 main.log.info( "Added intent with id: " + tmpId )
687 intentIds.append( tmpId )
688 else:
689 main.log.error( "addHostIntent returned: " +
690 repr( tmpId ) )
691 else:
692 main.log.error( "Error, getHost() failed for h" + str( i ) +
693 " and/or h" + str( i + 10 ) )
Jon Hallca319892017-06-15 15:25:22 -0700694 hosts = main.Cluster.next().hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700695 try:
Jon Hallca319892017-06-15 15:25:22 -0700696 output = json.dumps( json.loads( hosts ),
697 sort_keys=True,
698 indent=4,
699 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700700 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700701 output = repr( hosts )
702 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700703 hostResult = main.FALSE
704 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
705 onpass="Found a host id for each host",
706 onfail="Error looking up host ids" )
707
708 intentStart = time.time()
709 onosIds = onosCli.getAllIntentsId()
710 main.log.info( "Submitted intents: " + str( intentIds ) )
711 main.log.info( "Intents in ONOS: " + str( onosIds ) )
712 for intent in intentIds:
713 if intent in onosIds:
714 pass # intent submitted is in onos
715 else:
716 intentAddResult = False
717 if intentAddResult:
718 intentStop = time.time()
719 else:
720 intentStop = None
721 # Print the intent states
722 intents = onosCli.intents()
723 intentStates = []
724 installedCheck = True
725 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
726 count = 0
727 try:
728 for intent in json.loads( intents ):
729 state = intent.get( 'state', None )
730 if "INSTALLED" not in state:
731 installedCheck = False
732 intentId = intent.get( 'id', None )
733 intentStates.append( ( intentId, state ) )
734 except ( ValueError, TypeError ):
735 main.log.exception( "Error parsing intents" )
736 # add submitted intents not in the store
737 tmplist = [ i for i, s in intentStates ]
738 missingIntents = False
739 for i in intentIds:
740 if i not in tmplist:
741 intentStates.append( ( i, " - " ) )
742 missingIntents = True
743 intentStates.sort()
744 for i, s in intentStates:
745 count += 1
746 main.log.info( "%-6s%-15s%-15s" %
747 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700748 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700749
750 intentAddResult = bool( intentAddResult and not missingIntents and
751 installedCheck )
752 if not intentAddResult:
753 main.log.error( "Error in pushing host intents to ONOS" )
754
755 main.step( "Intent Anti-Entropy dispersion" )
756 for j in range( 100 ):
757 correct = True
758 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700759 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700760 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700761 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700762 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700763 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700764 str( sorted( onosIds ) ) )
765 if sorted( ids ) != sorted( intentIds ):
766 main.log.warn( "Set of intent IDs doesn't match" )
767 correct = False
768 break
769 else:
Jon Hallca319892017-06-15 15:25:22 -0700770 intents = json.loads( ctrl.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700771 for intent in intents:
772 if intent[ 'state' ] != "INSTALLED":
773 main.log.warn( "Intent " + intent[ 'id' ] +
774 " is " + intent[ 'state' ] )
775 correct = False
776 break
777 if correct:
778 break
779 else:
780 time.sleep( 1 )
781 if not intentStop:
782 intentStop = time.time()
783 global gossipTime
784 gossipTime = intentStop - intentStart
785 main.log.info( "It took about " + str( gossipTime ) +
786 " seconds for all intents to appear in each node" )
787 append = False
788 title = "Gossip Intents"
789 count = 1
790 while append is False:
791 curTitle = title + str( count )
792 if curTitle not in main.HAlabels:
793 main.HAlabels.append( curTitle )
794 main.HAdata.append( str( gossipTime ) )
795 append = True
796 else:
797 count += 1
798 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700799 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700800 utilities.assert_greater_equals(
801 expect=maxGossipTime, actual=gossipTime,
802 onpass="ECM anti-entropy for intents worked within " +
803 "expected time",
804 onfail="Intent ECM anti-entropy took too long. " +
805 "Expected time:{}, Actual time:{}".format( maxGossipTime,
806 gossipTime ) )
807 if gossipTime <= maxGossipTime:
808 intentAddResult = True
809
Jon Hallca319892017-06-15 15:25:22 -0700810 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700811 if not intentAddResult or "key" in pendingMap:
812 import time
813 installedCheck = True
814 main.log.info( "Sleeping 60 seconds to see if intents are found" )
815 time.sleep( 60 )
816 onosIds = onosCli.getAllIntentsId()
817 main.log.info( "Submitted intents: " + str( intentIds ) )
818 main.log.info( "Intents in ONOS: " + str( onosIds ) )
819 # Print the intent states
820 intents = onosCli.intents()
821 intentStates = []
822 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
823 count = 0
824 try:
825 for intent in json.loads( intents ):
826 # Iter through intents of a node
827 state = intent.get( 'state', None )
828 if "INSTALLED" not in state:
829 installedCheck = False
830 intentId = intent.get( 'id', None )
831 intentStates.append( ( intentId, state ) )
832 except ( ValueError, TypeError ):
833 main.log.exception( "Error parsing intents" )
834 # add submitted intents not in the store
835 tmplist = [ i for i, s in intentStates ]
836 for i in intentIds:
837 if i not in tmplist:
838 intentStates.append( ( i, " - " ) )
839 intentStates.sort()
840 for i, s in intentStates:
841 count += 1
842 main.log.info( "%-6s%-15s%-15s" %
843 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700844 self.topicsCheck( [ "org.onosproject.election" ] )
845 self.partitionsCheck()
846 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700847
    def pingAcrossHostIntent( self, main ):
        """
        Verify connectivity across previously added host-to-host intents.

        Polls until all intents report INSTALLED (up to 40 tries, 1 second
        apart), pings each intent's host pair ( h8-h17 <-> h18-h27 ), checks
        cluster topic leadership / partitions / pending map, and - if the
        intents never settled - waits 60 seconds and pings once more.

        Arguments:
            main - the global test object; supplies the Cluster and Mininet
                   handles plus logging/assert utilities
        """
        import json
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.Cluster.next()
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll until every intent is INSTALLED, at most 40 times with a
        # 1 second sleep between attempts
        while not installedCheck and loopCount < 40:
            installedCheck = True
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        PingResult = main.TRUE
        # Each host intent connects h<i> to h<i+10>; ping every pair once
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            # Dump current intents for debugging; fall back to repr() when
            # the response is not valid JSON
            try:
                tmpIntents = onosCli.intents()
                output = json.dumps( json.loads( tmpIntents ),
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
            except ( ValueError, TypeError ):
                output = repr( tmpIntents )
            main.log.debug( "ONOS1 intents: " + output )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        # NOTE(review): this asserts topicsCheck() is falsy on success -
        # confirm against the topicsCheck() definition
        topicsCheck = self.topicsCheck()
        utilities.assert_equals( expect=False, actual=topicsCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost" )
        self.partitionsCheck()
        self.pendingMapCheck()

        if not installedCheck:
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            self.commonChecks()

        # Print flowrules
        main.log.debug( onosCli.flows() )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( str( onosCli.name ) + " intents: " )
            try:
                tmpIntents = onosCli.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )
994
Devin Lim142b5342017-07-20 15:22:39 -0700995 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700996 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700997 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700998 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700999 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07001000 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07001001 actual=rolesNotNull,
1002 onpass="Each device has a master",
1003 onfail="Some devices don't have a master assigned" )
1004
Devin Lim142b5342017-07-20 15:22:39 -07001005 def checkTheRole( self ):
1006 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -07001007 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -07001008 consistentMastership = True
1009 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001010 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001011 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001012 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001013 main.log.error( "Error in getting " + node + " roles" )
1014 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001015 repr( ONOSMastership[ i ] ) )
1016 rolesResults = False
1017 utilities.assert_equals(
1018 expect=True,
1019 actual=rolesResults,
1020 onpass="No error in reading roles output",
1021 onfail="Error in reading roles from ONOS" )
1022
1023 main.step( "Check for consistency in roles from each controller" )
1024 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1025 main.log.info(
1026 "Switch roles are consistent across all ONOS nodes" )
1027 else:
1028 consistentMastership = False
1029 utilities.assert_equals(
1030 expect=True,
1031 actual=consistentMastership,
1032 onpass="Switch roles are consistent across all ONOS nodes",
1033 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -07001034 return ONOSMastership, rolesResults, consistentMastership
1035
1036 def checkingIntents( self ):
1037 main.step( "Get the intents from each controller" )
1038 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
1039 intentsResults = True
1040 for i in range( len( ONOSIntents ) ):
1041 node = str( main.Cluster.active( i ) )
1042 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1043 main.log.error( "Error in getting " + node + " intents" )
1044 main.log.warn( node + " intents response: " +
1045 repr( ONOSIntents[ i ] ) )
1046 intentsResults = False
1047 utilities.assert_equals(
1048 expect=True,
1049 actual=intentsResults,
1050 onpass="No error in reading intents output",
1051 onfail="Error in reading intents from ONOS" )
1052 return ONOSIntents, intentsResults
1053
1054 def readingState( self, main ):
1055 """
1056 Reading state of ONOS
1057 """
1058 import json
1059 import time
1060 assert main, "main not defined"
1061 assert utilities.assert_equals, "utilities.assert_equals not defined"
1062 try:
1063 from tests.dependencies.topology import Topology
1064 except ImportError:
1065 main.log.error( "Topology not found exiting the test" )
1066 main.cleanup()
1067 main.exit()
1068 try:
1069 main.topoRelated
1070 except ( NameError, AttributeError ):
1071 main.topoRelated = Topology()
1072 main.case( "Setting up and gathering data for current state" )
1073 # The general idea for this test case is to pull the state of
1074 # ( intents,flows, topology,... ) from each ONOS node
1075 # We can then compare them with each other and also with past states
1076
1077 global mastershipState
1078 mastershipState = '[]'
1079
1080 self.checkRoleNotNull()
1081
1082 main.step( "Get the Mastership of each switch from each controller" )
1083 mastershipCheck = main.FALSE
1084
1085 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001086
1087 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001088 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001089 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001090 try:
1091 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001092 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001093 json.dumps(
1094 json.loads( ONOSMastership[ i ] ),
1095 sort_keys=True,
1096 indent=4,
1097 separators=( ',', ': ' ) ) )
1098 except ( ValueError, TypeError ):
1099 main.log.warn( repr( ONOSMastership[ i ] ) )
1100 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001101 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001102 mastershipState = ONOSMastership[ 0 ]
1103
Devin Lim142b5342017-07-20 15:22:39 -07001104
Devin Lim58046fa2017-07-05 16:55:00 -07001105 global intentState
1106 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001107 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001108 intentCheck = main.FALSE
1109 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001110
Devin Lim58046fa2017-07-05 16:55:00 -07001111
1112 main.step( "Check for consistency in Intents from each controller" )
1113 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1114 main.log.info( "Intents are consistent across all ONOS " +
1115 "nodes" )
1116 else:
1117 consistentIntents = False
1118 main.log.error( "Intents not consistent" )
1119 utilities.assert_equals(
1120 expect=True,
1121 actual=consistentIntents,
1122 onpass="Intents are consistent across all ONOS nodes",
1123 onfail="ONOS nodes have different views of intents" )
1124
1125 if intentsResults:
1126 # Try to make it easy to figure out what is happening
1127 #
1128 # Intent ONOS1 ONOS2 ...
1129 # 0x01 INSTALLED INSTALLING
1130 # ... ... ...
1131 # ... ... ...
1132 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001133 for ctrl in main.Cluster.active():
1134 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001135 main.log.warn( title )
1136 # get all intent keys in the cluster
1137 keys = []
1138 try:
1139 # Get the set of all intent keys
1140 for nodeStr in ONOSIntents:
1141 node = json.loads( nodeStr )
1142 for intent in node:
1143 keys.append( intent.get( 'id' ) )
1144 keys = set( keys )
1145 # For each intent key, print the state on each node
1146 for key in keys:
1147 row = "%-13s" % key
1148 for nodeStr in ONOSIntents:
1149 node = json.loads( nodeStr )
1150 for intent in node:
1151 if intent.get( 'id', "Error" ) == key:
1152 row += "%-15s" % intent.get( 'state' )
1153 main.log.warn( row )
1154 # End of intent state table
1155 except ValueError as e:
1156 main.log.exception( e )
1157 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1158
1159 if intentsResults and not consistentIntents:
1160 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001161 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001162 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1163 sort_keys=True,
1164 indent=4,
1165 separators=( ',', ': ' ) ) )
1166 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001167 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001168 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001169 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001170 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1171 sort_keys=True,
1172 indent=4,
1173 separators=( ',', ': ' ) ) )
1174 else:
Jon Hallca319892017-06-15 15:25:22 -07001175 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001176 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001177 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001178 intentState = ONOSIntents[ 0 ]
1179
1180 main.step( "Get the flows from each controller" )
1181 global flowState
1182 flowState = []
Devin Lim142b5342017-07-20 15:22:39 -07001183 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001184 ONOSFlowsJson = []
1185 flowCheck = main.FALSE
1186 consistentFlows = True
1187 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001188 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001189 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001190 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001191 main.log.error( "Error in getting " + node + " flows" )
1192 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001193 repr( ONOSFlows[ i ] ) )
1194 flowsResults = False
1195 ONOSFlowsJson.append( None )
1196 else:
1197 try:
1198 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1199 except ( ValueError, TypeError ):
1200 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001201 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001202 " response as json." )
1203 main.log.error( repr( ONOSFlows[ i ] ) )
1204 ONOSFlowsJson.append( None )
1205 flowsResults = False
1206 utilities.assert_equals(
1207 expect=True,
1208 actual=flowsResults,
1209 onpass="No error in reading flows output",
1210 onfail="Error in reading flows from ONOS" )
1211
1212 main.step( "Check for consistency in Flows from each controller" )
1213 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1214 if all( tmp ):
1215 main.log.info( "Flow count is consistent across all ONOS nodes" )
1216 else:
1217 consistentFlows = False
1218 utilities.assert_equals(
1219 expect=True,
1220 actual=consistentFlows,
1221 onpass="The flow count is consistent across all ONOS nodes",
1222 onfail="ONOS nodes have different flow counts" )
1223
1224 if flowsResults and not consistentFlows:
1225 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001226 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001227 try:
1228 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001229 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001230 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1231 indent=4, separators=( ',', ': ' ) ) )
1232 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001233 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001234 repr( ONOSFlows[ i ] ) )
1235 elif flowsResults and consistentFlows:
1236 flowCheck = main.TRUE
1237 flowState = ONOSFlows[ 0 ]
1238
1239 main.step( "Get the OF Table entries" )
1240 global flows
1241 flows = []
1242 for i in range( 1, 29 ):
1243 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1244 if flowCheck == main.FALSE:
1245 for table in flows:
1246 main.log.warn( table )
1247 # TODO: Compare switch flow tables with ONOS flow tables
1248
1249 main.step( "Start continuous pings" )
1250 main.Mininet2.pingLong(
1251 src=main.params[ 'PING' ][ 'source1' ],
1252 target=main.params[ 'PING' ][ 'target1' ],
1253 pingTime=500 )
1254 main.Mininet2.pingLong(
1255 src=main.params[ 'PING' ][ 'source2' ],
1256 target=main.params[ 'PING' ][ 'target2' ],
1257 pingTime=500 )
1258 main.Mininet2.pingLong(
1259 src=main.params[ 'PING' ][ 'source3' ],
1260 target=main.params[ 'PING' ][ 'target3' ],
1261 pingTime=500 )
1262 main.Mininet2.pingLong(
1263 src=main.params[ 'PING' ][ 'source4' ],
1264 target=main.params[ 'PING' ][ 'target4' ],
1265 pingTime=500 )
1266 main.Mininet2.pingLong(
1267 src=main.params[ 'PING' ][ 'source5' ],
1268 target=main.params[ 'PING' ][ 'target5' ],
1269 pingTime=500 )
1270 main.Mininet2.pingLong(
1271 src=main.params[ 'PING' ][ 'source6' ],
1272 target=main.params[ 'PING' ][ 'target6' ],
1273 pingTime=500 )
1274 main.Mininet2.pingLong(
1275 src=main.params[ 'PING' ][ 'source7' ],
1276 target=main.params[ 'PING' ][ 'target7' ],
1277 pingTime=500 )
1278 main.Mininet2.pingLong(
1279 src=main.params[ 'PING' ][ 'source8' ],
1280 target=main.params[ 'PING' ][ 'target8' ],
1281 pingTime=500 )
1282 main.Mininet2.pingLong(
1283 src=main.params[ 'PING' ][ 'source9' ],
1284 target=main.params[ 'PING' ][ 'target9' ],
1285 pingTime=500 )
1286 main.Mininet2.pingLong(
1287 src=main.params[ 'PING' ][ 'source10' ],
1288 target=main.params[ 'PING' ][ 'target10' ],
1289 pingTime=500 )
1290
1291 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001292 devices = main.topoRelated.getAll( "devices" )
1293 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1294 ports = main.topoRelated.getAll( "ports" )
1295 links = main.topoRelated.getAll( "links" )
1296 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001297 # Compare json objects for hosts and dataplane clusters
1298
1299 # hosts
1300 main.step( "Host view is consistent across ONOS nodes" )
1301 consistentHostsResult = main.TRUE
1302 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001303 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001304 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1305 if hosts[ controller ] == hosts[ 0 ]:
1306 continue
1307 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001308 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001309 controllerStr +
1310 " is inconsistent with ONOS1" )
1311 main.log.warn( repr( hosts[ controller ] ) )
1312 consistentHostsResult = main.FALSE
1313
1314 else:
Jon Hallca319892017-06-15 15:25:22 -07001315 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001316 controllerStr )
1317 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001318 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001319 " hosts response: " +
1320 repr( hosts[ controller ] ) )
1321 utilities.assert_equals(
1322 expect=main.TRUE,
1323 actual=consistentHostsResult,
1324 onpass="Hosts view is consistent across all ONOS nodes",
1325 onfail="ONOS nodes have different views of hosts" )
1326
1327 main.step( "Each host has an IP address" )
1328 ipResult = main.TRUE
1329 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001330 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001331 if hosts[ controller ]:
1332 for host in hosts[ controller ]:
1333 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001334 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001335 controllerStr + ": " + str( host ) )
1336 ipResult = main.FALSE
1337 utilities.assert_equals(
1338 expect=main.TRUE,
1339 actual=ipResult,
1340 onpass="The ips of the hosts aren't empty",
1341 onfail="The ip of at least one host is missing" )
1342
1343 # Strongly connected clusters of devices
1344 main.step( "Cluster view is consistent across ONOS nodes" )
1345 consistentClustersResult = main.TRUE
1346 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001347 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001348 if "Error" not in clusters[ controller ]:
1349 if clusters[ controller ] == clusters[ 0 ]:
1350 continue
1351 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001352 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001353 " is inconsistent with ONOS1" )
1354 consistentClustersResult = main.FALSE
1355
1356 else:
1357 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001358 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001359 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001360 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001361 " clusters response: " +
1362 repr( clusters[ controller ] ) )
1363 utilities.assert_equals(
1364 expect=main.TRUE,
1365 actual=consistentClustersResult,
1366 onpass="Clusters view is consistent across all ONOS nodes",
1367 onfail="ONOS nodes have different views of clusters" )
1368 if not consistentClustersResult:
1369 main.log.debug( clusters )
1370
1371 # there should always only be one cluster
1372 main.step( "Cluster view correct across ONOS nodes" )
1373 try:
1374 numClusters = len( json.loads( clusters[ 0 ] ) )
1375 except ( ValueError, TypeError ):
1376 main.log.exception( "Error parsing clusters[0]: " +
1377 repr( clusters[ 0 ] ) )
1378 numClusters = "ERROR"
1379 utilities.assert_equals(
1380 expect=1,
1381 actual=numClusters,
1382 onpass="ONOS shows 1 SCC",
1383 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1384
1385 main.step( "Comparing ONOS topology to MN" )
1386 devicesResults = main.TRUE
1387 linksResults = main.TRUE
1388 hostsResults = main.TRUE
1389 mnSwitches = main.Mininet1.getSwitches()
1390 mnLinks = main.Mininet1.getLinks()
1391 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001392 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001393 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001394 currentDevicesResult = main.topoRelated.compareDevicePort(
1395 main.Mininet1, controller,
1396 mnSwitches, devices, ports )
1397 utilities.assert_equals( expect=main.TRUE,
1398 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001399 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001400 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001401 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001402 " Switches view is incorrect" )
1403
1404 currentLinksResult = main.topoRelated.compareBase( links, controller,
1405 main.Mininet1.compareLinks,
1406 [ mnSwitches, mnLinks ] )
1407 utilities.assert_equals( expect=main.TRUE,
1408 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001409 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001410 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001411 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001412 " links view is incorrect" )
1413
1414 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1415 currentHostsResult = main.Mininet1.compareHosts(
1416 mnHosts,
1417 hosts[ controller ] )
1418 else:
1419 currentHostsResult = main.FALSE
1420 utilities.assert_equals( expect=main.TRUE,
1421 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001422 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001423 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001424 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001425 " hosts don't match Mininet" )
1426
1427 devicesResults = devicesResults and currentDevicesResult
1428 linksResults = linksResults and currentLinksResult
1429 hostsResults = hostsResults and currentHostsResult
1430
1431 main.step( "Device information is correct" )
1432 utilities.assert_equals(
1433 expect=main.TRUE,
1434 actual=devicesResults,
1435 onpass="Device information is correct",
1436 onfail="Device information is incorrect" )
1437
1438 main.step( "Links are correct" )
1439 utilities.assert_equals(
1440 expect=main.TRUE,
1441 actual=linksResults,
1442 onpass="Link are correct",
1443 onfail="Links are incorrect" )
1444
1445 main.step( "Hosts are correct" )
1446 utilities.assert_equals(
1447 expect=main.TRUE,
1448 actual=hostsResults,
1449 onpass="Hosts are correct",
1450 onfail="Hosts are incorrect" )
1451
1452 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001453 """
1454 Check for basic functionality with distributed primitives
1455 """
Jon Halle0f0b342017-04-18 11:43:47 -07001456 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001457 try:
1458 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001459 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001460 assert main.pCounterName, "main.pCounterName not defined"
1461 assert main.onosSetName, "main.onosSetName not defined"
1462 # NOTE: assert fails if value is 0/None/Empty/False
1463 try:
1464 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001465 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001466 main.log.error( "main.pCounterValue not defined, setting to 0" )
1467 main.pCounterValue = 0
1468 try:
1469 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001470 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001471 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001472 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001473 # Variables for the distributed primitives tests. These are local only
1474 addValue = "a"
1475 addAllValue = "a b c d e f"
1476 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001477 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001478 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001479 workQueueName = "TestON-Queue"
1480 workQueueCompleted = 0
1481 workQueueInProgress = 0
1482 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001483
1484 description = "Check for basic functionality with distributed " +\
1485 "primitives"
1486 main.case( description )
1487 main.caseExplanation = "Test the methods of the distributed " +\
1488 "primitives (counters and sets) throught the cli"
1489 # DISTRIBUTED ATOMIC COUNTERS
1490 # Partitioned counters
1491 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001492 pCounters = main.Cluster.command( "counterTestAddAndGet",
1493 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001494 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001495 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001496 main.pCounterValue += 1
1497 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001498 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001499 pCounterResults = True
1500 for i in addedPValues:
1501 tmpResult = i in pCounters
1502 pCounterResults = pCounterResults and tmpResult
1503 if not tmpResult:
1504 main.log.error( str( i ) + " is not in partitioned "
1505 "counter incremented results" )
1506 utilities.assert_equals( expect=True,
1507 actual=pCounterResults,
1508 onpass="Default counter incremented",
1509 onfail="Error incrementing default" +
1510 " counter" )
1511
1512 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001513 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1514 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001515 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001516 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001517 addedPValues.append( main.pCounterValue )
1518 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001519 # Check that counter incremented numController times
1520 pCounterResults = True
1521 for i in addedPValues:
1522 tmpResult = i in pCounters
1523 pCounterResults = pCounterResults and tmpResult
1524 if not tmpResult:
1525 main.log.error( str( i ) + " is not in partitioned "
1526 "counter incremented results" )
1527 utilities.assert_equals( expect=True,
1528 actual=pCounterResults,
1529 onpass="Default counter incremented",
1530 onfail="Error incrementing default" +
1531 " counter" )
1532
1533 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001534 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001535 utilities.assert_equals( expect=main.TRUE,
1536 actual=incrementCheck,
1537 onpass="Added counters are correct",
1538 onfail="Added counters are incorrect" )
1539
1540 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001541 pCounters = main.Cluster.command( "counterTestAddAndGet",
1542 args=[ main.pCounterName ],
1543 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001544 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001545 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001546 main.pCounterValue += -8
1547 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001548 # Check that counter incremented numController times
1549 pCounterResults = True
1550 for i in addedPValues:
1551 tmpResult = i in pCounters
1552 pCounterResults = pCounterResults and tmpResult
1553 if not tmpResult:
1554 main.log.error( str( i ) + " is not in partitioned "
1555 "counter incremented results" )
1556 utilities.assert_equals( expect=True,
1557 actual=pCounterResults,
1558 onpass="Default counter incremented",
1559 onfail="Error incrementing default" +
1560 " counter" )
1561
1562 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001563 pCounters = main.Cluster.command( "counterTestAddAndGet",
1564 args=[ main.pCounterName ],
1565 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001566 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001567 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001568 main.pCounterValue += 5
1569 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001570
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001571 # Check that counter incremented numController times
1572 pCounterResults = True
1573 for i in addedPValues:
1574 tmpResult = i in pCounters
1575 pCounterResults = pCounterResults and tmpResult
1576 if not tmpResult:
1577 main.log.error( str( i ) + " is not in partitioned "
1578 "counter incremented results" )
1579 utilities.assert_equals( expect=True,
1580 actual=pCounterResults,
1581 onpass="Default counter incremented",
1582 onfail="Error incrementing default" +
1583 " counter" )
1584
1585 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001586 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1587 args=[ main.pCounterName ],
1588 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001589 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001590 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001591 addedPValues.append( main.pCounterValue )
1592 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001593 # Check that counter incremented numController times
1594 pCounterResults = True
1595 for i in addedPValues:
1596 tmpResult = i in pCounters
1597 pCounterResults = pCounterResults and tmpResult
1598 if not tmpResult:
1599 main.log.error( str( i ) + " is not in partitioned "
1600 "counter incremented results" )
1601 utilities.assert_equals( expect=True,
1602 actual=pCounterResults,
1603 onpass="Default counter incremented",
1604 onfail="Error incrementing default" +
1605 " counter" )
1606
1607 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001608 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001609 utilities.assert_equals( expect=main.TRUE,
1610 actual=incrementCheck,
1611 onpass="Added counters are correct",
1612 onfail="Added counters are incorrect" )
1613
1614 # DISTRIBUTED SETS
1615 main.step( "Distributed Set get" )
1616 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001617 getResponses = main.Cluster.command( "setTestGet",
1618 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001619 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001620 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001621 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001622 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001623 current = set( getResponses[ i ] )
1624 if len( current ) == len( getResponses[ i ] ):
1625 # no repeats
1626 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001627 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001628 " has incorrect view" +
1629 " of set " + main.onosSetName + ":\n" +
1630 str( getResponses[ i ] ) )
1631 main.log.debug( "Expected: " + str( main.onosSet ) )
1632 main.log.debug( "Actual: " + str( current ) )
1633 getResults = main.FALSE
1634 else:
1635 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001636 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001637 " has repeat elements in" +
1638 " set " + main.onosSetName + ":\n" +
1639 str( getResponses[ i ] ) )
1640 getResults = main.FALSE
1641 elif getResponses[ i ] == main.ERROR:
1642 getResults = main.FALSE
1643 utilities.assert_equals( expect=main.TRUE,
1644 actual=getResults,
1645 onpass="Set elements are correct",
1646 onfail="Set elements are incorrect" )
1647
1648 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001649 sizeResponses = main.Cluster.command( "setTestSize",
1650 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001651 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001652 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001653 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001654 if size != sizeResponses[ i ]:
1655 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001656 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001657 " expected a size of " + str( size ) +
1658 " for set " + main.onosSetName +
1659 " but got " + str( sizeResponses[ i ] ) )
1660 utilities.assert_equals( expect=main.TRUE,
1661 actual=sizeResults,
1662 onpass="Set sizes are correct",
1663 onfail="Set sizes are incorrect" )
1664
1665 main.step( "Distributed Set add()" )
1666 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001667 addResponses = main.Cluster.command( "setTestAdd",
1668 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001669 # main.TRUE = successfully changed the set
1670 # main.FALSE = action resulted in no change in set
1671 # main.ERROR - Some error in executing the function
1672 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001673 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001674 if addResponses[ i ] == main.TRUE:
1675 # All is well
1676 pass
1677 elif addResponses[ i ] == main.FALSE:
1678 # Already in set, probably fine
1679 pass
1680 elif addResponses[ i ] == main.ERROR:
1681 # Error in execution
1682 addResults = main.FALSE
1683 else:
1684 # unexpected result
1685 addResults = main.FALSE
1686 if addResults != main.TRUE:
1687 main.log.error( "Error executing set add" )
1688
1689 # Check if set is still correct
1690 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001691 getResponses = main.Cluster.command( "setTestGet",
1692 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001693 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001694 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001695 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001696 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001697 current = set( getResponses[ i ] )
1698 if len( current ) == len( getResponses[ i ] ):
1699 # no repeats
1700 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001701 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001702 " of set " + main.onosSetName + ":\n" +
1703 str( getResponses[ i ] ) )
1704 main.log.debug( "Expected: " + str( main.onosSet ) )
1705 main.log.debug( "Actual: " + str( current ) )
1706 getResults = main.FALSE
1707 else:
1708 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001709 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001710 " set " + main.onosSetName + ":\n" +
1711 str( getResponses[ i ] ) )
1712 getResults = main.FALSE
1713 elif getResponses[ i ] == main.ERROR:
1714 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001715 sizeResponses = main.Cluster.command( "setTestSize",
1716 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001717 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001718 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001719 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001720 if size != sizeResponses[ i ]:
1721 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001722 main.log.error( node + " expected a size of " +
1723 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001724 " but got " + str( sizeResponses[ i ] ) )
1725 addResults = addResults and getResults and sizeResults
1726 utilities.assert_equals( expect=main.TRUE,
1727 actual=addResults,
1728 onpass="Set add correct",
1729 onfail="Set add was incorrect" )
1730
1731 main.step( "Distributed Set addAll()" )
1732 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001733 addResponses = main.Cluster.command( "setTestAdd",
1734 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001735 # main.TRUE = successfully changed the set
1736 # main.FALSE = action resulted in no change in set
1737 # main.ERROR - Some error in executing the function
1738 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001739 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001740 if addResponses[ i ] == main.TRUE:
1741 # All is well
1742 pass
1743 elif addResponses[ i ] == main.FALSE:
1744 # Already in set, probably fine
1745 pass
1746 elif addResponses[ i ] == main.ERROR:
1747 # Error in execution
1748 addAllResults = main.FALSE
1749 else:
1750 # unexpected result
1751 addAllResults = main.FALSE
1752 if addAllResults != main.TRUE:
1753 main.log.error( "Error executing set addAll" )
1754
1755 # Check if set is still correct
1756 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001757 getResponses = main.Cluster.command( "setTestGet",
1758 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001759 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001760 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001761 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001762 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001763 current = set( getResponses[ i ] )
1764 if len( current ) == len( getResponses[ i ] ):
1765 # no repeats
1766 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001767 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001768 " of set " + main.onosSetName + ":\n" +
1769 str( getResponses[ i ] ) )
1770 main.log.debug( "Expected: " + str( main.onosSet ) )
1771 main.log.debug( "Actual: " + str( current ) )
1772 getResults = main.FALSE
1773 else:
1774 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001775 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001776 " set " + main.onosSetName + ":\n" +
1777 str( getResponses[ i ] ) )
1778 getResults = main.FALSE
1779 elif getResponses[ i ] == main.ERROR:
1780 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001781 sizeResponses = main.Cluster.command( "setTestSize",
1782 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001783 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001784 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001785 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001786 if size != sizeResponses[ i ]:
1787 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001788 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001789 " for set " + main.onosSetName +
1790 " but got " + str( sizeResponses[ i ] ) )
1791 addAllResults = addAllResults and getResults and sizeResults
1792 utilities.assert_equals( expect=main.TRUE,
1793 actual=addAllResults,
1794 onpass="Set addAll correct",
1795 onfail="Set addAll was incorrect" )
1796
1797 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001798 containsResponses = main.Cluster.command( "setTestGet",
1799 args=[ main.onosSetName ],
1800 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001801 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001802 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001803 if containsResponses[ i ] == main.ERROR:
1804 containsResults = main.FALSE
1805 else:
1806 containsResults = containsResults and\
1807 containsResponses[ i ][ 1 ]
1808 utilities.assert_equals( expect=main.TRUE,
1809 actual=containsResults,
1810 onpass="Set contains is functional",
1811 onfail="Set contains failed" )
1812
1813 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001814 containsAllResponses = main.Cluster.command( "setTestGet",
1815 args=[ main.onosSetName ],
1816 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001817 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001818 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001819 if containsResponses[ i ] == main.ERROR:
1820 containsResults = main.FALSE
1821 else:
1822 containsResults = containsResults and\
1823 containsResponses[ i ][ 1 ]
1824 utilities.assert_equals( expect=main.TRUE,
1825 actual=containsAllResults,
1826 onpass="Set containsAll is functional",
1827 onfail="Set containsAll failed" )
1828
1829 main.step( "Distributed Set remove()" )
1830 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001831 removeResponses = main.Cluster.command( "setTestRemove",
1832 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001833 # main.TRUE = successfully changed the set
1834 # main.FALSE = action resulted in no change in set
1835 # main.ERROR - Some error in executing the function
1836 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001837 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001838 if removeResponses[ i ] == main.TRUE:
1839 # All is well
1840 pass
1841 elif removeResponses[ i ] == main.FALSE:
1842 # not in set, probably fine
1843 pass
1844 elif removeResponses[ i ] == main.ERROR:
1845 # Error in execution
1846 removeResults = main.FALSE
1847 else:
1848 # unexpected result
1849 removeResults = main.FALSE
1850 if removeResults != main.TRUE:
1851 main.log.error( "Error executing set remove" )
1852
1853 # Check if set is still correct
1854 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001855 getResponses = main.Cluster.command( "setTestGet",
1856 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001857 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001858 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001859 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001860 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001861 current = set( getResponses[ i ] )
1862 if len( current ) == len( getResponses[ i ] ):
1863 # no repeats
1864 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001865 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001866 " of set " + main.onosSetName + ":\n" +
1867 str( getResponses[ i ] ) )
1868 main.log.debug( "Expected: " + str( main.onosSet ) )
1869 main.log.debug( "Actual: " + str( current ) )
1870 getResults = main.FALSE
1871 else:
1872 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001873 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001874 " set " + main.onosSetName + ":\n" +
1875 str( getResponses[ i ] ) )
1876 getResults = main.FALSE
1877 elif getResponses[ i ] == main.ERROR:
1878 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001879 sizeResponses = main.Cluster.command( "setTestSize",
1880 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001881 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001882 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001883 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001884 if size != sizeResponses[ i ]:
1885 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001886 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001887 " for set " + main.onosSetName +
1888 " but got " + str( sizeResponses[ i ] ) )
1889 removeResults = removeResults and getResults and sizeResults
1890 utilities.assert_equals( expect=main.TRUE,
1891 actual=removeResults,
1892 onpass="Set remove correct",
1893 onfail="Set remove was incorrect" )
1894
1895 main.step( "Distributed Set removeAll()" )
1896 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001897 removeAllResponses = main.Cluster.command( "setTestRemove",
1898 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001899 # main.TRUE = successfully changed the set
1900 # main.FALSE = action resulted in no change in set
1901 # main.ERROR - Some error in executing the function
1902 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001903 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001904 if removeAllResponses[ i ] == main.TRUE:
1905 # All is well
1906 pass
1907 elif removeAllResponses[ i ] == main.FALSE:
1908 # not in set, probably fine
1909 pass
1910 elif removeAllResponses[ i ] == main.ERROR:
1911 # Error in execution
1912 removeAllResults = main.FALSE
1913 else:
1914 # unexpected result
1915 removeAllResults = main.FALSE
1916 if removeAllResults != main.TRUE:
1917 main.log.error( "Error executing set removeAll" )
1918
1919 # Check if set is still correct
1920 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001921 getResponses = main.Cluster.command( "setTestGet",
1922 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001923 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001924 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001925 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001926 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001927 current = set( getResponses[ i ] )
1928 if len( current ) == len( getResponses[ i ] ):
1929 # no repeats
1930 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001931 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001932 " of set " + main.onosSetName + ":\n" +
1933 str( getResponses[ i ] ) )
1934 main.log.debug( "Expected: " + str( main.onosSet ) )
1935 main.log.debug( "Actual: " + str( current ) )
1936 getResults = main.FALSE
1937 else:
1938 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001939 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001940 " set " + main.onosSetName + ":\n" +
1941 str( getResponses[ i ] ) )
1942 getResults = main.FALSE
1943 elif getResponses[ i ] == main.ERROR:
1944 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001945 sizeResponses = main.Cluster.command( "setTestSize",
1946 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001947 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001948 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001949 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001950 if size != sizeResponses[ i ]:
1951 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001952 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001953 " for set " + main.onosSetName +
1954 " but got " + str( sizeResponses[ i ] ) )
1955 removeAllResults = removeAllResults and getResults and sizeResults
1956 utilities.assert_equals( expect=main.TRUE,
1957 actual=removeAllResults,
1958 onpass="Set removeAll correct",
1959 onfail="Set removeAll was incorrect" )
1960
1961 main.step( "Distributed Set addAll()" )
1962 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001963 addResponses = main.Cluster.command( "setTestAdd",
1964 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001965 # main.TRUE = successfully changed the set
1966 # main.FALSE = action resulted in no change in set
1967 # main.ERROR - Some error in executing the function
1968 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001969 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001970 if addResponses[ i ] == main.TRUE:
1971 # All is well
1972 pass
1973 elif addResponses[ i ] == main.FALSE:
1974 # Already in set, probably fine
1975 pass
1976 elif addResponses[ i ] == main.ERROR:
1977 # Error in execution
1978 addAllResults = main.FALSE
1979 else:
1980 # unexpected result
1981 addAllResults = main.FALSE
1982 if addAllResults != main.TRUE:
1983 main.log.error( "Error executing set addAll" )
1984
1985 # Check if set is still correct
1986 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001987 getResponses = main.Cluster.command( "setTestGet",
1988 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001989 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001990 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001991 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001992 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001993 current = set( getResponses[ i ] )
1994 if len( current ) == len( getResponses[ i ] ):
1995 # no repeats
1996 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001997 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001998 " of set " + main.onosSetName + ":\n" +
1999 str( getResponses[ i ] ) )
2000 main.log.debug( "Expected: " + str( main.onosSet ) )
2001 main.log.debug( "Actual: " + str( current ) )
2002 getResults = main.FALSE
2003 else:
2004 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002005 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002006 " set " + main.onosSetName + ":\n" +
2007 str( getResponses[ i ] ) )
2008 getResults = main.FALSE
2009 elif getResponses[ i ] == main.ERROR:
2010 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002011 sizeResponses = main.Cluster.command( "setTestSize",
2012 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002013 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002014 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002015 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002016 if size != sizeResponses[ i ]:
2017 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002018 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002019 " for set " + main.onosSetName +
2020 " but got " + str( sizeResponses[ i ] ) )
2021 addAllResults = addAllResults and getResults and sizeResults
2022 utilities.assert_equals( expect=main.TRUE,
2023 actual=addAllResults,
2024 onpass="Set addAll correct",
2025 onfail="Set addAll was incorrect" )
2026
2027 main.step( "Distributed Set clear()" )
2028 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07002029 clearResponses = main.Cluster.command( "setTestRemove",
2030 args=[ main.onosSetName, " " ], # Values doesn't matter
2031 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002032 # main.TRUE = successfully changed the set
2033 # main.FALSE = action resulted in no change in set
2034 # main.ERROR - Some error in executing the function
2035 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002036 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002037 if clearResponses[ i ] == main.TRUE:
2038 # All is well
2039 pass
2040 elif clearResponses[ i ] == main.FALSE:
2041 # Nothing set, probably fine
2042 pass
2043 elif clearResponses[ i ] == main.ERROR:
2044 # Error in execution
2045 clearResults = main.FALSE
2046 else:
2047 # unexpected result
2048 clearResults = main.FALSE
2049 if clearResults != main.TRUE:
2050 main.log.error( "Error executing set clear" )
2051
2052 # Check if set is still correct
2053 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002054 getResponses = main.Cluster.command( "setTestGet",
2055 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002056 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002057 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002058 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002059 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002060 current = set( getResponses[ i ] )
2061 if len( current ) == len( getResponses[ i ] ):
2062 # no repeats
2063 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002064 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002065 " of set " + main.onosSetName + ":\n" +
2066 str( getResponses[ i ] ) )
2067 main.log.debug( "Expected: " + str( main.onosSet ) )
2068 main.log.debug( "Actual: " + str( current ) )
2069 getResults = main.FALSE
2070 else:
2071 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002072 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002073 " set " + main.onosSetName + ":\n" +
2074 str( getResponses[ i ] ) )
2075 getResults = main.FALSE
2076 elif getResponses[ i ] == main.ERROR:
2077 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002078 sizeResponses = main.Cluster.command( "setTestSize",
2079 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002080 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002081 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002082 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002083 if size != sizeResponses[ i ]:
2084 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002085 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002086 " for set " + main.onosSetName +
2087 " but got " + str( sizeResponses[ i ] ) )
2088 clearResults = clearResults and getResults and sizeResults
2089 utilities.assert_equals( expect=main.TRUE,
2090 actual=clearResults,
2091 onpass="Set clear correct",
2092 onfail="Set clear was incorrect" )
2093
2094 main.step( "Distributed Set addAll()" )
2095 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002096 addResponses = main.Cluster.command( "setTestAdd",
2097 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002098 # main.TRUE = successfully changed the set
2099 # main.FALSE = action resulted in no change in set
2100 # main.ERROR - Some error in executing the function
2101 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002102 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002103 if addResponses[ i ] == main.TRUE:
2104 # All is well
2105 pass
2106 elif addResponses[ i ] == main.FALSE:
2107 # Already in set, probably fine
2108 pass
2109 elif addResponses[ i ] == main.ERROR:
2110 # Error in execution
2111 addAllResults = main.FALSE
2112 else:
2113 # unexpected result
2114 addAllResults = main.FALSE
2115 if addAllResults != main.TRUE:
2116 main.log.error( "Error executing set addAll" )
2117
2118 # Check if set is still correct
2119 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002120 getResponses = main.Cluster.command( "setTestGet",
2121 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002122 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002123 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002124 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002125 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002126 current = set( getResponses[ i ] )
2127 if len( current ) == len( getResponses[ i ] ):
2128 # no repeats
2129 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002130 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002131 " of set " + main.onosSetName + ":\n" +
2132 str( getResponses[ i ] ) )
2133 main.log.debug( "Expected: " + str( main.onosSet ) )
2134 main.log.debug( "Actual: " + str( current ) )
2135 getResults = main.FALSE
2136 else:
2137 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002138 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002139 " set " + main.onosSetName + ":\n" +
2140 str( getResponses[ i ] ) )
2141 getResults = main.FALSE
2142 elif getResponses[ i ] == main.ERROR:
2143 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002144 sizeResponses = main.Cluster.command( "setTestSize",
2145 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002146 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002147 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002148 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002149 if size != sizeResponses[ i ]:
2150 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002151 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002152 " for set " + main.onosSetName +
2153 " but got " + str( sizeResponses[ i ] ) )
2154 addAllResults = addAllResults and getResults and sizeResults
2155 utilities.assert_equals( expect=main.TRUE,
2156 actual=addAllResults,
2157 onpass="Set addAll correct",
2158 onfail="Set addAll was incorrect" )
2159
2160 main.step( "Distributed Set retain()" )
2161 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002162 retainResponses = main.Cluster.command( "setTestRemove",
2163 args=[ main.onosSetName, retainValue ],
2164 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002165 # main.TRUE = successfully changed the set
2166 # main.FALSE = action resulted in no change in set
2167 # main.ERROR - Some error in executing the function
2168 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002169 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002170 if retainResponses[ i ] == main.TRUE:
2171 # All is well
2172 pass
2173 elif retainResponses[ i ] == main.FALSE:
2174 # Already in set, probably fine
2175 pass
2176 elif retainResponses[ i ] == main.ERROR:
2177 # Error in execution
2178 retainResults = main.FALSE
2179 else:
2180 # unexpected result
2181 retainResults = main.FALSE
2182 if retainResults != main.TRUE:
2183 main.log.error( "Error executing set retain" )
2184
2185 # Check if set is still correct
2186 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002187 getResponses = main.Cluster.command( "setTestGet",
2188 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002189 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002190 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002191 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002192 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002193 current = set( getResponses[ i ] )
2194 if len( current ) == len( getResponses[ i ] ):
2195 # no repeats
2196 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002197 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002198 " of set " + main.onosSetName + ":\n" +
2199 str( getResponses[ i ] ) )
2200 main.log.debug( "Expected: " + str( main.onosSet ) )
2201 main.log.debug( "Actual: " + str( current ) )
2202 getResults = main.FALSE
2203 else:
2204 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002205 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002206 " set " + main.onosSetName + ":\n" +
2207 str( getResponses[ i ] ) )
2208 getResults = main.FALSE
2209 elif getResponses[ i ] == main.ERROR:
2210 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002211 sizeResponses = main.Cluster.command( "setTestSize",
2212 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002213 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002214 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002215 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002216 if size != sizeResponses[ i ]:
2217 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002218 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002219 str( size ) + " for set " + main.onosSetName +
2220 " but got " + str( sizeResponses[ i ] ) )
2221 retainResults = retainResults and getResults and sizeResults
2222 utilities.assert_equals( expect=main.TRUE,
2223 actual=retainResults,
2224 onpass="Set retain correct",
2225 onfail="Set retain was incorrect" )
2226
2227 # Transactional maps
2228 main.step( "Partitioned Transactional maps put" )
2229 tMapValue = "Testing"
2230 numKeys = 100
2231 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002232 ctrl = main.Cluster.next()
2233 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002234 if putResponses and len( putResponses ) == 100:
2235 for i in putResponses:
2236 if putResponses[ i ][ 'value' ] != tMapValue:
2237 putResult = False
2238 else:
2239 putResult = False
2240 if not putResult:
2241 main.log.debug( "Put response values: " + str( putResponses ) )
2242 utilities.assert_equals( expect=True,
2243 actual=putResult,
2244 onpass="Partitioned Transactional Map put successful",
2245 onfail="Partitioned Transactional Map put values are incorrect" )
2246
2247 main.step( "Partitioned Transactional maps get" )
2248 # FIXME: is this sleep needed?
2249 time.sleep( 5 )
2250
2251 getCheck = True
2252 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002253 getResponses = main.Cluster.command( "transactionalMapGet",
2254 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002255 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002256 for node in getResponses:
2257 if node != tMapValue:
2258 valueCheck = False
2259 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002260 main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002261 main.log.warn( getResponses )
2262 getCheck = getCheck and valueCheck
2263 utilities.assert_equals( expect=True,
2264 actual=getCheck,
2265 onpass="Partitioned Transactional Map get values were correct",
2266 onfail="Partitioned Transactional Map values incorrect" )
2267
2268 # DISTRIBUTED ATOMIC VALUE
2269 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002270 getValues = main.Cluster.command( "valueTestGet",
2271 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002272 main.log.debug( getValues )
2273 # Check the results
2274 atomicValueGetResult = True
2275 expected = valueValue if valueValue is not None else "null"
2276 main.log.debug( "Checking for value of " + expected )
2277 for i in getValues:
2278 if i != expected:
2279 atomicValueGetResult = False
2280 utilities.assert_equals( expect=True,
2281 actual=atomicValueGetResult,
2282 onpass="Atomic Value get successful",
2283 onfail="Error getting atomic Value " +
2284 str( valueValue ) + ", found: " +
2285 str( getValues ) )
2286
2287 main.step( "Atomic Value set()" )
2288 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002289 setValues = main.Cluster.command( "valueTestSet",
2290 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002291 main.log.debug( setValues )
2292 # Check the results
2293 atomicValueSetResults = True
2294 for i in setValues:
2295 if i != main.TRUE:
2296 atomicValueSetResults = False
2297 utilities.assert_equals( expect=True,
2298 actual=atomicValueSetResults,
2299 onpass="Atomic Value set successful",
2300 onfail="Error setting atomic Value" +
2301 str( setValues ) )
2302
2303 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002304 getValues = main.Cluster.command( "valueTestGet",
2305 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002306 main.log.debug( getValues )
2307 # Check the results
2308 atomicValueGetResult = True
2309 expected = valueValue if valueValue is not None else "null"
2310 main.log.debug( "Checking for value of " + expected )
2311 for i in getValues:
2312 if i != expected:
2313 atomicValueGetResult = False
2314 utilities.assert_equals( expect=True,
2315 actual=atomicValueGetResult,
2316 onpass="Atomic Value get successful",
2317 onfail="Error getting atomic Value " +
2318 str( valueValue ) + ", found: " +
2319 str( getValues ) )
2320
2321 main.step( "Atomic Value compareAndSet()" )
2322 oldValue = valueValue
2323 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002324 ctrl = main.Cluster.next()
2325 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002326 main.log.debug( CASValue )
2327 utilities.assert_equals( expect=main.TRUE,
2328 actual=CASValue,
2329 onpass="Atomic Value comapreAndSet successful",
2330 onfail="Error setting atomic Value:" +
2331 str( CASValue ) )
2332
2333 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002334 getValues = main.Cluster.command( "valueTestGet",
2335 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002336 main.log.debug( getValues )
2337 # Check the results
2338 atomicValueGetResult = True
2339 expected = valueValue if valueValue is not None else "null"
2340 main.log.debug( "Checking for value of " + expected )
2341 for i in getValues:
2342 if i != expected:
2343 atomicValueGetResult = False
2344 utilities.assert_equals( expect=True,
2345 actual=atomicValueGetResult,
2346 onpass="Atomic Value get successful",
2347 onfail="Error getting atomic Value " +
2348 str( valueValue ) + ", found: " +
2349 str( getValues ) )
2350
2351 main.step( "Atomic Value getAndSet()" )
2352 oldValue = valueValue
2353 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002354 ctrl = main.Cluster.next()
2355 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002356 main.log.debug( GASValue )
2357 expected = oldValue if oldValue is not None else "null"
2358 utilities.assert_equals( expect=expected,
2359 actual=GASValue,
2360 onpass="Atomic Value GAS successful",
2361 onfail="Error with GetAndSet atomic Value: expected " +
2362 str( expected ) + ", found: " +
2363 str( GASValue ) )
2364
2365 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002366 getValues = main.Cluster.command( "valueTestGet",
2367 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002368 main.log.debug( getValues )
2369 # Check the results
2370 atomicValueGetResult = True
2371 expected = valueValue if valueValue is not None else "null"
2372 main.log.debug( "Checking for value of " + expected )
2373 for i in getValues:
2374 if i != expected:
2375 atomicValueGetResult = False
2376 utilities.assert_equals( expect=True,
2377 actual=atomicValueGetResult,
2378 onpass="Atomic Value get successful",
2379 onfail="Error getting atomic Value: expected " +
2380 str( valueValue ) + ", found: " +
2381 str( getValues ) )
2382
2383 main.step( "Atomic Value destory()" )
2384 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002385 ctrl = main.Cluster.next()
2386 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002387 main.log.debug( destroyResult )
2388 # Check the results
2389 utilities.assert_equals( expect=main.TRUE,
2390 actual=destroyResult,
2391 onpass="Atomic Value destroy successful",
2392 onfail="Error destroying atomic Value" )
2393
2394 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002395 getValues = main.Cluster.command( "valueTestGet",
2396 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002397 main.log.debug( getValues )
2398 # Check the results
2399 atomicValueGetResult = True
2400 expected = valueValue if valueValue is not None else "null"
2401 main.log.debug( "Checking for value of " + expected )
2402 for i in getValues:
2403 if i != expected:
2404 atomicValueGetResult = False
2405 utilities.assert_equals( expect=True,
2406 actual=atomicValueGetResult,
2407 onpass="Atomic Value get successful",
2408 onfail="Error getting atomic Value " +
2409 str( valueValue ) + ", found: " +
2410 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002411
2412 # WORK QUEUES
2413 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002414 ctrl = main.Cluster.next()
2415 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002416 workQueuePending += 1
2417 main.log.debug( addResult )
2418 # Check the results
2419 utilities.assert_equals( expect=main.TRUE,
2420 actual=addResult,
2421 onpass="Work Queue add successful",
2422 onfail="Error adding to Work Queue" )
2423
2424 main.step( "Check the work queue stats" )
2425 statsResults = self.workQueueStatsCheck( workQueueName,
2426 workQueueCompleted,
2427 workQueueInProgress,
2428 workQueuePending )
2429 utilities.assert_equals( expect=True,
2430 actual=statsResults,
2431 onpass="Work Queue stats correct",
2432 onfail="Work Queue stats incorrect " )
2433
2434 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002435 ctrl = main.Cluster.next()
2436 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002437 workQueuePending += 2
2438 main.log.debug( addMultipleResult )
2439 # Check the results
2440 utilities.assert_equals( expect=main.TRUE,
2441 actual=addMultipleResult,
2442 onpass="Work Queue add multiple successful",
2443 onfail="Error adding multiple items to Work Queue" )
2444
2445 main.step( "Check the work queue stats" )
2446 statsResults = self.workQueueStatsCheck( workQueueName,
2447 workQueueCompleted,
2448 workQueueInProgress,
2449 workQueuePending )
2450 utilities.assert_equals( expect=True,
2451 actual=statsResults,
2452 onpass="Work Queue stats correct",
2453 onfail="Work Queue stats incorrect " )
2454
2455 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002456 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002457 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002458 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002459 workQueuePending -= number
2460 workQueueCompleted += number
2461 main.log.debug( take1Result )
2462 # Check the results
2463 utilities.assert_equals( expect=main.TRUE,
2464 actual=take1Result,
2465 onpass="Work Queue takeAndComplete 1 successful",
2466 onfail="Error taking 1 from Work Queue" )
2467
2468 main.step( "Check the work queue stats" )
2469 statsResults = self.workQueueStatsCheck( workQueueName,
2470 workQueueCompleted,
2471 workQueueInProgress,
2472 workQueuePending )
2473 utilities.assert_equals( expect=True,
2474 actual=statsResults,
2475 onpass="Work Queue stats correct",
2476 onfail="Work Queue stats incorrect " )
2477
2478 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002479 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002480 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002481 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002482 workQueuePending -= number
2483 workQueueCompleted += number
2484 main.log.debug( take2Result )
2485 # Check the results
2486 utilities.assert_equals( expect=main.TRUE,
2487 actual=take2Result,
2488 onpass="Work Queue takeAndComplete 2 successful",
2489 onfail="Error taking 2 from Work Queue" )
2490
2491 main.step( "Check the work queue stats" )
2492 statsResults = self.workQueueStatsCheck( workQueueName,
2493 workQueueCompleted,
2494 workQueueInProgress,
2495 workQueuePending )
2496 utilities.assert_equals( expect=True,
2497 actual=statsResults,
2498 onpass="Work Queue stats correct",
2499 onfail="Work Queue stats incorrect " )
2500
2501 main.step( "Work Queue destroy()" )
2502 valueValue = None
2503 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002504 ctrl = main.Cluster.next()
2505 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002506 workQueueCompleted = 0
2507 workQueueInProgress = 0
2508 workQueuePending = 0
2509 main.log.debug( destroyResult )
2510 # Check the results
2511 utilities.assert_equals( expect=main.TRUE,
2512 actual=destroyResult,
2513 onpass="Work Queue destroy successful",
2514 onfail="Error destroying Work Queue" )
2515
2516 main.step( "Check the work queue stats" )
2517 statsResults = self.workQueueStatsCheck( workQueueName,
2518 workQueueCompleted,
2519 workQueueInProgress,
2520 workQueuePending )
2521 utilities.assert_equals( expect=True,
2522 actual=statsResults,
2523 onpass="Work Queue stats correct",
2524 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002525 except Exception as e:
2526 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002527
2528 def cleanUp( self, main ):
2529 """
2530 Clean up
2531 """
2532 import os
2533 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002534 assert main, "main not defined"
2535 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002536
2537 # printing colors to terminal
2538 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2539 'blue': '\033[94m', 'green': '\033[92m',
2540 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2541 main.case( "Test Cleanup" )
2542 main.step( "Killing tcpdumps" )
2543 main.Mininet2.stopTcpdump()
2544
2545 testname = main.TEST
2546 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2547 main.step( "Copying MN pcap and ONOS log files to test station" )
2548 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2549 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2550 # NOTE: MN Pcap file is being saved to logdir.
2551 # We scp this file as MN and TestON aren't necessarily the same vm
2552
2553 # FIXME: To be replaced with a Jenkin's post script
2554 # TODO: Load these from params
2555 # NOTE: must end in /
2556 logFolder = "/opt/onos/log/"
2557 logFiles = [ "karaf.log", "karaf.log.1" ]
2558 # NOTE: must end in /
2559 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002560 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002561 dstName = main.logdir + "/" + ctrl.name + "-" + f
2562 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002563 logFolder + f, dstName )
2564 # std*.log's
2565 # NOTE: must end in /
2566 logFolder = "/opt/onos/var/"
2567 logFiles = [ "stderr.log", "stdout.log" ]
2568 # NOTE: must end in /
2569 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002570 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002571 dstName = main.logdir + "/" + ctrl.name + "-" + f
2572 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002573 logFolder + f, dstName )
2574 else:
2575 main.log.debug( "skipping saving log files" )
2576
2577 main.step( "Stopping Mininet" )
2578 mnResult = main.Mininet1.stopNet()
2579 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2580 onpass="Mininet stopped",
2581 onfail="MN cleanup NOT successful" )
2582
2583 main.step( "Checking ONOS Logs for errors" )
Devin Lim142b5342017-07-20 15:22:39 -07002584 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002585 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2586 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002587
2588 try:
2589 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2590 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2591 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2592 timerLog.close()
2593 except NameError as e:
2594 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002595
Devin Lim58046fa2017-07-05 16:55:00 -07002596 def assignMastership( self, main ):
2597 """
2598 Assign mastership to controllers
2599 """
2600 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002601 assert main, "main not defined"
2602 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002603
2604 main.case( "Assigning Controller roles for switches" )
2605 main.caseExplanation = "Check that ONOS is connected to each " +\
2606 "device. Then manually assign" +\
2607 " mastership to specific ONOS nodes using" +\
2608 " 'device-role'"
2609 main.step( "Assign mastership of switches to specific controllers" )
2610 # Manually assign mastership to the controller we want
2611 roleCall = main.TRUE
2612
2613 ipList = []
2614 deviceList = []
Jon Hallca319892017-06-15 15:25:22 -07002615 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07002616 try:
2617 # Assign mastership to specific controllers. This assignment was
2618 # determined for a 7 node cluser, but will work with any sized
2619 # cluster
2620 for i in range( 1, 29 ): # switches 1 through 28
2621 # set up correct variables:
2622 if i == 1:
2623 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002624 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002625 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
2626 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002627 c = 1 % main.Cluster.numCtrls
2628 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002629 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
2630 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002631 c = 1 % main.Cluster.numCtrls
2632 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002633 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
2634 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002635 c = 3 % main.Cluster.numCtrls
2636 ip = main.Cluster.active( c ).ip_address # ONOS4
Devin Lim58046fa2017-07-05 16:55:00 -07002637 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
2638 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002639 c = 2 % main.Cluster.numCtrls
2640 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002641 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
2642 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002643 c = 2 % main.Cluster.numCtrls
2644 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002645 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
2646 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002647 c = 5 % main.Cluster.numCtrls
2648 ip = main.Cluster.active( c ).ip_address # ONOS6
Devin Lim58046fa2017-07-05 16:55:00 -07002649 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
2650 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002651 c = 4 % main.Cluster.numCtrls
2652 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002653 dpid = '3' + str( i ).zfill( 3 )
2654 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2655 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002656 c = 6 % main.Cluster.numCtrls
2657 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002658 dpid = '6' + str( i ).zfill( 3 )
2659 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2660 elif i == 28:
2661 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002662 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002663 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
2664 else:
2665 main.log.error( "You didn't write an else statement for " +
2666 "switch s" + str( i ) )
2667 roleCall = main.FALSE
2668 # Assign switch
2669 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2670 # TODO: make this controller dynamic
2671 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
2672 ipList.append( ip )
2673 deviceList.append( deviceId )
2674 except ( AttributeError, AssertionError ):
2675 main.log.exception( "Something is wrong with ONOS device view" )
2676 main.log.info( onosCli.devices() )
2677 utilities.assert_equals(
2678 expect=main.TRUE,
2679 actual=roleCall,
2680 onpass="Re-assigned switch mastership to designated controller",
2681 onfail="Something wrong with deviceRole calls" )
2682
2683 main.step( "Check mastership was correctly assigned" )
2684 roleCheck = main.TRUE
2685 # NOTE: This is due to the fact that device mastership change is not
2686 # atomic and is actually a multi step process
2687 time.sleep( 5 )
2688 for i in range( len( ipList ) ):
2689 ip = ipList[ i ]
2690 deviceId = deviceList[ i ]
2691 # Check assignment
2692 master = onosCli.getRole( deviceId ).get( 'master' )
2693 if ip in master:
2694 roleCheck = roleCheck and main.TRUE
2695 else:
2696 roleCheck = roleCheck and main.FALSE
2697 main.log.error( "Error, controller " + ip + " is not" +
2698 " master " + "of device " +
2699 str( deviceId ) + ". Master is " +
2700 repr( master ) + "." )
2701 utilities.assert_equals(
2702 expect=main.TRUE,
2703 actual=roleCheck,
2704 onpass="Switches were successfully reassigned to designated " +
2705 "controller",
2706 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002707
Devin Lim58046fa2017-07-05 16:55:00 -07002708 def bringUpStoppedNode( self, main ):
2709 """
2710 The bring up stopped nodes
2711 """
2712 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002713 assert main, "main not defined"
2714 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002715 assert main.kill, "main.kill not defined"
2716 main.case( "Restart minority of ONOS nodes" )
2717
2718 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2719 startResults = main.TRUE
2720 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002721 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002722 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002723 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002724 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2725 onpass="ONOS nodes started successfully",
2726 onfail="ONOS nodes NOT successfully started" )
2727
2728 main.step( "Checking if ONOS is up yet" )
2729 count = 0
2730 onosIsupResult = main.FALSE
2731 while onosIsupResult == main.FALSE and count < 10:
2732 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002733 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002734 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002735 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002736 count = count + 1
2737 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2738 onpass="ONOS restarted successfully",
2739 onfail="ONOS restart NOT successful" )
2740
Jon Hallca319892017-06-15 15:25:22 -07002741 main.step( "Restarting ONOS nodes" )
Devin Lim58046fa2017-07-05 16:55:00 -07002742 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002743 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002744 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002745 ctrl.startOnosCli( ctrl.ipAddress )
2746 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002747 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002748 onpass="ONOS node(s) restarted",
2749 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002750
2751 # Grab the time of restart so we chan check how long the gossip
2752 # protocol has had time to work
2753 main.restartTime = time.time() - restartTime
2754 main.log.debug( "Restart time: " + str( main.restartTime ) )
2755 # TODO: MAke this configurable. Also, we are breaking the above timer
2756 main.step( "Checking ONOS nodes" )
2757 nodeResults = utilities.retry( self.nodesCheck,
2758 False,
Jon Hallca319892017-06-15 15:25:22 -07002759 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07002760 sleep=15,
2761 attempts=5 )
2762
2763 utilities.assert_equals( expect=True, actual=nodeResults,
2764 onpass="Nodes check successful",
2765 onfail="Nodes check NOT successful" )
2766
2767 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002768 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002769 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002770 ctrl.name,
2771 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002772 main.log.error( "Failed to start ONOS, stopping test" )
2773 main.cleanup()
2774 main.exit()
2775
Jon Hallca319892017-06-15 15:25:22 -07002776 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002777
2778 main.step( "Rerun for election on the node(s) that were killed" )
2779 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002780 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002781 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002782 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002783 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2784 onpass="ONOS nodes reran for election topic",
2785 onfail="Errror rerunning for election" )
Devin Lim142b5342017-07-20 15:22:39 -07002786 def tempCell( self, cellName, ipList ):
2787 main.step( "Create cell file" )
2788 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002789
Devin Lim142b5342017-07-20 15:22:39 -07002790
2791 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2792 main.Mininet1.ip_address,
2793 cellAppString, ipList , main.ONOScli1.karafUser )
2794 main.step( "Applying cell variable to environment" )
2795 cellResult = main.ONOSbench.setCell( cellName )
2796 verifyResult = main.ONOSbench.verifyCell()
2797
2798
2799 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002800 """
2801 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002802 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002803 1: scaling
2804 """
2805 """
2806 Check state after ONOS failure/scaling
2807 """
2808 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002809 assert main, "main not defined"
2810 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002811 main.case( "Running ONOS Constant State Tests" )
2812
2813 OnosAfterWhich = [ "failure" , "scaliing" ]
2814
Devin Lim58046fa2017-07-05 16:55:00 -07002815 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002816 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002817
Devin Lim142b5342017-07-20 15:22:39 -07002818 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002819 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002820
2821 if rolesResults and not consistentMastership:
2822 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002823 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002824 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002825 json.dumps( json.loads( ONOSMastership[ i ] ),
2826 sort_keys=True,
2827 indent=4,
2828 separators=( ',', ': ' ) ) )
2829
2830 if compareSwitch:
2831 description2 = "Compare switch roles from before failure"
2832 main.step( description2 )
2833 try:
2834 currentJson = json.loads( ONOSMastership[ 0 ] )
2835 oldJson = json.loads( mastershipState )
2836 except ( ValueError, TypeError ):
2837 main.log.exception( "Something is wrong with parsing " +
2838 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002839 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2840 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002841 main.cleanup()
2842 main.exit()
2843 mastershipCheck = main.TRUE
2844 for i in range( 1, 29 ):
2845 switchDPID = str(
2846 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
2847 current = [ switch[ 'master' ] for switch in currentJson
2848 if switchDPID in switch[ 'id' ] ]
2849 old = [ switch[ 'master' ] for switch in oldJson
2850 if switchDPID in switch[ 'id' ] ]
2851 if current == old:
2852 mastershipCheck = mastershipCheck and main.TRUE
2853 else:
2854 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2855 mastershipCheck = main.FALSE
2856 utilities.assert_equals(
2857 expect=main.TRUE,
2858 actual=mastershipCheck,
2859 onpass="Mastership of Switches was not changed",
2860 onfail="Mastership of some switches changed" )
2861
2862 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002863 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002864 intentCheck = main.FALSE
2865 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002866
2867 main.step( "Check for consistency in Intents from each controller" )
2868 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2869 main.log.info( "Intents are consistent across all ONOS " +
2870 "nodes" )
2871 else:
2872 consistentIntents = False
2873
2874 # Try to make it easy to figure out what is happening
2875 #
2876 # Intent ONOS1 ONOS2 ...
2877 # 0x01 INSTALLED INSTALLING
2878 # ... ... ...
2879 # ... ... ...
2880 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002881 for ctrl in main.Cluster.active():
2882 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002883 main.log.warn( title )
2884 # get all intent keys in the cluster
2885 keys = []
2886 for nodeStr in ONOSIntents:
2887 node = json.loads( nodeStr )
2888 for intent in node:
2889 keys.append( intent.get( 'id' ) )
2890 keys = set( keys )
2891 for key in keys:
2892 row = "%-13s" % key
2893 for nodeStr in ONOSIntents:
2894 node = json.loads( nodeStr )
2895 for intent in node:
2896 if intent.get( 'id' ) == key:
2897 row += "%-15s" % intent.get( 'state' )
2898 main.log.warn( row )
2899 # End table view
2900
2901 utilities.assert_equals(
2902 expect=True,
2903 actual=consistentIntents,
2904 onpass="Intents are consistent across all ONOS nodes",
2905 onfail="ONOS nodes have different views of intents" )
2906 intentStates = []
2907 for node in ONOSIntents: # Iter through ONOS nodes
2908 nodeStates = []
2909 # Iter through intents of a node
2910 try:
2911 for intent in json.loads( node ):
2912 nodeStates.append( intent[ 'state' ] )
2913 except ( ValueError, TypeError ):
2914 main.log.exception( "Error in parsing intents" )
2915 main.log.error( repr( node ) )
2916 intentStates.append( nodeStates )
2917 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2918 main.log.info( dict( out ) )
2919
2920 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002921 for i in range( len( main.Cluster.active() ) ):
2922 ctrl = main.Cluster.contoller[ i ]
2923 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002924 main.log.warn( json.dumps(
2925 json.loads( ONOSIntents[ i ] ),
2926 sort_keys=True,
2927 indent=4,
2928 separators=( ',', ': ' ) ) )
2929 elif intentsResults and consistentIntents:
2930 intentCheck = main.TRUE
2931
2932 # NOTE: Store has no durability, so intents are lost across system
2933 # restarts
2934 if not isRestart:
2935 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2936 # NOTE: this requires case 5 to pass for intentState to be set.
2937 # maybe we should stop the test if that fails?
2938 sameIntents = main.FALSE
2939 try:
2940 intentState
2941 except NameError:
2942 main.log.warn( "No previous intent state was saved" )
2943 else:
2944 if intentState and intentState == ONOSIntents[ 0 ]:
2945 sameIntents = main.TRUE
2946 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2947 # TODO: possibly the states have changed? we may need to figure out
2948 # what the acceptable states are
2949 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2950 sameIntents = main.TRUE
2951 try:
2952 before = json.loads( intentState )
2953 after = json.loads( ONOSIntents[ 0 ] )
2954 for intent in before:
2955 if intent not in after:
2956 sameIntents = main.FALSE
2957 main.log.debug( "Intent is not currently in ONOS " +
2958 "(at least in the same form):" )
2959 main.log.debug( json.dumps( intent ) )
2960 except ( ValueError, TypeError ):
2961 main.log.exception( "Exception printing intents" )
2962 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2963 main.log.debug( repr( intentState ) )
2964 if sameIntents == main.FALSE:
2965 try:
2966 main.log.debug( "ONOS intents before: " )
2967 main.log.debug( json.dumps( json.loads( intentState ),
2968 sort_keys=True, indent=4,
2969 separators=( ',', ': ' ) ) )
2970 main.log.debug( "Current ONOS intents: " )
2971 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2972 sort_keys=True, indent=4,
2973 separators=( ',', ': ' ) ) )
2974 except ( ValueError, TypeError ):
2975 main.log.exception( "Exception printing intents" )
2976 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2977 main.log.debug( repr( intentState ) )
2978 utilities.assert_equals(
2979 expect=main.TRUE,
2980 actual=sameIntents,
2981 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ] ,
2982 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
2983 intentCheck = intentCheck and sameIntents
2984
2985 main.step( "Get the OF Table entries and compare to before " +
2986 "component " + OnosAfterWhich[ afterWhich ] )
2987 FlowTables = main.TRUE
2988 for i in range( 28 ):
2989 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2990 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2991 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
2992 FlowTables = FlowTables and curSwitch
2993 if curSwitch == main.FALSE:
2994 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2995 utilities.assert_equals(
2996 expect=main.TRUE,
2997 actual=FlowTables,
2998 onpass="No changes were found in the flow tables",
2999 onfail="Changes were found in the flow tables" )
3000
Jon Hallca319892017-06-15 15:25:22 -07003001 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07003002 """
3003 main.step( "Check the continuous pings to ensure that no packets " +
3004 "were dropped during component failure" )
3005 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
3006 main.params[ 'TESTONIP' ] )
3007 LossInPings = main.FALSE
3008 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3009 for i in range( 8, 18 ):
3010 main.log.info(
3011 "Checking for a loss in pings along flow from s" +
3012 str( i ) )
3013 LossInPings = main.Mininet2.checkForLoss(
3014 "/tmp/ping.h" +
3015 str( i ) ) or LossInPings
3016 if LossInPings == main.TRUE:
3017 main.log.info( "Loss in ping detected" )
3018 elif LossInPings == main.ERROR:
3019 main.log.info( "There are multiple mininet process running" )
3020 elif LossInPings == main.FALSE:
3021 main.log.info( "No Loss in the pings" )
3022 main.log.info( "No loss of dataplane connectivity" )
3023 utilities.assert_equals(
3024 expect=main.FALSE,
3025 actual=LossInPings,
3026 onpass="No Loss of connectivity",
3027 onfail="Loss of dataplane connectivity detected" )
3028 # NOTE: Since intents are not persisted with IntnentStore,
3029 # we expect loss in dataplane connectivity
3030 LossInPings = main.FALSE
3031 """
3032
    def compareTopo( self, main ):
        """
        Compare the ONOS topology view against the Mininet topology.

        Polls devices/hosts/ports/links/clusters from every active ONOS
        node (retrying for up to ~60s / 3 attempts to let gossip
        converge), compares each node's view against Mininet, checks that
        all nodes agree with each other, verifies host attachment points
        against the hard-coded obelisk topology mapping, and finally
        checks ONOS node health. Exits the test on ImportError or if the
        topology never matches.
        """
        import json
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        try:
            from tests.dependencies.topology import Topology
        except ImportError:
            main.log.error( "Topology not found exiting the test" )
            main.cleanup()
            main.exit()
        # Lazily create the shared Topology helper on first use
        try:
            main.topoRelated
        except ( NameError, AttributeError ):
            main.topoRelated = Topology()
        main.case( "Compare ONOS Topology view to Mininet topology" )
        main.caseExplanation = "Compare topology objects between Mininet" +\
                               " and ONOS"
        topoResult = main.FALSE
        topoFailMsg = "ONOS topology don't match Mininet"
        elapsed = 0
        count = 0
        main.step( "Comparing ONOS topology to MN topology" )
        startTime = time.time()
        # Give time for Gossip to work
        while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
            devicesResults = main.TRUE
            linksResults = main.TRUE
            hostsResults = main.TRUE
            hostAttachmentResults = True
            count += 1
            cliStart = time.time()
            # Fetch each topology object from every node (with retries)
            devices = main.topoRelated.getAll( "devices", True,
                                               kwargs={ 'sleep': 5, 'attempts': 5,
                                                        'randomTime': True } )
            ipResult = main.TRUE

            hosts = main.topoRelated.getAll( "hosts", True,
                                             kwargs={ 'sleep': 5, 'attempts': 5,
                                                      'randomTime': True },
                                             inJson=True )

            # Every discovered host should have at least one IP address
            for controller in range( 0, len( hosts ) ):
                controllerStr = str( main.Cluster.active( controller ) )
                if hosts[ controller ]:
                    for host in hosts[ controller ]:
                        if host is None or host.get( 'ipAddresses', [] ) == []:
                            main.log.error(
                                "Error with host ipAddresses on controller" +
                                controllerStr + ": " + str( host ) )
                            ipResult = main.FALSE
            ports = main.topoRelated.getAll( "ports" , True,
                                             kwargs={ 'sleep': 5, 'attempts': 5,
                                                      'randomTime': True } )
            links = main.topoRelated.getAll( "links", True,
                                             kwargs={ 'sleep': 5, 'attempts': 5,
                                                      'randomTime': True } )
            clusters = main.topoRelated.getAll( "clusters", True,
                                                kwargs={ 'sleep': 5, 'attempts': 5,
                                                         'randomTime': True } )

            elapsed = time.time() - startTime
            cliTime = time.time() - cliStart
            print "Elapsed time: " + str( elapsed )
            print "CLI time: " + str( cliTime )

            # If every node returned nothing, retry instead of comparing
            if all( e is None for e in devices ) and\
               all( e is None for e in hosts ) and\
               all( e is None for e in ports ) and\
               all( e is None for e in links ) and\
               all( e is None for e in clusters ):
                topoFailMsg = "Could not get topology from ONOS"
                main.log.error( topoFailMsg )
                continue  # Try again, No use trying to compare

            mnSwitches = main.Mininet1.getSwitches()
            mnLinks = main.Mininet1.getLinks()
            mnHosts = main.Mininet1.getHosts()
            # Compare each active node's view against Mininet
            for controller in range( len( main.Cluster.active() ) ):
                controllerStr = str( main.Cluster.active( controller ) )
                currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1, controller,
                                                                           mnSwitches,
                                                                           devices, ports )
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentDevicesResult,
                                         onpass=controllerStr +
                                         " Switches view is correct",
                                         onfail=controllerStr +
                                         " Switches view is incorrect" )


                currentLinksResult = main.topoRelated.compareBase( links, controller,
                                                                   main.Mininet1.compareLinks,
                                                                   [mnSwitches, mnLinks] )
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentLinksResult,
                                         onpass=controllerStr +
                                         " links view is correct",
                                         onfail=controllerStr +
                                         " links view is incorrect" )
                if hosts[ controller ] and "Error" not in hosts[ controller ]:
                    currentHostsResult = main.Mininet1.compareHosts(
                            mnHosts,
                            hosts[ controller ] )
                elif hosts[ controller ] == []:
                    # no hosts discovered yet counts as a (vacuous) pass
                    currentHostsResult = main.TRUE
                else:
                    currentHostsResult = main.FALSE
                utilities.assert_equals( expect=main.TRUE,
                                         actual=currentHostsResult,
                                         onpass=controllerStr +
                                         " hosts exist in Mininet",
                                         onfail=controllerStr +
                                         " hosts don't match Mininet" )
                # CHECKING HOST ATTACHMENT POINTS
                hostAttachment = True
                zeroHosts = False
                # FIXME: topo-HA/obelisk specific mappings:
                # key is mac and value is dpid
                mappings = {}
                for i in range( 1, 29 ):  # hosts 1 through 28
                    # set up correct variables:
                    macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
                    if i == 1:
                        deviceId = "1000".zfill( 16 )
                    elif i == 2:
                        deviceId = "2000".zfill( 16 )
                    elif i == 3:
                        deviceId = "3000".zfill( 16 )
                    elif i == 4:
                        deviceId = "3004".zfill( 16 )
                    elif i == 5:
                        deviceId = "5000".zfill( 16 )
                    elif i == 6:
                        deviceId = "6000".zfill( 16 )
                    elif i == 7:
                        deviceId = "6007".zfill( 16 )
                    elif i >= 8 and i <= 17:
                        dpid = '3' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill( 16 )
                    elif i >= 18 and i <= 27:
                        dpid = '6' + str( i ).zfill( 3 )
                        deviceId = dpid.zfill( 16 )
                    elif i == 28:
                        deviceId = "2800".zfill( 16 )
                    mappings[ macId ] = deviceId
                if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                    if hosts[ controller ] == []:
                        main.log.warn( "There are no hosts discovered" )
                        zeroHosts = True
                    else:
                        for host in hosts[ controller ]:
                            mac = None
                            location = None
                            device = None
                            port = None
                            try:
                                mac = host.get( 'mac' )
                                assert mac, "mac field could not be found for this host object"

                                location = host.get( 'locations' )[ 0 ]
                                assert location, "location field could not be found for this host object"

                                # Trim the protocol identifier off deviceId
                                device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
                                assert device, "elementId field could not be found for this host location object"

                                port = location.get( 'port' )
                                assert port, "port field could not be found for this host location object"

                                # Now check if this matches where they should be
                                if mac and device and port:
                                    if str( port ) != "1":
                                        main.log.error( "The attachment port is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: 1 Actual: " + str( port ) )
                                        hostAttachment = False
                                    if device != mappings[ str( mac ) ]:
                                        main.log.error( "The attachment device is incorrect for " +
                                                        "host " + str( mac ) +
                                                        ". Expected: " + mappings[ str( mac ) ] +
                                                        " Actual: " + device )
                                        hostAttachment = False
                                else:
                                    hostAttachment = False
                            except AssertionError:
                                main.log.exception( "Json object not as expected" )
                                main.log.error( repr( host ) )
                                hostAttachment = False
                else:
                    main.log.error( "No hosts json output or \"Error\"" +
                                    " in output. hosts = " +
                                    repr( hosts[ controller ] ) )
                if zeroHosts is False:
                    # TODO: Find a way to know if there should be hosts in a
                    #       given point of the test
                    hostAttachment = True

                # END CHECKING HOST ATTACHMENT POINTS
                devicesResults = devicesResults and currentDevicesResult
                linksResults = linksResults and currentLinksResult
                hostsResults = hostsResults and currentHostsResult
                hostAttachmentResults = hostAttachmentResults and\
                                        hostAttachment
                topoResult = ( devicesResults and linksResults
                               and hostsResults and ipResult and
                               hostAttachmentResults )
            utilities.assert_equals( expect=True,
                                     actual=topoResult,
                                     onpass="ONOS topology matches Mininet",
                                     onfail=topoFailMsg )
            # End of While loop to pull ONOS state

        # Compare json objects for hosts and dataplane clusters

        # hosts
        main.step( "Hosts view is consistent across all ONOS nodes" )
        consistentHostsResult = main.TRUE
        for controller in range( len( hosts ) ):
            controllerStr = str( main.Cluster.active( controller ) )
            if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
                if hosts[ controller ] == hosts[ 0 ]:
                    continue
                else:  # hosts not consistent
                    main.log.error( "hosts from " + controllerStr +
                                    " is inconsistent with ONOS1" )
                    main.log.debug( repr( hosts[ controller ] ) )
                    consistentHostsResult = main.FALSE

            else:
                main.log.error( "Error in getting ONOS hosts from " +
                                controllerStr )
                consistentHostsResult = main.FALSE
                main.log.debug( controllerStr +
                                " hosts response: " +
                                repr( hosts[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentHostsResult,
            onpass="Hosts view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of hosts" )

        main.step( "Hosts information is correct" )
        hostsResults = hostsResults and ipResult
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Host information is correct",
            onfail="Host information is incorrect" )

        main.step( "Host attachment points to the network" )
        utilities.assert_equals(
            expect=True,
            actual=hostAttachmentResults,
            onpass="Hosts are correctly attached to the network",
            onfail="ONOS did not correctly attach hosts to the network" )

        # Strongly connected clusters of devices
        main.step( "Clusters view is consistent across all ONOS nodes" )
        consistentClustersResult = main.TRUE
        for controller in range( len( clusters ) ):
            controllerStr = str( main.Cluster.active( controller ) )
            if "Error" not in clusters[ controller ]:
                if clusters[ controller ] == clusters[ 0 ]:
                    continue
                else:  # clusters not consistent
                    main.log.error( "clusters from " +
                                    controllerStr +
                                    " is inconsistent with ONOS1" )
                    consistentClustersResult = main.FALSE
            else:
                main.log.error( "Error in getting dataplane clusters " +
                                "from " + controllerStr )
                consistentClustersResult = main.FALSE
                main.log.debug( controllerStr +
                                " clusters response: " +
                                repr( clusters[ controller ] ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=consistentClustersResult,
            onpass="Clusters view is consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of clusters" )
        if not consistentClustersResult:
            main.log.debug( clusters )
            for x in links:
                main.log.debug( "{}: {}".format( len( x ), x ) )

        main.step( "There is only one SCC" )
        # there should always only be one cluster
        try:
            numClusters = len( json.loads( clusters[ 0 ] ) )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing clusters[0]: " +
                                repr( clusters[ 0 ] ) )
            numClusters = "ERROR"
            clusterResults = main.FALSE
        if numClusters == 1:
            clusterResults = main.TRUE
        utilities.assert_equals(
            expect=1,
            actual=numClusters,
            onpass="ONOS shows 1 SCC",
            onfail="ONOS shows " + str( numClusters ) + " SCCs" )

        topoResult = ( devicesResults and linksResults
                       and hostsResults and consistentHostsResult
                       and consistentClustersResult and clusterResults
                       and ipResult and hostAttachmentResults )

        # Also require convergence within 2 polling rounds
        topoResult = topoResult and int( count <= 2 )
        note = "note it takes about " + str( int( cliTime ) ) + \
            " seconds for the test to make all the cli calls to fetch " +\
            "the topology from each ONOS instance"
        main.log.info(
            "Very crass estimate for topology discovery/convergence( " +
            str( note ) + " ): " + str( elapsed ) + " seconds, " +
            str( count ) + " tries" )

        main.step( "Device information is correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=devicesResults,
            onpass="Device information is correct",
            onfail="Device information is incorrect" )

        main.step( "Links are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=linksResults,
            onpass="Link are correct",
            onfail="Links are incorrect" )

        main.step( "Hosts are correct" )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=hostsResults,
            onpass="Hosts are correct",
            onfail="Hosts are incorrect" )

        # FIXME: move this to an ONOS state case
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( self.nodesCheck,
                                       False,
                                       args=[ main.Cluster.active() ],
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
        if not nodeResults:
            # Dump inactive components of each node for debugging
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )

        if not topoResult:
            main.cleanup()
            main.exit()
Jon Hallca319892017-06-15 15:25:22 -07003393
Devin Lim58046fa2017-07-05 16:55:00 -07003394 def linkDown( self, main, fromS="s3", toS="s28" ):
3395 """
3396 Link fromS-toS down
3397 """
3398 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003399 assert main, "main not defined"
3400 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003401 # NOTE: You should probably run a topology check after this
3402
3403 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3404
3405 description = "Turn off a link to ensure that Link Discovery " +\
3406 "is working properly"
3407 main.case( description )
3408
3409 main.step( "Kill Link between " + fromS + " and " + toS )
3410 LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
3411 main.log.info( "Waiting " + str( linkSleep ) +
3412 " seconds for link down to be discovered" )
3413 time.sleep( linkSleep )
3414 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3415 onpass="Link down successful",
3416 onfail="Failed to bring link down" )
3417 # TODO do some sort of check here
3418
3419 def linkUp( self, main, fromS="s3", toS="s28" ):
3420 """
3421 Link fromS-toS up
3422 """
3423 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003424 assert main, "main not defined"
3425 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003426 # NOTE: You should probably run a topology check after this
3427
3428 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3429
3430 description = "Restore a link to ensure that Link Discovery is " + \
3431 "working properly"
3432 main.case( description )
3433
3434 main.step( "Bring link between " + fromS + " and " + toS +" back up" )
3435 LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
3436 main.log.info( "Waiting " + str( linkSleep ) +
3437 " seconds for link up to be discovered" )
3438 time.sleep( linkSleep )
3439 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3440 onpass="Link up successful",
3441 onfail="Failed to bring link up" )
3442
3443 def switchDown( self, main ):
3444 """
3445 Switch Down
3446 """
3447 # NOTE: You should probably run a topology check after this
3448 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003449 assert main, "main not defined"
3450 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003451
3452 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3453
3454 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003455 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003456 main.case( description )
3457 switch = main.params[ 'kill' ][ 'switch' ]
3458 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3459
3460 # TODO: Make this switch parameterizable
3461 main.step( "Kill " + switch )
3462 main.log.info( "Deleting " + switch )
3463 main.Mininet1.delSwitch( switch )
3464 main.log.info( "Waiting " + str( switchSleep ) +
3465 " seconds for switch down to be discovered" )
3466 time.sleep( switchSleep )
3467 device = onosCli.getDevice( dpid=switchDPID )
3468 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003469 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003470 result = main.FALSE
3471 if device and device[ 'available' ] is False:
3472 result = main.TRUE
3473 utilities.assert_equals( expect=main.TRUE, actual=result,
3474 onpass="Kill switch successful",
3475 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003476
Devin Lim58046fa2017-07-05 16:55:00 -07003477 def switchUp( self, main ):
3478 """
3479 Switch Up
3480 """
3481 # NOTE: You should probably run a topology check after this
3482 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003483 assert main, "main not defined"
3484 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003485
3486 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3487 switch = main.params[ 'kill' ][ 'switch' ]
3488 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3489 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003490 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003491 description = "Adding a switch to ensure it is discovered correctly"
3492 main.case( description )
3493
3494 main.step( "Add back " + switch )
3495 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3496 for peer in links:
3497 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003498 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003499 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3500 main.log.info( "Waiting " + str( switchSleep ) +
3501 " seconds for switch up to be discovered" )
3502 time.sleep( switchSleep )
3503 device = onosCli.getDevice( dpid=switchDPID )
3504 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003505 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003506 result = main.FALSE
3507 if device and device[ 'available' ]:
3508 result = main.TRUE
3509 utilities.assert_equals( expect=main.TRUE, actual=result,
3510 onpass="add switch successful",
3511 onfail="Failed to add switch?" )
3512
3513 def startElectionApp( self, main ):
3514 """
3515 start election app on all onos nodes
3516 """
Devin Lim58046fa2017-07-05 16:55:00 -07003517 assert main, "main not defined"
3518 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003519
3520 main.case( "Start Leadership Election app" )
3521 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003522 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003523 appResult = onosCli.activateApp( "org.onosproject.election" )
3524 utilities.assert_equals(
3525 expect=main.TRUE,
3526 actual=appResult,
3527 onpass="Election app installed",
3528 onfail="Something went wrong with installing Leadership election" )
3529
3530 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003531 onosCli.electionTestRun()
3532 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003533 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003534 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003535 utilities.assert_equals(
3536 expect=True,
3537 actual=sameResult,
3538 onpass="All nodes see the same leaderboards",
3539 onfail="Inconsistent leaderboards" )
3540
3541 if sameResult:
3542 leader = leaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003543 if onosCli.ipAddress in leader:
Devin Lim58046fa2017-07-05 16:55:00 -07003544 correctLeader = True
3545 else:
3546 correctLeader = False
3547 main.step( "First node was elected leader" )
3548 utilities.assert_equals(
3549 expect=True,
3550 actual=correctLeader,
3551 onpass="Correct leader was elected",
3552 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003553 main.Cluster.testLeader = leader
3554
Devin Lim58046fa2017-07-05 16:55:00 -07003555 def isElectionFunctional( self, main ):
3556 """
3557 Check that Leadership Election is still functional
3558 15.1 Run election on each node
3559 15.2 Check that each node has the same leaders and candidates
3560 15.3 Find current leader and withdraw
3561 15.4 Check that a new node was elected leader
3562 15.5 Check that that new leader was the candidate of old leader
3563 15.6 Run for election on old leader
3564 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3565 15.8 Make sure that the old leader was added to the candidate list
3566
3567 old and new variable prefixes refer to data from before vs after
3568 withdrawl and later before withdrawl vs after re-election
3569 """
3570 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003571 assert main, "main not defined"
3572 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003573
3574 description = "Check that Leadership Election is still functional"
3575 main.case( description )
3576 # NOTE: Need to re-run after restarts since being a canidate is not persistant
3577
3578 oldLeaders = [] # list of lists of each nodes' candidates before
3579 newLeaders = [] # list of lists of each nodes' candidates after
3580 oldLeader = '' # the old leader from oldLeaders, None if not same
3581 newLeader = '' # the new leaders fron newLoeaders, None if not same
3582 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3583 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003584 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003585 expectNoLeader = True
3586
3587 main.step( "Run for election on each node" )
Devin Lim142b5342017-07-20 15:22:39 -07003588 electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003589 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003590 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003591 actual=electionResult,
3592 onpass="All nodes successfully ran for leadership",
3593 onfail="At least one node failed to run for leadership" )
3594
3595 if electionResult == main.FALSE:
3596 main.log.error(
3597 "Skipping Test Case because Election Test App isn't loaded" )
3598 main.skipCase()
3599
3600 main.step( "Check that each node shows the same leader and candidates" )
3601 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003602 activeCLIs = main.Cluster.active()
3603 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Devin Lim58046fa2017-07-05 16:55:00 -07003604 if sameResult:
3605 oldLeader = oldLeaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003606 main.log.info( "Old leader: " + oldLeader )
Devin Lim58046fa2017-07-05 16:55:00 -07003607 else:
3608 oldLeader = None
3609 utilities.assert_equals(
3610 expect=True,
3611 actual=sameResult,
3612 onpass="Leaderboards are consistent for the election topic",
3613 onfail=failMessage )
3614
3615 main.step( "Find current leader and withdraw" )
3616 withdrawResult = main.TRUE
3617 # do some sanity checking on leader before using it
3618 if oldLeader is None:
3619 main.log.error( "Leadership isn't consistent." )
3620 withdrawResult = main.FALSE
3621 # Get the CLI of the oldLeader
Jon Hallca319892017-06-15 15:25:22 -07003622 for ctrl in main.Cluster.active():
3623 if oldLeader == ctrl.ipAddress:
3624 oldLeaderCLI = ctrl
Devin Lim58046fa2017-07-05 16:55:00 -07003625 break
3626 else: # FOR/ELSE statement
3627 main.log.error( "Leader election, could not find current leader" )
3628 if oldLeader:
3629 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3630 utilities.assert_equals(
3631 expect=main.TRUE,
3632 actual=withdrawResult,
3633 onpass="Node was withdrawn from election",
3634 onfail="Node was not withdrawn from election" )
3635
3636 main.step( "Check that a new node was elected leader" )
3637 failMessage = "Nodes have different leaders"
3638 # Get new leaders and candidates
3639 newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
3640 newLeader = None
3641 if newLeaderResult:
3642 if newLeaders[ 0 ][ 0 ] == 'none':
3643 main.log.error( "No leader was elected on at least 1 node" )
3644 if not expectNoLeader:
3645 newLeaderResult = False
3646 newLeader = newLeaders[ 0 ][ 0 ]
3647
3648 # Check that the new leader is not the older leader, which was withdrawn
3649 if newLeader == oldLeader:
3650 newLeaderResult = False
3651 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3652 " as the current leader" )
3653 utilities.assert_equals(
3654 expect=True,
3655 actual=newLeaderResult,
3656 onpass="Leadership election passed",
3657 onfail="Something went wrong with Leadership election" )
3658
3659 main.step( "Check that that new leader was the candidate of old leader" )
3660 # candidates[ 2 ] should become the top candidate after withdrawl
3661 correctCandidateResult = main.TRUE
3662 if expectNoLeader:
3663 if newLeader == 'none':
3664 main.log.info( "No leader expected. None found. Pass" )
3665 correctCandidateResult = main.TRUE
3666 else:
3667 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3668 correctCandidateResult = main.FALSE
3669 elif len( oldLeaders[ 0 ] ) >= 3:
3670 if newLeader == oldLeaders[ 0 ][ 2 ]:
3671 # correct leader was elected
3672 correctCandidateResult = main.TRUE
3673 else:
3674 correctCandidateResult = main.FALSE
3675 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3676 newLeader, oldLeaders[ 0 ][ 2 ] ) )
3677 else:
3678 main.log.warn( "Could not determine who should be the correct leader" )
3679 main.log.debug( oldLeaders[ 0 ] )
3680 correctCandidateResult = main.FALSE
3681 utilities.assert_equals(
3682 expect=main.TRUE,
3683 actual=correctCandidateResult,
3684 onpass="Correct Candidate Elected",
3685 onfail="Incorrect Candidate Elected" )
3686
3687 main.step( "Run for election on old leader( just so everyone " +
3688 "is in the hat )" )
3689 if oldLeaderCLI is not None:
3690 runResult = oldLeaderCLI.electionTestRun()
3691 else:
3692 main.log.error( "No old leader to re-elect" )
3693 runResult = main.FALSE
3694 utilities.assert_equals(
3695 expect=main.TRUE,
3696 actual=runResult,
3697 onpass="App re-ran for election",
3698 onfail="App failed to run for election" )
3699
3700 main.step(
3701 "Check that oldLeader is a candidate, and leader if only 1 node" )
3702 # verify leader didn't just change
3703 # Get new leaders and candidates
3704 reRunLeaders = []
3705 time.sleep( 5 ) # Paremterize
3706 positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3707
3708 # Check that the re-elected node is last on the candidate List
3709 if not reRunLeaders[ 0 ]:
3710 positionResult = main.FALSE
3711 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
3712 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
3713 str( reRunLeaders[ 0 ] ) ) )
3714 positionResult = main.FALSE
3715 utilities.assert_equals(
3716 expect=True,
3717 actual=positionResult,
3718 onpass="Old leader successfully re-ran for election",
3719 onfail="Something went wrong with Leadership election after " +
3720 "the old leader re-ran for election" )
Jon Hallca319892017-06-15 15:25:22 -07003721
Devin Lim58046fa2017-07-05 16:55:00 -07003722 def installDistributedPrimitiveApp( self, main ):
3723 """
3724 Install Distributed Primitives app
3725 """
3726 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003727 assert main, "main not defined"
3728 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003729
3730 # Variables for the distributed primitives tests
3731 main.pCounterName = "TestON-Partitions"
3732 main.pCounterValue = 0
3733 main.onosSet = set( [] )
3734 main.onosSetName = "TestON-set"
3735
3736 description = "Install Primitives app"
3737 main.case( description )
3738 main.step( "Install Primitives app" )
3739 appName = "org.onosproject.distributedprimitives"
Jon Hallca319892017-06-15 15:25:22 -07003740 appResults = main.Cluster.next().activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003741 utilities.assert_equals( expect=main.TRUE,
3742 actual=appResults,
3743 onpass="Primitives app activated",
3744 onfail="Primitives app not activated" )
3745 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003746 time.sleep( 5 ) # To allow all nodes to activate