blob: 631ba2afdd0060afc70b7680ef39a56283daa031 [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
2Copyright 2015 Open Networking Foundation (ONF)
3
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
11 (at your option) any later version.
12
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
21
Jon Halla440e872016-03-31 15:15:50 -070022import json
Jon Hall41d39f12016-04-11 22:54:35 -070023import time
Jon Halle1a3b752015-07-22 13:02:46 -070024
Jon Hallf37d44d2017-05-24 10:37:30 -070025
Jon Hall41d39f12016-04-11 22:54:35 -070026class HA():
Jon Hall57b50432015-10-22 10:20:10 -070027
    def __init__( self ):
        """
        Collection of reusable helper routines shared by the HA ( High
        Availability ) test suites.
        """
        # Placeholder attribute; no per-instance state is needed yet.
        self.default = ''
Jon Hall57b50432015-10-22 10:20:10 -070030
    def customizeOnosGenPartitions( self ):
        """
        Overwrite the onos-gen-partitions script in the ONOS bench tools
        directory with the customized copy shipped in the HA test's
        dependencies folder.
        """
        # copy gen-partions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        # NOTE(review): direction="from" looks inverted for a copy onto the
        #               bench; presumably it works because src and dst are on
        #               the same host -- confirm against secureCopy semantics.
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070042
    def cleanUpGenPartition( self ):
        """
        Restore the onos-gen-partitions script to its committed state by
        running 'git checkout --' in the ONOS home directory on the bench.
        Aborts the whole test on a pexpect timeout/EOF.
        """
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        # NOTE(review): 'pexpect' is not imported in this module; this except
        #               clause relies on it being in scope at runtime -- verify.
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070056
Devin Lim58046fa2017-07-05 16:55:00 -070057 def startingMininet( self ):
58 main.step( "Starting Mininet" )
59 # scp topo file to mininet
60 # TODO: move to params?
61 topoName = "obelisk.py"
62 filePath = main.ONOSbench.home + "/tools/test/topos/"
63 main.ONOSbench.scp( main.Mininet1,
64 filePath + topoName,
65 main.Mininet1.home,
66 direction="to" )
67 mnResult = main.Mininet1.startNet()
68 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
69 onpass="Mininet Started",
70 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070071
Devin Lim58046fa2017-07-05 16:55:00 -070072 def scalingMetadata( self ):
73 import re
Devin Lim142b5342017-07-20 15:22:39 -070074 main.step( "Generate initial metadata file" )
Devin Lim58046fa2017-07-05 16:55:00 -070075 main.scaling = main.params[ 'scaling' ].split( "," )
76 main.log.debug( main.scaling )
77 scale = main.scaling.pop( 0 )
78 main.log.debug( scale )
79 if "e" in scale:
80 equal = True
81 else:
82 equal = False
83 main.log.debug( equal )
Devin Lim142b5342017-07-20 15:22:39 -070084 main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
85 genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
Devin Lim58046fa2017-07-05 16:55:00 -070086 utilities.assert_equals( expect=main.TRUE, actual=genResult,
87 onpass="New cluster metadata file generated",
88 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070089
Devin Lim58046fa2017-07-05 16:55:00 -070090 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070091 main.step( "Generate initial metadata file" )
92 if main.Cluster.numCtrls >= 5:
93 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070094 else:
95 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim142b5342017-07-20 15:22:39 -070096 genResult = main.Server.generateFile( main.Cluster.numCtrls )
Devin Lim58046fa2017-07-05 16:55:00 -070097 utilities.assert_equals( expect=main.TRUE, actual=genResult,
98 onpass="New cluster metadata file generated",
99 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -0700100
Devin Lim142b5342017-07-20 15:22:39 -0700101 def setServerForCluster( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700102 import os
103 main.step( "Setup server for cluster metadata file" )
104 main.serverPort = main.params[ 'server' ][ 'port' ]
105 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
106 main.log.debug( "Root dir: {}".format( rootDir ) )
107 status = main.Server.start( main.ONOSbench,
108 rootDir,
109 port=main.serverPort,
110 logDir=main.logdir + "/server.log" )
111 utilities.assert_equals( expect=main.TRUE, actual=status,
112 onpass="Server started",
113 onfail="Failled to start SimpleHTTPServer" )
114
    def copyingBackupConfig( self ):
        """
        Back up the onos-service file, then patch it in place with sed so
        ONOS fetches its cluster metadata file from the test's HTTP server.
        """
        main.step( "Copying backup config files" )
        main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 main.onosServicepath,
                                 main.onosServicepath + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # Slashes in the URL are escaped so it survives use inside the
        # sed s/// replacement built below.
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # Insert an export of JAVA_OPTS right after the shebang's "bash" so
        # the metadata URI is set whenever onos-service runs.
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, main.onosServicepath )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        # First expect consumes the echoed command line (which contains the
        # metadata file name); the second waits for the shell prompt.
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )
145
146 def cleanUpOnosService( self ):
147 # Cleanup custom onos-service file
148 main.ONOSbench.scp( main.ONOSbench,
149 main.onosServicepath + ".backup",
150 main.onosServicepath,
151 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700152
Jon Halla440e872016-03-31 15:15:50 -0700153 def consistentCheck( self ):
154 """
155 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700156
Jon Hallf37d44d2017-05-24 10:37:30 -0700157 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700158 - onosCounters is the parsed json output of the counters command on
159 all nodes
160 - consistent is main.TRUE if all "TestON" counters are consitent across
161 all nodes or main.FALSE
Jon Halla440e872016-03-31 15:15:50 -0700162 """
Jon Halle1a3b752015-07-22 13:02:46 -0700163 try:
Jon Halla440e872016-03-31 15:15:50 -0700164 # Get onos counters results
165 onosCountersRaw = []
166 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700167 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700168 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700169 name="counters-" + str( ctrl ),
170 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700171 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700172 'randomTime': True } )
173 threads.append( t )
174 t.start()
175 for t in threads:
176 t.join()
177 onosCountersRaw.append( t.result )
178 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700179 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700180 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700181 onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700182 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700183 main.log.error( "Could not parse counters response from " +
Devin Lim142b5342017-07-20 15:22:39 -0700184 str( main.Cluster.active( i ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700185 main.log.warn( repr( onosCountersRaw[ i ] ) )
186 onosCounters.append( [] )
187
188 testCounters = {}
189 # make a list of all the "TestON-*" counters in ONOS
Jon Hall41d39f12016-04-11 22:54:35 -0700190 # lookes like a dict whose keys are the name of the ONOS node and
191 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700192 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700193 # }
194 # NOTE: There is an assumtion that all nodes are active
195 # based on the above for loops
196 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700197 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700198 if 'TestON' in key:
Devin Lim142b5342017-07-20 15:22:39 -0700199 node = str( main.Cluster.active( controller[ 0 ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700200 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700201 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700202 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700203 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700204 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700205 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700206 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
207 if all( tmp ):
208 consistent = main.TRUE
209 else:
210 consistent = main.FALSE
211 main.log.error( "ONOS nodes have different values for counters:\n" +
212 testCounters )
213 return ( onosCounters, consistent )
214 except Exception:
215 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700216 main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700217
218 def counterCheck( self, counterName, counterValue ):
219 """
220 Checks that TestON counters are consistent across all nodes and that
221 specified counter is in ONOS with the given value
222 """
223 try:
224 correctResults = main.TRUE
225 # Get onos counters results and consistentCheck
226 onosCounters, consistent = self.consistentCheck()
227 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700228 for i in range( len( main.Cluster.active() ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700229 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700230 onosValue = None
231 try:
232 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700233 except AttributeError:
Devin Lim142b5342017-07-20 15:22:39 -0700234 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -0700235 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700236 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700237 correctResults = main.FALSE
238 if onosValue == counterValue:
239 main.log.info( counterName + " counter value is correct" )
240 else:
Jon Hall41d39f12016-04-11 22:54:35 -0700241 main.log.error( counterName +
242 " counter value is incorrect," +
243 " expected value: " + str( counterValue ) +
244 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700245 correctResults = main.FALSE
246 return consistent and correctResults
247 except Exception:
248 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700249 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700250
251 def consistentLeaderboards( self, nodes ):
252 TOPIC = 'org.onosproject.election'
253 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700254 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700255 for n in range( 5 ): # Retry in case election is still happening
256 leaderList = []
257 # Get all leaderboards
258 for cli in nodes:
259 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
260 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700261 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700262 leaderList is not None
263 main.log.debug( leaderList )
264 main.log.warn( result )
265 if result:
266 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700267 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700268 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
269 return ( result, leaderList )
270
271 def nodesCheck( self, nodes ):
272 nodesOutput = []
273 results = True
274 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700275 for node in nodes:
276 t = main.Thread( target=node.nodes,
277 name="nodes-" + str( node ),
Jon Hallf37d44d2017-05-24 10:37:30 -0700278 args=[] )
Jon Hall41d39f12016-04-11 22:54:35 -0700279 threads.append( t )
280 t.start()
281
282 for t in threads:
283 t.join()
284 nodesOutput.append( t.result )
Jon Hallca319892017-06-15 15:25:22 -0700285 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Jon Hall41d39f12016-04-11 22:54:35 -0700286 for i in nodesOutput:
287 try:
288 current = json.loads( i )
289 activeIps = []
290 currentResult = False
291 for node in current:
Jon Hallf37d44d2017-05-24 10:37:30 -0700292 if node[ 'state' ] == 'READY':
293 activeIps.append( node[ 'ip' ] )
Jon Hall41d39f12016-04-11 22:54:35 -0700294 activeIps.sort()
295 if ips == activeIps:
296 currentResult = True
297 except ( ValueError, TypeError ):
298 main.log.error( "Error parsing nodes output" )
299 main.log.warn( repr( i ) )
300 currentResult = False
301 results = results and currentResult
302 return results
Jon Hallca319892017-06-15 15:25:22 -0700303
Devin Lim58046fa2017-07-05 16:55:00 -0700304 def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
305 # GRAPHS
306 # NOTE: important params here:
307 # job = name of Jenkins job
308 # Plot Name = Plot-HA, only can be used if multiple plots
309 # index = The number of the graph under plot name
310 job = testName
311 graphs = '<ac:structured-macro ac:name="html">\n'
312 graphs += '<ac:plain-text-body><![CDATA[\n'
313 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
314 '/plot/' + plotName + '/getPlot?index=' + str( index ) +\
315 '&width=500&height=300"' +\
316 'noborder="0" width="500" height="300" scrolling="yes" ' +\
317 'seamless="seamless"></iframe>\n'
318 graphs += ']]></ac:plain-text-body>\n'
319 graphs += '</ac:structured-macro>\n'
320 main.log.wiki( graphs )
Jon Hallca319892017-06-15 15:25:22 -0700321
    def initialSetUp( self, serviceClean=False ):
        """
        rest of initialSetup

        Runs the post-install part of test setup: optional packet capture,
        optional revert of ONOS service files, node readiness checks, app
        activation from params, ONOS configuration from params, and a final
        app-ID consistency check. Aborts the test if nodes are not ready.

        Arguments:
            serviceClean - when True, revert tools/package/init/onos.conf and
                           onos.service to their committed state
        """

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( self.nodesCheck,
                                       False,
                                       args=[ main.Cluster.active() ],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump the non-ACTIVE components from each node before aborting
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This shoudl be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700410
Jon Hallca319892017-06-15 15:25:22 -0700411 def commonChecks( self ):
412 # TODO: make this assertable or assert in here?
413 self.topicsCheck()
414 self.partitionsCheck()
415 self.pendingMapCheck()
416 self.appCheck()
417
418 def topicsCheck( self, extraTopics=[] ):
419 """
420 Check for work partition topics in leaders output
421 """
422 leaders = main.Cluster.next().leaders()
423 missing = False
424 try:
425 if leaders:
426 parsedLeaders = json.loads( leaders )
427 output = json.dumps( parsedLeaders,
428 sort_keys=True,
429 indent=4,
430 separators=( ',', ': ' ) )
431 main.log.debug( "Leaders: " + output )
432 # check for all intent partitions
433 topics = []
434 for i in range( 14 ):
435 topics.append( "work-partition-" + str( i ) )
436 topics += extraTopics
437 main.log.debug( topics )
438 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
439 for topic in topics:
440 if topic not in ONOStopics:
441 main.log.error( "Error: " + topic +
442 " not in leaders" )
443 missing = True
444 else:
445 main.log.error( "leaders() returned None" )
446 except ( ValueError, TypeError ):
447 main.log.exception( "Error parsing leaders" )
448 main.log.error( repr( leaders ) )
449 if missing:
450 #NOTE Can we refactor this into the Cluster class? Maybe an option to print the output of a command from each node?
451 for ctrl in main.Cluster.active():
452 response = ctrl.CLI.leaders( jsonFormat=False )
453 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
454 str( response ) )
455 return missing
456
457 def partitionsCheck( self ):
458 # TODO: return something assertable
459 partitions = main.Cluster.next().partitions()
460 try:
461 if partitions:
462 parsedPartitions = json.loads( partitions )
463 output = json.dumps( parsedPartitions,
464 sort_keys=True,
465 indent=4,
466 separators=( ',', ': ' ) )
467 main.log.debug( "Partitions: " + output )
468 # TODO check for a leader in all paritions
469 # TODO check for consistency among nodes
470 else:
471 main.log.error( "partitions() returned None" )
472 except ( ValueError, TypeError ):
473 main.log.exception( "Error parsing partitions" )
474 main.log.error( repr( partitions ) )
475
476 def pendingMapCheck( self ):
477 pendingMap = main.Cluster.next().pendingMap()
478 try:
479 if pendingMap:
480 parsedPending = json.loads( pendingMap )
481 output = json.dumps( parsedPending,
482 sort_keys=True,
483 indent=4,
484 separators=( ',', ': ' ) )
485 main.log.debug( "Pending map: " + output )
486 # TODO check something here?
487 else:
488 main.log.error( "pendingMap() returned None" )
489 except ( ValueError, TypeError ):
490 main.log.exception( "Error parsing pending map" )
491 main.log.error( repr( pendingMap ) )
492
493 def appCheck( self ):
494 """
495 Check App IDs on all nodes
496 """
497 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
498 appResults = main.Cluster.command( "appToIDCheck" )
499 appCheck = all( i == main.TRUE for i in appResults )
500 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700501 ctrl = main.Cluster.active( 0 )
Jon Hallca319892017-06-15 15:25:22 -0700502 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
503 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
504 return appCheck
505
Jon Halle0f0b342017-04-18 11:43:47 -0700506 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
507 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700508 completedValues = main.Cluster.command( "workQueueTotalCompleted",
509 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700510 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700511 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700512 completedResult = all( completedResults )
513 if not completedResult:
514 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
515 workQueueName, completed, completedValues ) )
516
517 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700518 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
519 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700520 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700521 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700522 inProgressResult = all( inProgressResults )
523 if not inProgressResult:
524 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
525 workQueueName, inProgress, inProgressValues ) )
526
527 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700528 pendingValues = main.Cluster.command( "workQueueTotalPending",
529 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700530 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700531 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700532 pendingResult = all( pendingResults )
533 if not pendingResult:
534 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
535 workQueueName, pending, pendingValues ) )
536 return completedResult and inProgressResult and pendingResult
537
Devin Lim58046fa2017-07-05 16:55:00 -0700538 def assignDevices( self, main ):
539 """
540 Assign devices to controllers
541 """
542 import re
Devin Lim58046fa2017-07-05 16:55:00 -0700543 assert main, "main not defined"
544 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700545
546 main.case( "Assigning devices to controllers" )
547 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
548 "and check that an ONOS node becomes the " + \
549 "master of the device."
550 main.step( "Assign switches to controllers" )
551
Jon Hallca319892017-06-15 15:25:22 -0700552 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -0700553 swList = []
554 for i in range( 1, 29 ):
555 swList.append( "s" + str( i ) )
556 main.Mininet1.assignSwController( sw=swList, ip=ipList )
557
558 mastershipCheck = main.TRUE
559 for i in range( 1, 29 ):
560 response = main.Mininet1.getSwController( "s" + str( i ) )
561 try:
562 main.log.info( str( response ) )
563 except Exception:
564 main.log.info( repr( response ) )
Devin Lim142b5342017-07-20 15:22:39 -0700565 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -0700566 if re.search( "tcp:" + ctrl.ipAddress, response ):
Devin Lim58046fa2017-07-05 16:55:00 -0700567 mastershipCheck = mastershipCheck and main.TRUE
568 else:
Jon Hallca319892017-06-15 15:25:22 -0700569 main.log.error( "Error, node " + repr( ctrl )+ " is " +
Devin Lim58046fa2017-07-05 16:55:00 -0700570 "not in the list of controllers s" +
571 str( i ) + " is connecting to." )
572 mastershipCheck = main.FALSE
573 utilities.assert_equals(
574 expect=main.TRUE,
575 actual=mastershipCheck,
576 onpass="Switch mastership assigned correctly",
577 onfail="Switches not assigned correctly to controllers" )
Jon Hallca319892017-06-15 15:25:22 -0700578
Devin Lim58046fa2017-07-05 16:55:00 -0700579 def assignIntents( self, main ):
580 """
581 Assign intents
582 """
583 import time
584 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700585 assert main, "main not defined"
586 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700587 try:
588 main.HAlabels
589 except ( NameError, AttributeError ):
590 main.log.error( "main.HAlabels not defined, setting to []" )
591 main.HAlabels = []
592 try:
593 main.HAdata
594 except ( NameError, AttributeError ):
595 main.log.error( "data not defined, setting to []" )
596 main.HAdata = []
597 main.case( "Adding host Intents" )
598 main.caseExplanation = "Discover hosts by using pingall then " +\
599 "assign predetermined host-to-host intents." +\
600 " After installation, check that the intent" +\
601 " is distributed to all nodes and the state" +\
602 " is INSTALLED"
603
604 # install onos-app-fwd
605 main.step( "Install reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700606 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -0700607 installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700608 utilities.assert_equals( expect=main.TRUE, actual=installResults,
609 onpass="Install fwd successful",
610 onfail="Install fwd failed" )
611
612 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700613 appCheck = self.appCheck()
614 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700615 onpass="App Ids seem to be correct",
616 onfail="Something is wrong with app Ids" )
617
618 main.step( "Discovering Hosts( Via pingall for now )" )
619 # FIXME: Once we have a host discovery mechanism, use that instead
620 # REACTIVE FWD test
621 pingResult = main.FALSE
622 passMsg = "Reactive Pingall test passed"
623 time1 = time.time()
624 pingResult = main.Mininet1.pingall()
625 time2 = time.time()
626 if not pingResult:
627 main.log.warn( "First pingall failed. Trying again..." )
628 pingResult = main.Mininet1.pingall()
629 passMsg += " on the second try"
630 utilities.assert_equals(
631 expect=main.TRUE,
632 actual=pingResult,
633 onpass=passMsg,
634 onfail="Reactive Pingall failed, " +
635 "one or more ping pairs failed" )
636 main.log.info( "Time for pingall: %2f seconds" %
637 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700638 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700639 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700640 # timeout for fwd flows
641 time.sleep( 11 )
642 # uninstall onos-app-fwd
643 main.step( "Uninstall reactive forwarding app" )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700644 uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700645 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
646 onpass="Uninstall fwd successful",
647 onfail="Uninstall fwd failed" )
648
649 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700650 appCheck2 = self.appCheck()
651 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700652 onpass="App Ids seem to be correct",
653 onfail="Something is wrong with app Ids" )
654
655 main.step( "Add host intents via cli" )
656 intentIds = []
657 # TODO: move the host numbers to params
658 # Maybe look at all the paths we ping?
659 intentAddResult = True
660 hostResult = main.TRUE
661 for i in range( 8, 18 ):
662 main.log.info( "Adding host intent between h" + str( i ) +
663 " and h" + str( i + 10 ) )
664 host1 = "00:00:00:00:00:" + \
665 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
666 host2 = "00:00:00:00:00:" + \
667 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
668 # NOTE: getHost can return None
Jon Hallca319892017-06-15 15:25:22 -0700669 host1Dict = onosCli.CLI.getHost( host1 )
670 host2Dict = onosCli.CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700671 host1Id = None
672 host2Id = None
673 if host1Dict and host2Dict:
674 host1Id = host1Dict.get( 'id', None )
675 host2Id = host2Dict.get( 'id', None )
676 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700677 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700678 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700679 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700680 if tmpId:
681 main.log.info( "Added intent with id: " + tmpId )
682 intentIds.append( tmpId )
683 else:
684 main.log.error( "addHostIntent returned: " +
685 repr( tmpId ) )
686 else:
687 main.log.error( "Error, getHost() failed for h" + str( i ) +
688 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700689 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700690 try:
Jon Hallca319892017-06-15 15:25:22 -0700691 output = json.dumps( json.loads( hosts ),
692 sort_keys=True,
693 indent=4,
694 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700695 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700696 output = repr( hosts )
697 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700698 hostResult = main.FALSE
699 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
700 onpass="Found a host id for each host",
701 onfail="Error looking up host ids" )
702
703 intentStart = time.time()
704 onosIds = onosCli.getAllIntentsId()
705 main.log.info( "Submitted intents: " + str( intentIds ) )
706 main.log.info( "Intents in ONOS: " + str( onosIds ) )
707 for intent in intentIds:
708 if intent in onosIds:
709 pass # intent submitted is in onos
710 else:
711 intentAddResult = False
712 if intentAddResult:
713 intentStop = time.time()
714 else:
715 intentStop = None
716 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700717 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700718 intentStates = []
719 installedCheck = True
720 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
721 count = 0
722 try:
723 for intent in json.loads( intents ):
724 state = intent.get( 'state', None )
725 if "INSTALLED" not in state:
726 installedCheck = False
727 intentId = intent.get( 'id', None )
728 intentStates.append( ( intentId, state ) )
729 except ( ValueError, TypeError ):
730 main.log.exception( "Error parsing intents" )
731 # add submitted intents not in the store
732 tmplist = [ i for i, s in intentStates ]
733 missingIntents = False
734 for i in intentIds:
735 if i not in tmplist:
736 intentStates.append( ( i, " - " ) )
737 missingIntents = True
738 intentStates.sort()
739 for i, s in intentStates:
740 count += 1
741 main.log.info( "%-6s%-15s%-15s" %
742 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700743 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700744
745 intentAddResult = bool( intentAddResult and not missingIntents and
746 installedCheck )
747 if not intentAddResult:
748 main.log.error( "Error in pushing host intents to ONOS" )
749
750 main.step( "Intent Anti-Entropy dispersion" )
751 for j in range( 100 ):
752 correct = True
753 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700754 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700755 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700756 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700757 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700758 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700759 str( sorted( onosIds ) ) )
760 if sorted( ids ) != sorted( intentIds ):
761 main.log.warn( "Set of intent IDs doesn't match" )
762 correct = False
763 break
764 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700765 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700766 for intent in intents:
767 if intent[ 'state' ] != "INSTALLED":
768 main.log.warn( "Intent " + intent[ 'id' ] +
769 " is " + intent[ 'state' ] )
770 correct = False
771 break
772 if correct:
773 break
774 else:
775 time.sleep( 1 )
776 if not intentStop:
777 intentStop = time.time()
778 global gossipTime
779 gossipTime = intentStop - intentStart
780 main.log.info( "It took about " + str( gossipTime ) +
781 " seconds for all intents to appear in each node" )
782 append = False
783 title = "Gossip Intents"
784 count = 1
785 while append is False:
786 curTitle = title + str( count )
787 if curTitle not in main.HAlabels:
788 main.HAlabels.append( curTitle )
789 main.HAdata.append( str( gossipTime ) )
790 append = True
791 else:
792 count += 1
793 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700794 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700795 utilities.assert_greater_equals(
796 expect=maxGossipTime, actual=gossipTime,
797 onpass="ECM anti-entropy for intents worked within " +
798 "expected time",
799 onfail="Intent ECM anti-entropy took too long. " +
800 "Expected time:{}, Actual time:{}".format( maxGossipTime,
801 gossipTime ) )
802 if gossipTime <= maxGossipTime:
803 intentAddResult = True
804
Jon Hallca319892017-06-15 15:25:22 -0700805 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700806 if not intentAddResult or "key" in pendingMap:
807 import time
808 installedCheck = True
809 main.log.info( "Sleeping 60 seconds to see if intents are found" )
810 time.sleep( 60 )
811 onosIds = onosCli.getAllIntentsId()
812 main.log.info( "Submitted intents: " + str( intentIds ) )
813 main.log.info( "Intents in ONOS: " + str( onosIds ) )
814 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700815 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700816 intentStates = []
817 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
818 count = 0
819 try:
820 for intent in json.loads( intents ):
821 # Iter through intents of a node
822 state = intent.get( 'state', None )
823 if "INSTALLED" not in state:
824 installedCheck = False
825 intentId = intent.get( 'id', None )
826 intentStates.append( ( intentId, state ) )
827 except ( ValueError, TypeError ):
828 main.log.exception( "Error parsing intents" )
829 # add submitted intents not in the store
830 tmplist = [ i for i, s in intentStates ]
831 for i in intentIds:
832 if i not in tmplist:
833 intentStates.append( ( i, " - " ) )
834 intentStates.sort()
835 for i, s in intentStates:
836 count += 1
837 main.log.info( "%-6s%-15s%-15s" %
838 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700839 self.topicsCheck( [ "org.onosproject.election" ] )
840 self.partitionsCheck()
841 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700842
Jon Hallca319892017-06-15 15:25:22 -0700843 def pingAcrossHostIntent( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -0700844 """
845 Ping across added host intents
846 """
847 import json
848 import time
Devin Lim58046fa2017-07-05 16:55:00 -0700849 assert main, "main not defined"
850 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700851 main.case( "Verify connectivity by sending traffic across Intents" )
852 main.caseExplanation = "Ping across added host intents to check " +\
853 "functionality and check the state of " +\
854 "the intent"
855
Jon Hallca319892017-06-15 15:25:22 -0700856 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700857 main.step( "Check Intent state" )
858 installedCheck = False
859 loopCount = 0
860 while not installedCheck and loopCount < 40:
861 installedCheck = True
862 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700863 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700864 intentStates = []
865 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
866 count = 0
867 # Iter through intents of a node
868 try:
869 for intent in json.loads( intents ):
870 state = intent.get( 'state', None )
871 if "INSTALLED" not in state:
872 installedCheck = False
873 intentId = intent.get( 'id', None )
874 intentStates.append( ( intentId, state ) )
875 except ( ValueError, TypeError ):
876 main.log.exception( "Error parsing intents." )
877 # Print states
878 intentStates.sort()
879 for i, s in intentStates:
880 count += 1
881 main.log.info( "%-6s%-15s%-15s" %
882 ( str( count ), str( i ), str( s ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700883 if not installedCheck:
884 time.sleep( 1 )
885 loopCount += 1
886 utilities.assert_equals( expect=True, actual=installedCheck,
887 onpass="Intents are all INSTALLED",
888 onfail="Intents are not all in " +
889 "INSTALLED state" )
890
891 main.step( "Ping across added host intents" )
892 PingResult = main.TRUE
893 for i in range( 8, 18 ):
894 ping = main.Mininet1.pingHost( src="h" + str( i ),
895 target="h" + str( i + 10 ) )
896 PingResult = PingResult and ping
897 if ping == main.FALSE:
898 main.log.warn( "Ping failed between h" + str( i ) +
899 " and h" + str( i + 10 ) )
900 elif ping == main.TRUE:
901 main.log.info( "Ping test passed!" )
902 # Don't set PingResult or you'd override failures
903 if PingResult == main.FALSE:
904 main.log.error(
905 "Intents have not been installed correctly, pings failed." )
906 # TODO: pretty print
Devin Lim58046fa2017-07-05 16:55:00 -0700907 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700908 tmpIntents = onosCli.CLI.intents()
Jon Hallca319892017-06-15 15:25:22 -0700909 output = json.dumps( json.loads( tmpIntents ),
910 sort_keys=True,
911 indent=4,
912 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700913 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700914 output = repr( tmpIntents )
915 main.log.debug( "ONOS1 intents: " + output )
Devin Lim58046fa2017-07-05 16:55:00 -0700916 utilities.assert_equals(
917 expect=main.TRUE,
918 actual=PingResult,
919 onpass="Intents have been installed correctly and pings work",
920 onfail="Intents have not been installed correctly, pings failed." )
921
922 main.step( "Check leadership of topics" )
Jon Hallca319892017-06-15 15:25:22 -0700923 topicsCheck = self.topicsCheck()
924 utilities.assert_equals( expect=False, actual=topicsCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700925 onpass="intent Partitions is in leaders",
Jon Hallca319892017-06-15 15:25:22 -0700926 onfail="Some topics were lost" )
927 self.partitionsCheck()
928 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700929
930 if not installedCheck:
931 main.log.info( "Waiting 60 seconds to see if the state of " +
932 "intents change" )
933 time.sleep( 60 )
934 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700935 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700936 intentStates = []
937 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
938 count = 0
939 # Iter through intents of a node
940 try:
941 for intent in json.loads( intents ):
942 state = intent.get( 'state', None )
943 if "INSTALLED" not in state:
944 installedCheck = False
945 intentId = intent.get( 'id', None )
946 intentStates.append( ( intentId, state ) )
947 except ( ValueError, TypeError ):
948 main.log.exception( "Error parsing intents." )
949 intentStates.sort()
950 for i, s in intentStates:
951 count += 1
952 main.log.info( "%-6s%-15s%-15s" %
953 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700954 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700955
Devin Lim58046fa2017-07-05 16:55:00 -0700956 # Print flowrules
Devin Lime9f0ccf2017-08-11 17:25:12 -0700957 main.log.debug( onosCli.CLI.flows() )
Devin Lim58046fa2017-07-05 16:55:00 -0700958 main.step( "Wait a minute then ping again" )
959 # the wait is above
960 PingResult = main.TRUE
961 for i in range( 8, 18 ):
962 ping = main.Mininet1.pingHost( src="h" + str( i ),
963 target="h" + str( i + 10 ) )
964 PingResult = PingResult and ping
965 if ping == main.FALSE:
966 main.log.warn( "Ping failed between h" + str( i ) +
967 " and h" + str( i + 10 ) )
968 elif ping == main.TRUE:
969 main.log.info( "Ping test passed!" )
970 # Don't set PingResult or you'd override failures
971 if PingResult == main.FALSE:
972 main.log.error(
973 "Intents have not been installed correctly, pings failed." )
974 # TODO: pretty print
Jon Hallca319892017-06-15 15:25:22 -0700975 main.log.warn( str( onosCli.name ) + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -0700976 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700977 tmpIntents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700978 main.log.warn( json.dumps( json.loads( tmpIntents ),
979 sort_keys=True,
980 indent=4,
981 separators=( ',', ': ' ) ) )
982 except ( ValueError, TypeError ):
983 main.log.warn( repr( tmpIntents ) )
984 utilities.assert_equals(
985 expect=main.TRUE,
986 actual=PingResult,
987 onpass="Intents have been installed correctly and pings work",
988 onfail="Intents have not been installed correctly, pings failed." )
989
Devin Lim142b5342017-07-20 15:22:39 -0700990 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700991 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700992 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700993 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700994 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700995 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -0700996 actual=rolesNotNull,
997 onpass="Each device has a master",
998 onfail="Some devices don't have a master assigned" )
999
Devin Lim142b5342017-07-20 15:22:39 -07001000 def checkTheRole( self ):
1001 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -07001002 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -07001003 consistentMastership = True
1004 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001005 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001006 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001007 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001008 main.log.error( "Error in getting " + node + " roles" )
1009 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001010 repr( ONOSMastership[ i ] ) )
1011 rolesResults = False
1012 utilities.assert_equals(
1013 expect=True,
1014 actual=rolesResults,
1015 onpass="No error in reading roles output",
1016 onfail="Error in reading roles from ONOS" )
1017
1018 main.step( "Check for consistency in roles from each controller" )
1019 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1020 main.log.info(
1021 "Switch roles are consistent across all ONOS nodes" )
1022 else:
1023 consistentMastership = False
1024 utilities.assert_equals(
1025 expect=True,
1026 actual=consistentMastership,
1027 onpass="Switch roles are consistent across all ONOS nodes",
1028 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -07001029 return ONOSMastership, rolesResults, consistentMastership
1030
1031 def checkingIntents( self ):
1032 main.step( "Get the intents from each controller" )
1033 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
1034 intentsResults = True
1035 for i in range( len( ONOSIntents ) ):
1036 node = str( main.Cluster.active( i ) )
1037 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1038 main.log.error( "Error in getting " + node + " intents" )
1039 main.log.warn( node + " intents response: " +
1040 repr( ONOSIntents[ i ] ) )
1041 intentsResults = False
1042 utilities.assert_equals(
1043 expect=True,
1044 actual=intentsResults,
1045 onpass="No error in reading intents output",
1046 onfail="Error in reading intents from ONOS" )
1047 return ONOSIntents, intentsResults
1048
1049 def readingState( self, main ):
1050 """
1051 Reading state of ONOS
1052 """
1053 import json
1054 import time
1055 assert main, "main not defined"
1056 assert utilities.assert_equals, "utilities.assert_equals not defined"
1057 try:
1058 from tests.dependencies.topology import Topology
1059 except ImportError:
1060 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001061 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001062 try:
1063 main.topoRelated
1064 except ( NameError, AttributeError ):
1065 main.topoRelated = Topology()
1066 main.case( "Setting up and gathering data for current state" )
1067 # The general idea for this test case is to pull the state of
1068 # ( intents,flows, topology,... ) from each ONOS node
1069 # We can then compare them with each other and also with past states
1070
1071 global mastershipState
1072 mastershipState = '[]'
1073
1074 self.checkRoleNotNull()
1075
1076 main.step( "Get the Mastership of each switch from each controller" )
1077 mastershipCheck = main.FALSE
1078
1079 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001080
1081 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001082 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001083 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001084 try:
1085 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001086 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001087 json.dumps(
1088 json.loads( ONOSMastership[ i ] ),
1089 sort_keys=True,
1090 indent=4,
1091 separators=( ',', ': ' ) ) )
1092 except ( ValueError, TypeError ):
1093 main.log.warn( repr( ONOSMastership[ i ] ) )
1094 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001095 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001096 mastershipState = ONOSMastership[ 0 ]
1097
Devin Lim142b5342017-07-20 15:22:39 -07001098
Devin Lim58046fa2017-07-05 16:55:00 -07001099 global intentState
1100 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001101 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001102 intentCheck = main.FALSE
1103 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001104
Devin Lim58046fa2017-07-05 16:55:00 -07001105
1106 main.step( "Check for consistency in Intents from each controller" )
1107 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1108 main.log.info( "Intents are consistent across all ONOS " +
1109 "nodes" )
1110 else:
1111 consistentIntents = False
1112 main.log.error( "Intents not consistent" )
1113 utilities.assert_equals(
1114 expect=True,
1115 actual=consistentIntents,
1116 onpass="Intents are consistent across all ONOS nodes",
1117 onfail="ONOS nodes have different views of intents" )
1118
1119 if intentsResults:
1120 # Try to make it easy to figure out what is happening
1121 #
1122 # Intent ONOS1 ONOS2 ...
1123 # 0x01 INSTALLED INSTALLING
1124 # ... ... ...
1125 # ... ... ...
1126 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001127 for ctrl in main.Cluster.active():
1128 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001129 main.log.warn( title )
1130 # get all intent keys in the cluster
1131 keys = []
1132 try:
1133 # Get the set of all intent keys
1134 for nodeStr in ONOSIntents:
1135 node = json.loads( nodeStr )
1136 for intent in node:
1137 keys.append( intent.get( 'id' ) )
1138 keys = set( keys )
1139 # For each intent key, print the state on each node
1140 for key in keys:
1141 row = "%-13s" % key
1142 for nodeStr in ONOSIntents:
1143 node = json.loads( nodeStr )
1144 for intent in node:
1145 if intent.get( 'id', "Error" ) == key:
1146 row += "%-15s" % intent.get( 'state' )
1147 main.log.warn( row )
1148 # End of intent state table
1149 except ValueError as e:
1150 main.log.exception( e )
1151 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1152
1153 if intentsResults and not consistentIntents:
1154 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001155 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001156 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1157 sort_keys=True,
1158 indent=4,
1159 separators=( ',', ': ' ) ) )
1160 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001161 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001162 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001163 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001164 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1165 sort_keys=True,
1166 indent=4,
1167 separators=( ',', ': ' ) ) )
1168 else:
Jon Hallca319892017-06-15 15:25:22 -07001169 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001170 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001171 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001172 intentState = ONOSIntents[ 0 ]
1173
1174 main.step( "Get the flows from each controller" )
1175 global flowState
1176 flowState = []
Devin Lim142b5342017-07-20 15:22:39 -07001177 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001178 ONOSFlowsJson = []
1179 flowCheck = main.FALSE
1180 consistentFlows = True
1181 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001182 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001183 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001184 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001185 main.log.error( "Error in getting " + node + " flows" )
1186 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001187 repr( ONOSFlows[ i ] ) )
1188 flowsResults = False
1189 ONOSFlowsJson.append( None )
1190 else:
1191 try:
1192 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1193 except ( ValueError, TypeError ):
1194 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001195 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001196 " response as json." )
1197 main.log.error( repr( ONOSFlows[ i ] ) )
1198 ONOSFlowsJson.append( None )
1199 flowsResults = False
1200 utilities.assert_equals(
1201 expect=True,
1202 actual=flowsResults,
1203 onpass="No error in reading flows output",
1204 onfail="Error in reading flows from ONOS" )
1205
1206 main.step( "Check for consistency in Flows from each controller" )
1207 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1208 if all( tmp ):
1209 main.log.info( "Flow count is consistent across all ONOS nodes" )
1210 else:
1211 consistentFlows = False
1212 utilities.assert_equals(
1213 expect=True,
1214 actual=consistentFlows,
1215 onpass="The flow count is consistent across all ONOS nodes",
1216 onfail="ONOS nodes have different flow counts" )
1217
1218 if flowsResults and not consistentFlows:
1219 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001220 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001221 try:
1222 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001223 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001224 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1225 indent=4, separators=( ',', ': ' ) ) )
1226 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001227 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001228 repr( ONOSFlows[ i ] ) )
1229 elif flowsResults and consistentFlows:
1230 flowCheck = main.TRUE
1231 flowState = ONOSFlows[ 0 ]
1232
1233 main.step( "Get the OF Table entries" )
1234 global flows
1235 flows = []
1236 for i in range( 1, 29 ):
1237 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1238 if flowCheck == main.FALSE:
1239 for table in flows:
1240 main.log.warn( table )
1241 # TODO: Compare switch flow tables with ONOS flow tables
1242
1243 main.step( "Start continuous pings" )
1244 main.Mininet2.pingLong(
1245 src=main.params[ 'PING' ][ 'source1' ],
1246 target=main.params[ 'PING' ][ 'target1' ],
1247 pingTime=500 )
1248 main.Mininet2.pingLong(
1249 src=main.params[ 'PING' ][ 'source2' ],
1250 target=main.params[ 'PING' ][ 'target2' ],
1251 pingTime=500 )
1252 main.Mininet2.pingLong(
1253 src=main.params[ 'PING' ][ 'source3' ],
1254 target=main.params[ 'PING' ][ 'target3' ],
1255 pingTime=500 )
1256 main.Mininet2.pingLong(
1257 src=main.params[ 'PING' ][ 'source4' ],
1258 target=main.params[ 'PING' ][ 'target4' ],
1259 pingTime=500 )
1260 main.Mininet2.pingLong(
1261 src=main.params[ 'PING' ][ 'source5' ],
1262 target=main.params[ 'PING' ][ 'target5' ],
1263 pingTime=500 )
1264 main.Mininet2.pingLong(
1265 src=main.params[ 'PING' ][ 'source6' ],
1266 target=main.params[ 'PING' ][ 'target6' ],
1267 pingTime=500 )
1268 main.Mininet2.pingLong(
1269 src=main.params[ 'PING' ][ 'source7' ],
1270 target=main.params[ 'PING' ][ 'target7' ],
1271 pingTime=500 )
1272 main.Mininet2.pingLong(
1273 src=main.params[ 'PING' ][ 'source8' ],
1274 target=main.params[ 'PING' ][ 'target8' ],
1275 pingTime=500 )
1276 main.Mininet2.pingLong(
1277 src=main.params[ 'PING' ][ 'source9' ],
1278 target=main.params[ 'PING' ][ 'target9' ],
1279 pingTime=500 )
1280 main.Mininet2.pingLong(
1281 src=main.params[ 'PING' ][ 'source10' ],
1282 target=main.params[ 'PING' ][ 'target10' ],
1283 pingTime=500 )
1284
1285 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001286 devices = main.topoRelated.getAll( "devices" )
1287 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1288 ports = main.topoRelated.getAll( "ports" )
1289 links = main.topoRelated.getAll( "links" )
1290 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001291 # Compare json objects for hosts and dataplane clusters
1292
1293 # hosts
1294 main.step( "Host view is consistent across ONOS nodes" )
1295 consistentHostsResult = main.TRUE
1296 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001297 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001298 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1299 if hosts[ controller ] == hosts[ 0 ]:
1300 continue
1301 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001302 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001303 controllerStr +
1304 " is inconsistent with ONOS1" )
1305 main.log.warn( repr( hosts[ controller ] ) )
1306 consistentHostsResult = main.FALSE
1307
1308 else:
Jon Hallca319892017-06-15 15:25:22 -07001309 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001310 controllerStr )
1311 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001312 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001313 " hosts response: " +
1314 repr( hosts[ controller ] ) )
1315 utilities.assert_equals(
1316 expect=main.TRUE,
1317 actual=consistentHostsResult,
1318 onpass="Hosts view is consistent across all ONOS nodes",
1319 onfail="ONOS nodes have different views of hosts" )
1320
1321 main.step( "Each host has an IP address" )
1322 ipResult = main.TRUE
1323 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001324 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001325 if hosts[ controller ]:
1326 for host in hosts[ controller ]:
1327 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001328 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001329 controllerStr + ": " + str( host ) )
1330 ipResult = main.FALSE
1331 utilities.assert_equals(
1332 expect=main.TRUE,
1333 actual=ipResult,
1334 onpass="The ips of the hosts aren't empty",
1335 onfail="The ip of at least one host is missing" )
1336
1337 # Strongly connected clusters of devices
1338 main.step( "Cluster view is consistent across ONOS nodes" )
1339 consistentClustersResult = main.TRUE
1340 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001341 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001342 if "Error" not in clusters[ controller ]:
1343 if clusters[ controller ] == clusters[ 0 ]:
1344 continue
1345 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001346 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001347 " is inconsistent with ONOS1" )
1348 consistentClustersResult = main.FALSE
1349
1350 else:
1351 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001352 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001353 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001354 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001355 " clusters response: " +
1356 repr( clusters[ controller ] ) )
1357 utilities.assert_equals(
1358 expect=main.TRUE,
1359 actual=consistentClustersResult,
1360 onpass="Clusters view is consistent across all ONOS nodes",
1361 onfail="ONOS nodes have different views of clusters" )
1362 if not consistentClustersResult:
1363 main.log.debug( clusters )
1364
1365 # there should always only be one cluster
1366 main.step( "Cluster view correct across ONOS nodes" )
1367 try:
1368 numClusters = len( json.loads( clusters[ 0 ] ) )
1369 except ( ValueError, TypeError ):
1370 main.log.exception( "Error parsing clusters[0]: " +
1371 repr( clusters[ 0 ] ) )
1372 numClusters = "ERROR"
1373 utilities.assert_equals(
1374 expect=1,
1375 actual=numClusters,
1376 onpass="ONOS shows 1 SCC",
1377 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1378
1379 main.step( "Comparing ONOS topology to MN" )
1380 devicesResults = main.TRUE
1381 linksResults = main.TRUE
1382 hostsResults = main.TRUE
1383 mnSwitches = main.Mininet1.getSwitches()
1384 mnLinks = main.Mininet1.getLinks()
1385 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001386 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001387 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001388 currentDevicesResult = main.topoRelated.compareDevicePort(
1389 main.Mininet1, controller,
1390 mnSwitches, devices, ports )
1391 utilities.assert_equals( expect=main.TRUE,
1392 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001393 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001394 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001395 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001396 " Switches view is incorrect" )
1397
1398 currentLinksResult = main.topoRelated.compareBase( links, controller,
1399 main.Mininet1.compareLinks,
1400 [ mnSwitches, mnLinks ] )
1401 utilities.assert_equals( expect=main.TRUE,
1402 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001403 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001404 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001405 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001406 " links view is incorrect" )
1407
1408 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1409 currentHostsResult = main.Mininet1.compareHosts(
1410 mnHosts,
1411 hosts[ controller ] )
1412 else:
1413 currentHostsResult = main.FALSE
1414 utilities.assert_equals( expect=main.TRUE,
1415 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001416 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001417 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001418 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001419 " hosts don't match Mininet" )
1420
1421 devicesResults = devicesResults and currentDevicesResult
1422 linksResults = linksResults and currentLinksResult
1423 hostsResults = hostsResults and currentHostsResult
1424
1425 main.step( "Device information is correct" )
1426 utilities.assert_equals(
1427 expect=main.TRUE,
1428 actual=devicesResults,
1429 onpass="Device information is correct",
1430 onfail="Device information is incorrect" )
1431
1432 main.step( "Links are correct" )
1433 utilities.assert_equals(
1434 expect=main.TRUE,
1435 actual=linksResults,
1436 onpass="Link are correct",
1437 onfail="Links are incorrect" )
1438
1439 main.step( "Hosts are correct" )
1440 utilities.assert_equals(
1441 expect=main.TRUE,
1442 actual=hostsResults,
1443 onpass="Hosts are correct",
1444 onfail="Hosts are incorrect" )
1445
1446 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001447 """
1448 Check for basic functionality with distributed primitives
1449 """
Jon Halle0f0b342017-04-18 11:43:47 -07001450 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001451 try:
1452 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001453 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001454 assert main.pCounterName, "main.pCounterName not defined"
1455 assert main.onosSetName, "main.onosSetName not defined"
1456 # NOTE: assert fails if value is 0/None/Empty/False
1457 try:
1458 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001459 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001460 main.log.error( "main.pCounterValue not defined, setting to 0" )
1461 main.pCounterValue = 0
1462 try:
1463 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001464 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001465 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001466 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001467 # Variables for the distributed primitives tests. These are local only
1468 addValue = "a"
1469 addAllValue = "a b c d e f"
1470 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001471 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001472 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001473 workQueueName = "TestON-Queue"
1474 workQueueCompleted = 0
1475 workQueueInProgress = 0
1476 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001477
1478 description = "Check for basic functionality with distributed " +\
1479 "primitives"
1480 main.case( description )
1481 main.caseExplanation = "Test the methods of the distributed " +\
1482 "primitives (counters and sets) throught the cli"
1483 # DISTRIBUTED ATOMIC COUNTERS
1484 # Partitioned counters
1485 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001486 pCounters = main.Cluster.command( "counterTestAddAndGet",
1487 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001488 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001489 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001490 main.pCounterValue += 1
1491 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001492 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001493 pCounterResults = True
1494 for i in addedPValues:
1495 tmpResult = i in pCounters
1496 pCounterResults = pCounterResults and tmpResult
1497 if not tmpResult:
1498 main.log.error( str( i ) + " is not in partitioned "
1499 "counter incremented results" )
1500 utilities.assert_equals( expect=True,
1501 actual=pCounterResults,
1502 onpass="Default counter incremented",
1503 onfail="Error incrementing default" +
1504 " counter" )
1505
1506 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001507 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1508 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001509 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001510 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001511 addedPValues.append( main.pCounterValue )
1512 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001513 # Check that counter incremented numController times
1514 pCounterResults = True
1515 for i in addedPValues:
1516 tmpResult = i in pCounters
1517 pCounterResults = pCounterResults and tmpResult
1518 if not tmpResult:
1519 main.log.error( str( i ) + " is not in partitioned "
1520 "counter incremented results" )
1521 utilities.assert_equals( expect=True,
1522 actual=pCounterResults,
1523 onpass="Default counter incremented",
1524 onfail="Error incrementing default" +
1525 " counter" )
1526
1527 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001528 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001529 utilities.assert_equals( expect=main.TRUE,
1530 actual=incrementCheck,
1531 onpass="Added counters are correct",
1532 onfail="Added counters are incorrect" )
1533
1534 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001535 pCounters = main.Cluster.command( "counterTestAddAndGet",
1536 args=[ main.pCounterName ],
1537 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001538 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001539 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001540 main.pCounterValue += -8
1541 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001542 # Check that counter incremented numController times
1543 pCounterResults = True
1544 for i in addedPValues:
1545 tmpResult = i in pCounters
1546 pCounterResults = pCounterResults and tmpResult
1547 if not tmpResult:
1548 main.log.error( str( i ) + " is not in partitioned "
1549 "counter incremented results" )
1550 utilities.assert_equals( expect=True,
1551 actual=pCounterResults,
1552 onpass="Default counter incremented",
1553 onfail="Error incrementing default" +
1554 " counter" )
1555
1556 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001557 pCounters = main.Cluster.command( "counterTestAddAndGet",
1558 args=[ main.pCounterName ],
1559 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001560 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001561 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001562 main.pCounterValue += 5
1563 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001564
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001565 # Check that counter incremented numController times
1566 pCounterResults = True
1567 for i in addedPValues:
1568 tmpResult = i in pCounters
1569 pCounterResults = pCounterResults and tmpResult
1570 if not tmpResult:
1571 main.log.error( str( i ) + " is not in partitioned "
1572 "counter incremented results" )
1573 utilities.assert_equals( expect=True,
1574 actual=pCounterResults,
1575 onpass="Default counter incremented",
1576 onfail="Error incrementing default" +
1577 " counter" )
1578
1579 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001580 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1581 args=[ main.pCounterName ],
1582 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001583 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001584 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001585 addedPValues.append( main.pCounterValue )
1586 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001587 # Check that counter incremented numController times
1588 pCounterResults = True
1589 for i in addedPValues:
1590 tmpResult = i in pCounters
1591 pCounterResults = pCounterResults and tmpResult
1592 if not tmpResult:
1593 main.log.error( str( i ) + " is not in partitioned "
1594 "counter incremented results" )
1595 utilities.assert_equals( expect=True,
1596 actual=pCounterResults,
1597 onpass="Default counter incremented",
1598 onfail="Error incrementing default" +
1599 " counter" )
1600
1601 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001602 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001603 utilities.assert_equals( expect=main.TRUE,
1604 actual=incrementCheck,
1605 onpass="Added counters are correct",
1606 onfail="Added counters are incorrect" )
1607
1608 # DISTRIBUTED SETS
1609 main.step( "Distributed Set get" )
1610 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001611 getResponses = main.Cluster.command( "setTestGet",
1612 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001613 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001614 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001615 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001616 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001617 current = set( getResponses[ i ] )
1618 if len( current ) == len( getResponses[ i ] ):
1619 # no repeats
1620 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001621 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001622 " has incorrect view" +
1623 " of set " + main.onosSetName + ":\n" +
1624 str( getResponses[ i ] ) )
1625 main.log.debug( "Expected: " + str( main.onosSet ) )
1626 main.log.debug( "Actual: " + str( current ) )
1627 getResults = main.FALSE
1628 else:
1629 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001630 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001631 " has repeat elements in" +
1632 " set " + main.onosSetName + ":\n" +
1633 str( getResponses[ i ] ) )
1634 getResults = main.FALSE
1635 elif getResponses[ i ] == main.ERROR:
1636 getResults = main.FALSE
1637 utilities.assert_equals( expect=main.TRUE,
1638 actual=getResults,
1639 onpass="Set elements are correct",
1640 onfail="Set elements are incorrect" )
1641
1642 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001643 sizeResponses = main.Cluster.command( "setTestSize",
1644 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001645 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001646 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001647 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001648 if size != sizeResponses[ i ]:
1649 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001650 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001651 " expected a size of " + str( size ) +
1652 " for set " + main.onosSetName +
1653 " but got " + str( sizeResponses[ i ] ) )
1654 utilities.assert_equals( expect=main.TRUE,
1655 actual=sizeResults,
1656 onpass="Set sizes are correct",
1657 onfail="Set sizes are incorrect" )
1658
1659 main.step( "Distributed Set add()" )
1660 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001661 addResponses = main.Cluster.command( "setTestAdd",
1662 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001663 # main.TRUE = successfully changed the set
1664 # main.FALSE = action resulted in no change in set
1665 # main.ERROR - Some error in executing the function
1666 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001667 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001668 if addResponses[ i ] == main.TRUE:
1669 # All is well
1670 pass
1671 elif addResponses[ i ] == main.FALSE:
1672 # Already in set, probably fine
1673 pass
1674 elif addResponses[ i ] == main.ERROR:
1675 # Error in execution
1676 addResults = main.FALSE
1677 else:
1678 # unexpected result
1679 addResults = main.FALSE
1680 if addResults != main.TRUE:
1681 main.log.error( "Error executing set add" )
1682
1683 # Check if set is still correct
1684 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001685 getResponses = main.Cluster.command( "setTestGet",
1686 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001687 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001688 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001689 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001690 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001691 current = set( getResponses[ i ] )
1692 if len( current ) == len( getResponses[ i ] ):
1693 # no repeats
1694 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001695 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001696 " of set " + main.onosSetName + ":\n" +
1697 str( getResponses[ i ] ) )
1698 main.log.debug( "Expected: " + str( main.onosSet ) )
1699 main.log.debug( "Actual: " + str( current ) )
1700 getResults = main.FALSE
1701 else:
1702 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001703 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001704 " set " + main.onosSetName + ":\n" +
1705 str( getResponses[ i ] ) )
1706 getResults = main.FALSE
1707 elif getResponses[ i ] == main.ERROR:
1708 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001709 sizeResponses = main.Cluster.command( "setTestSize",
1710 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001711 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001712 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001713 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001714 if size != sizeResponses[ i ]:
1715 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001716 main.log.error( node + " expected a size of " +
1717 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001718 " but got " + str( sizeResponses[ i ] ) )
1719 addResults = addResults and getResults and sizeResults
1720 utilities.assert_equals( expect=main.TRUE,
1721 actual=addResults,
1722 onpass="Set add correct",
1723 onfail="Set add was incorrect" )
1724
1725 main.step( "Distributed Set addAll()" )
1726 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001727 addResponses = main.Cluster.command( "setTestAdd",
1728 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001729 # main.TRUE = successfully changed the set
1730 # main.FALSE = action resulted in no change in set
1731 # main.ERROR - Some error in executing the function
1732 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001733 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001734 if addResponses[ i ] == main.TRUE:
1735 # All is well
1736 pass
1737 elif addResponses[ i ] == main.FALSE:
1738 # Already in set, probably fine
1739 pass
1740 elif addResponses[ i ] == main.ERROR:
1741 # Error in execution
1742 addAllResults = main.FALSE
1743 else:
1744 # unexpected result
1745 addAllResults = main.FALSE
1746 if addAllResults != main.TRUE:
1747 main.log.error( "Error executing set addAll" )
1748
1749 # Check if set is still correct
1750 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001751 getResponses = main.Cluster.command( "setTestGet",
1752 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001753 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001754 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001755 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001756 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001757 current = set( getResponses[ i ] )
1758 if len( current ) == len( getResponses[ i ] ):
1759 # no repeats
1760 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001761 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001762 " of set " + main.onosSetName + ":\n" +
1763 str( getResponses[ i ] ) )
1764 main.log.debug( "Expected: " + str( main.onosSet ) )
1765 main.log.debug( "Actual: " + str( current ) )
1766 getResults = main.FALSE
1767 else:
1768 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001769 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001770 " set " + main.onosSetName + ":\n" +
1771 str( getResponses[ i ] ) )
1772 getResults = main.FALSE
1773 elif getResponses[ i ] == main.ERROR:
1774 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001775 sizeResponses = main.Cluster.command( "setTestSize",
1776 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001777 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001778 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001779 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001780 if size != sizeResponses[ i ]:
1781 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001782 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001783 " for set " + main.onosSetName +
1784 " but got " + str( sizeResponses[ i ] ) )
1785 addAllResults = addAllResults and getResults and sizeResults
1786 utilities.assert_equals( expect=main.TRUE,
1787 actual=addAllResults,
1788 onpass="Set addAll correct",
1789 onfail="Set addAll was incorrect" )
1790
1791 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001792 containsResponses = main.Cluster.command( "setTestGet",
1793 args=[ main.onosSetName ],
1794 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001795 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001796 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001797 if containsResponses[ i ] == main.ERROR:
1798 containsResults = main.FALSE
1799 else:
1800 containsResults = containsResults and\
1801 containsResponses[ i ][ 1 ]
1802 utilities.assert_equals( expect=main.TRUE,
1803 actual=containsResults,
1804 onpass="Set contains is functional",
1805 onfail="Set contains failed" )
1806
1807 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001808 containsAllResponses = main.Cluster.command( "setTestGet",
1809 args=[ main.onosSetName ],
1810 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001811 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001812 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001813 if containsResponses[ i ] == main.ERROR:
1814 containsResults = main.FALSE
1815 else:
1816 containsResults = containsResults and\
1817 containsResponses[ i ][ 1 ]
1818 utilities.assert_equals( expect=main.TRUE,
1819 actual=containsAllResults,
1820 onpass="Set containsAll is functional",
1821 onfail="Set containsAll failed" )
1822
1823 main.step( "Distributed Set remove()" )
1824 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001825 removeResponses = main.Cluster.command( "setTestRemove",
1826 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001827 # main.TRUE = successfully changed the set
1828 # main.FALSE = action resulted in no change in set
1829 # main.ERROR - Some error in executing the function
1830 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001831 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001832 if removeResponses[ i ] == main.TRUE:
1833 # All is well
1834 pass
1835 elif removeResponses[ i ] == main.FALSE:
1836 # not in set, probably fine
1837 pass
1838 elif removeResponses[ i ] == main.ERROR:
1839 # Error in execution
1840 removeResults = main.FALSE
1841 else:
1842 # unexpected result
1843 removeResults = main.FALSE
1844 if removeResults != main.TRUE:
1845 main.log.error( "Error executing set remove" )
1846
1847 # Check if set is still correct
1848 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001849 getResponses = main.Cluster.command( "setTestGet",
1850 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001851 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001852 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001853 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001854 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001855 current = set( getResponses[ i ] )
1856 if len( current ) == len( getResponses[ i ] ):
1857 # no repeats
1858 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001859 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001860 " of set " + main.onosSetName + ":\n" +
1861 str( getResponses[ i ] ) )
1862 main.log.debug( "Expected: " + str( main.onosSet ) )
1863 main.log.debug( "Actual: " + str( current ) )
1864 getResults = main.FALSE
1865 else:
1866 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001867 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001868 " set " + main.onosSetName + ":\n" +
1869 str( getResponses[ i ] ) )
1870 getResults = main.FALSE
1871 elif getResponses[ i ] == main.ERROR:
1872 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001873 sizeResponses = main.Cluster.command( "setTestSize",
1874 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001875 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001876 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001877 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001878 if size != sizeResponses[ i ]:
1879 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001880 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001881 " for set " + main.onosSetName +
1882 " but got " + str( sizeResponses[ i ] ) )
1883 removeResults = removeResults and getResults and sizeResults
1884 utilities.assert_equals( expect=main.TRUE,
1885 actual=removeResults,
1886 onpass="Set remove correct",
1887 onfail="Set remove was incorrect" )
1888
1889 main.step( "Distributed Set removeAll()" )
1890 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001891 removeAllResponses = main.Cluster.command( "setTestRemove",
1892 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001893 # main.TRUE = successfully changed the set
1894 # main.FALSE = action resulted in no change in set
1895 # main.ERROR - Some error in executing the function
1896 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001897 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001898 if removeAllResponses[ i ] == main.TRUE:
1899 # All is well
1900 pass
1901 elif removeAllResponses[ i ] == main.FALSE:
1902 # not in set, probably fine
1903 pass
1904 elif removeAllResponses[ i ] == main.ERROR:
1905 # Error in execution
1906 removeAllResults = main.FALSE
1907 else:
1908 # unexpected result
1909 removeAllResults = main.FALSE
1910 if removeAllResults != main.TRUE:
1911 main.log.error( "Error executing set removeAll" )
1912
1913 # Check if set is still correct
1914 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001915 getResponses = main.Cluster.command( "setTestGet",
1916 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001917 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001918 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001919 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001920 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001921 current = set( getResponses[ i ] )
1922 if len( current ) == len( getResponses[ i ] ):
1923 # no repeats
1924 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001925 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001926 " of set " + main.onosSetName + ":\n" +
1927 str( getResponses[ i ] ) )
1928 main.log.debug( "Expected: " + str( main.onosSet ) )
1929 main.log.debug( "Actual: " + str( current ) )
1930 getResults = main.FALSE
1931 else:
1932 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001933 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001934 " set " + main.onosSetName + ":\n" +
1935 str( getResponses[ i ] ) )
1936 getResults = main.FALSE
1937 elif getResponses[ i ] == main.ERROR:
1938 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001939 sizeResponses = main.Cluster.command( "setTestSize",
1940 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001941 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001942 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001943 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001944 if size != sizeResponses[ i ]:
1945 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001946 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001947 " for set " + main.onosSetName +
1948 " but got " + str( sizeResponses[ i ] ) )
1949 removeAllResults = removeAllResults and getResults and sizeResults
1950 utilities.assert_equals( expect=main.TRUE,
1951 actual=removeAllResults,
1952 onpass="Set removeAll correct",
1953 onfail="Set removeAll was incorrect" )
1954
1955 main.step( "Distributed Set addAll()" )
1956 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001957 addResponses = main.Cluster.command( "setTestAdd",
1958 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001959 # main.TRUE = successfully changed the set
1960 # main.FALSE = action resulted in no change in set
1961 # main.ERROR - Some error in executing the function
1962 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001963 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001964 if addResponses[ i ] == main.TRUE:
1965 # All is well
1966 pass
1967 elif addResponses[ i ] == main.FALSE:
1968 # Already in set, probably fine
1969 pass
1970 elif addResponses[ i ] == main.ERROR:
1971 # Error in execution
1972 addAllResults = main.FALSE
1973 else:
1974 # unexpected result
1975 addAllResults = main.FALSE
1976 if addAllResults != main.TRUE:
1977 main.log.error( "Error executing set addAll" )
1978
1979 # Check if set is still correct
1980 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001981 getResponses = main.Cluster.command( "setTestGet",
1982 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001983 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001984 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001985 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001986 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001987 current = set( getResponses[ i ] )
1988 if len( current ) == len( getResponses[ i ] ):
1989 # no repeats
1990 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001991 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001992 " of set " + main.onosSetName + ":\n" +
1993 str( getResponses[ i ] ) )
1994 main.log.debug( "Expected: " + str( main.onosSet ) )
1995 main.log.debug( "Actual: " + str( current ) )
1996 getResults = main.FALSE
1997 else:
1998 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001999 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002000 " set " + main.onosSetName + ":\n" +
2001 str( getResponses[ i ] ) )
2002 getResults = main.FALSE
2003 elif getResponses[ i ] == main.ERROR:
2004 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002005 sizeResponses = main.Cluster.command( "setTestSize",
2006 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002007 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002008 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002009 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002010 if size != sizeResponses[ i ]:
2011 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002012 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002013 " for set " + main.onosSetName +
2014 " but got " + str( sizeResponses[ i ] ) )
2015 addAllResults = addAllResults and getResults and sizeResults
2016 utilities.assert_equals( expect=main.TRUE,
2017 actual=addAllResults,
2018 onpass="Set addAll correct",
2019 onfail="Set addAll was incorrect" )
2020
2021 main.step( "Distributed Set clear()" )
2022 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07002023 clearResponses = main.Cluster.command( "setTestRemove",
2024 args=[ main.onosSetName, " " ], # Values doesn't matter
2025 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002026 # main.TRUE = successfully changed the set
2027 # main.FALSE = action resulted in no change in set
2028 # main.ERROR - Some error in executing the function
2029 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002030 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002031 if clearResponses[ i ] == main.TRUE:
2032 # All is well
2033 pass
2034 elif clearResponses[ i ] == main.FALSE:
2035 # Nothing set, probably fine
2036 pass
2037 elif clearResponses[ i ] == main.ERROR:
2038 # Error in execution
2039 clearResults = main.FALSE
2040 else:
2041 # unexpected result
2042 clearResults = main.FALSE
2043 if clearResults != main.TRUE:
2044 main.log.error( "Error executing set clear" )
2045
2046 # Check if set is still correct
2047 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002048 getResponses = main.Cluster.command( "setTestGet",
2049 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002050 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002051 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002052 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002053 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002054 current = set( getResponses[ i ] )
2055 if len( current ) == len( getResponses[ i ] ):
2056 # no repeats
2057 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002058 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002059 " of set " + main.onosSetName + ":\n" +
2060 str( getResponses[ i ] ) )
2061 main.log.debug( "Expected: " + str( main.onosSet ) )
2062 main.log.debug( "Actual: " + str( current ) )
2063 getResults = main.FALSE
2064 else:
2065 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002066 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002067 " set " + main.onosSetName + ":\n" +
2068 str( getResponses[ i ] ) )
2069 getResults = main.FALSE
2070 elif getResponses[ i ] == main.ERROR:
2071 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002072 sizeResponses = main.Cluster.command( "setTestSize",
2073 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002074 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002075 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002076 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002077 if size != sizeResponses[ i ]:
2078 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002079 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002080 " for set " + main.onosSetName +
2081 " but got " + str( sizeResponses[ i ] ) )
2082 clearResults = clearResults and getResults and sizeResults
2083 utilities.assert_equals( expect=main.TRUE,
2084 actual=clearResults,
2085 onpass="Set clear correct",
2086 onfail="Set clear was incorrect" )
2087
2088 main.step( "Distributed Set addAll()" )
2089 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002090 addResponses = main.Cluster.command( "setTestAdd",
2091 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002092 # main.TRUE = successfully changed the set
2093 # main.FALSE = action resulted in no change in set
2094 # main.ERROR - Some error in executing the function
2095 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002096 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002097 if addResponses[ i ] == main.TRUE:
2098 # All is well
2099 pass
2100 elif addResponses[ i ] == main.FALSE:
2101 # Already in set, probably fine
2102 pass
2103 elif addResponses[ i ] == main.ERROR:
2104 # Error in execution
2105 addAllResults = main.FALSE
2106 else:
2107 # unexpected result
2108 addAllResults = main.FALSE
2109 if addAllResults != main.TRUE:
2110 main.log.error( "Error executing set addAll" )
2111
2112 # Check if set is still correct
2113 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002114 getResponses = main.Cluster.command( "setTestGet",
2115 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002116 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002117 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002118 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002119 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002120 current = set( getResponses[ i ] )
2121 if len( current ) == len( getResponses[ i ] ):
2122 # no repeats
2123 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002124 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002125 " of set " + main.onosSetName + ":\n" +
2126 str( getResponses[ i ] ) )
2127 main.log.debug( "Expected: " + str( main.onosSet ) )
2128 main.log.debug( "Actual: " + str( current ) )
2129 getResults = main.FALSE
2130 else:
2131 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002132 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002133 " set " + main.onosSetName + ":\n" +
2134 str( getResponses[ i ] ) )
2135 getResults = main.FALSE
2136 elif getResponses[ i ] == main.ERROR:
2137 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002138 sizeResponses = main.Cluster.command( "setTestSize",
2139 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002140 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002141 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002142 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002143 if size != sizeResponses[ i ]:
2144 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002145 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002146 " for set " + main.onosSetName +
2147 " but got " + str( sizeResponses[ i ] ) )
2148 addAllResults = addAllResults and getResults and sizeResults
2149 utilities.assert_equals( expect=main.TRUE,
2150 actual=addAllResults,
2151 onpass="Set addAll correct",
2152 onfail="Set addAll was incorrect" )
2153
2154 main.step( "Distributed Set retain()" )
2155 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002156 retainResponses = main.Cluster.command( "setTestRemove",
2157 args=[ main.onosSetName, retainValue ],
2158 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002159 # main.TRUE = successfully changed the set
2160 # main.FALSE = action resulted in no change in set
2161 # main.ERROR - Some error in executing the function
2162 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002163 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002164 if retainResponses[ i ] == main.TRUE:
2165 # All is well
2166 pass
2167 elif retainResponses[ i ] == main.FALSE:
2168 # Already in set, probably fine
2169 pass
2170 elif retainResponses[ i ] == main.ERROR:
2171 # Error in execution
2172 retainResults = main.FALSE
2173 else:
2174 # unexpected result
2175 retainResults = main.FALSE
2176 if retainResults != main.TRUE:
2177 main.log.error( "Error executing set retain" )
2178
2179 # Check if set is still correct
2180 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002181 getResponses = main.Cluster.command( "setTestGet",
2182 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002183 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002184 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002185 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002186 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002187 current = set( getResponses[ i ] )
2188 if len( current ) == len( getResponses[ i ] ):
2189 # no repeats
2190 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002191 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002192 " of set " + main.onosSetName + ":\n" +
2193 str( getResponses[ i ] ) )
2194 main.log.debug( "Expected: " + str( main.onosSet ) )
2195 main.log.debug( "Actual: " + str( current ) )
2196 getResults = main.FALSE
2197 else:
2198 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002199 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002200 " set " + main.onosSetName + ":\n" +
2201 str( getResponses[ i ] ) )
2202 getResults = main.FALSE
2203 elif getResponses[ i ] == main.ERROR:
2204 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002205 sizeResponses = main.Cluster.command( "setTestSize",
2206 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002207 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002208 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002209 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002210 if size != sizeResponses[ i ]:
2211 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002212 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002213 str( size ) + " for set " + main.onosSetName +
2214 " but got " + str( sizeResponses[ i ] ) )
2215 retainResults = retainResults and getResults and sizeResults
2216 utilities.assert_equals( expect=main.TRUE,
2217 actual=retainResults,
2218 onpass="Set retain correct",
2219 onfail="Set retain was incorrect" )
2220
2221 # Transactional maps
2222 main.step( "Partitioned Transactional maps put" )
2223 tMapValue = "Testing"
2224 numKeys = 100
2225 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002226 ctrl = main.Cluster.next()
2227 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002228 if putResponses and len( putResponses ) == 100:
2229 for i in putResponses:
2230 if putResponses[ i ][ 'value' ] != tMapValue:
2231 putResult = False
2232 else:
2233 putResult = False
2234 if not putResult:
2235 main.log.debug( "Put response values: " + str( putResponses ) )
2236 utilities.assert_equals( expect=True,
2237 actual=putResult,
2238 onpass="Partitioned Transactional Map put successful",
2239 onfail="Partitioned Transactional Map put values are incorrect" )
2240
2241 main.step( "Partitioned Transactional maps get" )
2242 # FIXME: is this sleep needed?
2243 time.sleep( 5 )
2244
2245 getCheck = True
2246 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002247 getResponses = main.Cluster.command( "transactionalMapGet",
2248 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002249 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002250 for node in getResponses:
2251 if node != tMapValue:
2252 valueCheck = False
2253 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002254 main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002255 main.log.warn( getResponses )
2256 getCheck = getCheck and valueCheck
2257 utilities.assert_equals( expect=True,
2258 actual=getCheck,
2259 onpass="Partitioned Transactional Map get values were correct",
2260 onfail="Partitioned Transactional Map values incorrect" )
2261
2262 # DISTRIBUTED ATOMIC VALUE
2263 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002264 getValues = main.Cluster.command( "valueTestGet",
2265 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002266 main.log.debug( getValues )
2267 # Check the results
2268 atomicValueGetResult = True
2269 expected = valueValue if valueValue is not None else "null"
2270 main.log.debug( "Checking for value of " + expected )
2271 for i in getValues:
2272 if i != expected:
2273 atomicValueGetResult = False
2274 utilities.assert_equals( expect=True,
2275 actual=atomicValueGetResult,
2276 onpass="Atomic Value get successful",
2277 onfail="Error getting atomic Value " +
2278 str( valueValue ) + ", found: " +
2279 str( getValues ) )
2280
2281 main.step( "Atomic Value set()" )
2282 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002283 setValues = main.Cluster.command( "valueTestSet",
2284 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002285 main.log.debug( setValues )
2286 # Check the results
2287 atomicValueSetResults = True
2288 for i in setValues:
2289 if i != main.TRUE:
2290 atomicValueSetResults = False
2291 utilities.assert_equals( expect=True,
2292 actual=atomicValueSetResults,
2293 onpass="Atomic Value set successful",
2294 onfail="Error setting atomic Value" +
2295 str( setValues ) )
2296
2297 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002298 getValues = main.Cluster.command( "valueTestGet",
2299 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002300 main.log.debug( getValues )
2301 # Check the results
2302 atomicValueGetResult = True
2303 expected = valueValue if valueValue is not None else "null"
2304 main.log.debug( "Checking for value of " + expected )
2305 for i in getValues:
2306 if i != expected:
2307 atomicValueGetResult = False
2308 utilities.assert_equals( expect=True,
2309 actual=atomicValueGetResult,
2310 onpass="Atomic Value get successful",
2311 onfail="Error getting atomic Value " +
2312 str( valueValue ) + ", found: " +
2313 str( getValues ) )
2314
2315 main.step( "Atomic Value compareAndSet()" )
2316 oldValue = valueValue
2317 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002318 ctrl = main.Cluster.next()
2319 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002320 main.log.debug( CASValue )
2321 utilities.assert_equals( expect=main.TRUE,
2322 actual=CASValue,
2323 onpass="Atomic Value comapreAndSet successful",
2324 onfail="Error setting atomic Value:" +
2325 str( CASValue ) )
2326
2327 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002328 getValues = main.Cluster.command( "valueTestGet",
2329 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002330 main.log.debug( getValues )
2331 # Check the results
2332 atomicValueGetResult = True
2333 expected = valueValue if valueValue is not None else "null"
2334 main.log.debug( "Checking for value of " + expected )
2335 for i in getValues:
2336 if i != expected:
2337 atomicValueGetResult = False
2338 utilities.assert_equals( expect=True,
2339 actual=atomicValueGetResult,
2340 onpass="Atomic Value get successful",
2341 onfail="Error getting atomic Value " +
2342 str( valueValue ) + ", found: " +
2343 str( getValues ) )
2344
2345 main.step( "Atomic Value getAndSet()" )
2346 oldValue = valueValue
2347 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002348 ctrl = main.Cluster.next()
2349 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002350 main.log.debug( GASValue )
2351 expected = oldValue if oldValue is not None else "null"
2352 utilities.assert_equals( expect=expected,
2353 actual=GASValue,
2354 onpass="Atomic Value GAS successful",
2355 onfail="Error with GetAndSet atomic Value: expected " +
2356 str( expected ) + ", found: " +
2357 str( GASValue ) )
2358
2359 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002360 getValues = main.Cluster.command( "valueTestGet",
2361 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002362 main.log.debug( getValues )
2363 # Check the results
2364 atomicValueGetResult = True
2365 expected = valueValue if valueValue is not None else "null"
2366 main.log.debug( "Checking for value of " + expected )
2367 for i in getValues:
2368 if i != expected:
2369 atomicValueGetResult = False
2370 utilities.assert_equals( expect=True,
2371 actual=atomicValueGetResult,
2372 onpass="Atomic Value get successful",
2373 onfail="Error getting atomic Value: expected " +
2374 str( valueValue ) + ", found: " +
2375 str( getValues ) )
2376
2377 main.step( "Atomic Value destory()" )
2378 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002379 ctrl = main.Cluster.next()
2380 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002381 main.log.debug( destroyResult )
2382 # Check the results
2383 utilities.assert_equals( expect=main.TRUE,
2384 actual=destroyResult,
2385 onpass="Atomic Value destroy successful",
2386 onfail="Error destroying atomic Value" )
2387
2388 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002389 getValues = main.Cluster.command( "valueTestGet",
2390 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002391 main.log.debug( getValues )
2392 # Check the results
2393 atomicValueGetResult = True
2394 expected = valueValue if valueValue is not None else "null"
2395 main.log.debug( "Checking for value of " + expected )
2396 for i in getValues:
2397 if i != expected:
2398 atomicValueGetResult = False
2399 utilities.assert_equals( expect=True,
2400 actual=atomicValueGetResult,
2401 onpass="Atomic Value get successful",
2402 onfail="Error getting atomic Value " +
2403 str( valueValue ) + ", found: " +
2404 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002405
2406 # WORK QUEUES
2407 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002408 ctrl = main.Cluster.next()
2409 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002410 workQueuePending += 1
2411 main.log.debug( addResult )
2412 # Check the results
2413 utilities.assert_equals( expect=main.TRUE,
2414 actual=addResult,
2415 onpass="Work Queue add successful",
2416 onfail="Error adding to Work Queue" )
2417
2418 main.step( "Check the work queue stats" )
2419 statsResults = self.workQueueStatsCheck( workQueueName,
2420 workQueueCompleted,
2421 workQueueInProgress,
2422 workQueuePending )
2423 utilities.assert_equals( expect=True,
2424 actual=statsResults,
2425 onpass="Work Queue stats correct",
2426 onfail="Work Queue stats incorrect " )
2427
2428 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002429 ctrl = main.Cluster.next()
2430 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002431 workQueuePending += 2
2432 main.log.debug( addMultipleResult )
2433 # Check the results
2434 utilities.assert_equals( expect=main.TRUE,
2435 actual=addMultipleResult,
2436 onpass="Work Queue add multiple successful",
2437 onfail="Error adding multiple items to Work Queue" )
2438
2439 main.step( "Check the work queue stats" )
2440 statsResults = self.workQueueStatsCheck( workQueueName,
2441 workQueueCompleted,
2442 workQueueInProgress,
2443 workQueuePending )
2444 utilities.assert_equals( expect=True,
2445 actual=statsResults,
2446 onpass="Work Queue stats correct",
2447 onfail="Work Queue stats incorrect " )
2448
2449 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002450 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002451 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002452 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002453 workQueuePending -= number
2454 workQueueCompleted += number
2455 main.log.debug( take1Result )
2456 # Check the results
2457 utilities.assert_equals( expect=main.TRUE,
2458 actual=take1Result,
2459 onpass="Work Queue takeAndComplete 1 successful",
2460 onfail="Error taking 1 from Work Queue" )
2461
2462 main.step( "Check the work queue stats" )
2463 statsResults = self.workQueueStatsCheck( workQueueName,
2464 workQueueCompleted,
2465 workQueueInProgress,
2466 workQueuePending )
2467 utilities.assert_equals( expect=True,
2468 actual=statsResults,
2469 onpass="Work Queue stats correct",
2470 onfail="Work Queue stats incorrect " )
2471
2472 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002473 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002474 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002475 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002476 workQueuePending -= number
2477 workQueueCompleted += number
2478 main.log.debug( take2Result )
2479 # Check the results
2480 utilities.assert_equals( expect=main.TRUE,
2481 actual=take2Result,
2482 onpass="Work Queue takeAndComplete 2 successful",
2483 onfail="Error taking 2 from Work Queue" )
2484
2485 main.step( "Check the work queue stats" )
2486 statsResults = self.workQueueStatsCheck( workQueueName,
2487 workQueueCompleted,
2488 workQueueInProgress,
2489 workQueuePending )
2490 utilities.assert_equals( expect=True,
2491 actual=statsResults,
2492 onpass="Work Queue stats correct",
2493 onfail="Work Queue stats incorrect " )
2494
2495 main.step( "Work Queue destroy()" )
2496 valueValue = None
2497 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002498 ctrl = main.Cluster.next()
2499 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002500 workQueueCompleted = 0
2501 workQueueInProgress = 0
2502 workQueuePending = 0
2503 main.log.debug( destroyResult )
2504 # Check the results
2505 utilities.assert_equals( expect=main.TRUE,
2506 actual=destroyResult,
2507 onpass="Work Queue destroy successful",
2508 onfail="Error destroying Work Queue" )
2509
2510 main.step( "Check the work queue stats" )
2511 statsResults = self.workQueueStatsCheck( workQueueName,
2512 workQueueCompleted,
2513 workQueueInProgress,
2514 workQueuePending )
2515 utilities.assert_equals( expect=True,
2516 actual=statsResults,
2517 onpass="Work Queue stats correct",
2518 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002519 except Exception as e:
2520 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002521
2522 def cleanUp( self, main ):
2523 """
2524 Clean up
2525 """
2526 import os
2527 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002528 assert main, "main not defined"
2529 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002530
2531 # printing colors to terminal
2532 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2533 'blue': '\033[94m', 'green': '\033[92m',
2534 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2535 main.case( "Test Cleanup" )
2536 main.step( "Killing tcpdumps" )
2537 main.Mininet2.stopTcpdump()
2538
2539 testname = main.TEST
2540 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2541 main.step( "Copying MN pcap and ONOS log files to test station" )
2542 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2543 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2544 # NOTE: MN Pcap file is being saved to logdir.
2545 # We scp this file as MN and TestON aren't necessarily the same vm
2546
2547 # FIXME: To be replaced with a Jenkin's post script
2548 # TODO: Load these from params
2549 # NOTE: must end in /
2550 logFolder = "/opt/onos/log/"
2551 logFiles = [ "karaf.log", "karaf.log.1" ]
2552 # NOTE: must end in /
2553 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002554 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002555 dstName = main.logdir + "/" + ctrl.name + "-" + f
2556 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002557 logFolder + f, dstName )
2558 # std*.log's
2559 # NOTE: must end in /
2560 logFolder = "/opt/onos/var/"
2561 logFiles = [ "stderr.log", "stdout.log" ]
2562 # NOTE: must end in /
2563 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002564 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002565 dstName = main.logdir + "/" + ctrl.name + "-" + f
2566 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002567 logFolder + f, dstName )
2568 else:
2569 main.log.debug( "skipping saving log files" )
2570
2571 main.step( "Stopping Mininet" )
2572 mnResult = main.Mininet1.stopNet()
2573 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2574 onpass="Mininet stopped",
2575 onfail="MN cleanup NOT successful" )
2576
2577 main.step( "Checking ONOS Logs for errors" )
Devin Lim142b5342017-07-20 15:22:39 -07002578 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002579 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2580 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002581
2582 try:
2583 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2584 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2585 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2586 timerLog.close()
2587 except NameError as e:
2588 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002589
Devin Lim58046fa2017-07-05 16:55:00 -07002590 def assignMastership( self, main ):
2591 """
2592 Assign mastership to controllers
2593 """
2594 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002595 assert main, "main not defined"
2596 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002597
2598 main.case( "Assigning Controller roles for switches" )
2599 main.caseExplanation = "Check that ONOS is connected to each " +\
2600 "device. Then manually assign" +\
2601 " mastership to specific ONOS nodes using" +\
2602 " 'device-role'"
2603 main.step( "Assign mastership of switches to specific controllers" )
2604 # Manually assign mastership to the controller we want
2605 roleCall = main.TRUE
2606
2607 ipList = []
2608 deviceList = []
Jon Hallca319892017-06-15 15:25:22 -07002609 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07002610 try:
2611 # Assign mastership to specific controllers. This assignment was
2612 # determined for a 7 node cluser, but will work with any sized
2613 # cluster
2614 for i in range( 1, 29 ): # switches 1 through 28
2615 # set up correct variables:
2616 if i == 1:
2617 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002618 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002619 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
2620 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002621 c = 1 % main.Cluster.numCtrls
2622 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002623 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
2624 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002625 c = 1 % main.Cluster.numCtrls
2626 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002627 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
2628 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002629 c = 3 % main.Cluster.numCtrls
2630 ip = main.Cluster.active( c ).ip_address # ONOS4
Devin Lim58046fa2017-07-05 16:55:00 -07002631 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
2632 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002633 c = 2 % main.Cluster.numCtrls
2634 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002635 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
2636 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002637 c = 2 % main.Cluster.numCtrls
2638 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002639 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
2640 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002641 c = 5 % main.Cluster.numCtrls
2642 ip = main.Cluster.active( c ).ip_address # ONOS6
Devin Lim58046fa2017-07-05 16:55:00 -07002643 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
2644 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002645 c = 4 % main.Cluster.numCtrls
2646 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002647 dpid = '3' + str( i ).zfill( 3 )
2648 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2649 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002650 c = 6 % main.Cluster.numCtrls
2651 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002652 dpid = '6' + str( i ).zfill( 3 )
2653 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2654 elif i == 28:
2655 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002656 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002657 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
2658 else:
2659 main.log.error( "You didn't write an else statement for " +
2660 "switch s" + str( i ) )
2661 roleCall = main.FALSE
2662 # Assign switch
2663 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2664 # TODO: make this controller dynamic
2665 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
2666 ipList.append( ip )
2667 deviceList.append( deviceId )
2668 except ( AttributeError, AssertionError ):
2669 main.log.exception( "Something is wrong with ONOS device view" )
2670 main.log.info( onosCli.devices() )
2671 utilities.assert_equals(
2672 expect=main.TRUE,
2673 actual=roleCall,
2674 onpass="Re-assigned switch mastership to designated controller",
2675 onfail="Something wrong with deviceRole calls" )
2676
2677 main.step( "Check mastership was correctly assigned" )
2678 roleCheck = main.TRUE
2679 # NOTE: This is due to the fact that device mastership change is not
2680 # atomic and is actually a multi step process
2681 time.sleep( 5 )
2682 for i in range( len( ipList ) ):
2683 ip = ipList[ i ]
2684 deviceId = deviceList[ i ]
2685 # Check assignment
2686 master = onosCli.getRole( deviceId ).get( 'master' )
2687 if ip in master:
2688 roleCheck = roleCheck and main.TRUE
2689 else:
2690 roleCheck = roleCheck and main.FALSE
2691 main.log.error( "Error, controller " + ip + " is not" +
2692 " master " + "of device " +
2693 str( deviceId ) + ". Master is " +
2694 repr( master ) + "." )
2695 utilities.assert_equals(
2696 expect=main.TRUE,
2697 actual=roleCheck,
2698 onpass="Switches were successfully reassigned to designated " +
2699 "controller",
2700 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002701
Devin Lim58046fa2017-07-05 16:55:00 -07002702 def bringUpStoppedNode( self, main ):
2703 """
2704 The bring up stopped nodes
2705 """
2706 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002707 assert main, "main not defined"
2708 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002709 assert main.kill, "main.kill not defined"
2710 main.case( "Restart minority of ONOS nodes" )
2711
2712 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2713 startResults = main.TRUE
2714 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002715 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002716 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002717 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002718 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2719 onpass="ONOS nodes started successfully",
2720 onfail="ONOS nodes NOT successfully started" )
2721
2722 main.step( "Checking if ONOS is up yet" )
2723 count = 0
2724 onosIsupResult = main.FALSE
2725 while onosIsupResult == main.FALSE and count < 10:
2726 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002727 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002728 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002729 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002730 count = count + 1
2731 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2732 onpass="ONOS restarted successfully",
2733 onfail="ONOS restart NOT successful" )
2734
Jon Hallca319892017-06-15 15:25:22 -07002735 main.step( "Restarting ONOS nodes" )
Devin Lim58046fa2017-07-05 16:55:00 -07002736 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002737 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002738 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002739 ctrl.startOnosCli( ctrl.ipAddress )
2740 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002741 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002742 onpass="ONOS node(s) restarted",
2743 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002744
2745 # Grab the time of restart so we chan check how long the gossip
2746 # protocol has had time to work
2747 main.restartTime = time.time() - restartTime
2748 main.log.debug( "Restart time: " + str( main.restartTime ) )
2749 # TODO: MAke this configurable. Also, we are breaking the above timer
2750 main.step( "Checking ONOS nodes" )
2751 nodeResults = utilities.retry( self.nodesCheck,
2752 False,
Jon Hallca319892017-06-15 15:25:22 -07002753 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07002754 sleep=15,
2755 attempts=5 )
2756
2757 utilities.assert_equals( expect=True, actual=nodeResults,
2758 onpass="Nodes check successful",
2759 onfail="Nodes check NOT successful" )
2760
2761 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002762 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002763 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002764 ctrl.name,
2765 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002766 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -07002767 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002768
Jon Hallca319892017-06-15 15:25:22 -07002769 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002770
2771 main.step( "Rerun for election on the node(s) that were killed" )
2772 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002773 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002774 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002775 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002776 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2777 onpass="ONOS nodes reran for election topic",
2778 onfail="Errror rerunning for election" )
Devin Lim142b5342017-07-20 15:22:39 -07002779 def tempCell( self, cellName, ipList ):
2780 main.step( "Create cell file" )
2781 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002782
Devin Lim142b5342017-07-20 15:22:39 -07002783
2784 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2785 main.Mininet1.ip_address,
2786 cellAppString, ipList , main.ONOScli1.karafUser )
2787 main.step( "Applying cell variable to environment" )
2788 cellResult = main.ONOSbench.setCell( cellName )
2789 verifyResult = main.ONOSbench.verifyCell()
2790
2791
2792 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002793 """
2794 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002795 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002796 1: scaling
2797 """
2798 """
2799 Check state after ONOS failure/scaling
2800 """
2801 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002802 assert main, "main not defined"
2803 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002804 main.case( "Running ONOS Constant State Tests" )
2805
2806 OnosAfterWhich = [ "failure" , "scaliing" ]
2807
Devin Lim58046fa2017-07-05 16:55:00 -07002808 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002809 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002810
Devin Lim142b5342017-07-20 15:22:39 -07002811 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002812 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002813
2814 if rolesResults and not consistentMastership:
2815 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002816 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002817 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002818 json.dumps( json.loads( ONOSMastership[ i ] ),
2819 sort_keys=True,
2820 indent=4,
2821 separators=( ',', ': ' ) ) )
2822
2823 if compareSwitch:
2824 description2 = "Compare switch roles from before failure"
2825 main.step( description2 )
2826 try:
2827 currentJson = json.loads( ONOSMastership[ 0 ] )
2828 oldJson = json.loads( mastershipState )
2829 except ( ValueError, TypeError ):
2830 main.log.exception( "Something is wrong with parsing " +
2831 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002832 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2833 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002834 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002835 mastershipCheck = main.TRUE
2836 for i in range( 1, 29 ):
2837 switchDPID = str(
2838 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
2839 current = [ switch[ 'master' ] for switch in currentJson
2840 if switchDPID in switch[ 'id' ] ]
2841 old = [ switch[ 'master' ] for switch in oldJson
2842 if switchDPID in switch[ 'id' ] ]
2843 if current == old:
2844 mastershipCheck = mastershipCheck and main.TRUE
2845 else:
2846 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2847 mastershipCheck = main.FALSE
2848 utilities.assert_equals(
2849 expect=main.TRUE,
2850 actual=mastershipCheck,
2851 onpass="Mastership of Switches was not changed",
2852 onfail="Mastership of some switches changed" )
2853
2854 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002855 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002856 intentCheck = main.FALSE
2857 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002858
2859 main.step( "Check for consistency in Intents from each controller" )
2860 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2861 main.log.info( "Intents are consistent across all ONOS " +
2862 "nodes" )
2863 else:
2864 consistentIntents = False
2865
2866 # Try to make it easy to figure out what is happening
2867 #
2868 # Intent ONOS1 ONOS2 ...
2869 # 0x01 INSTALLED INSTALLING
2870 # ... ... ...
2871 # ... ... ...
2872 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002873 for ctrl in main.Cluster.active():
2874 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002875 main.log.warn( title )
2876 # get all intent keys in the cluster
2877 keys = []
2878 for nodeStr in ONOSIntents:
2879 node = json.loads( nodeStr )
2880 for intent in node:
2881 keys.append( intent.get( 'id' ) )
2882 keys = set( keys )
2883 for key in keys:
2884 row = "%-13s" % key
2885 for nodeStr in ONOSIntents:
2886 node = json.loads( nodeStr )
2887 for intent in node:
2888 if intent.get( 'id' ) == key:
2889 row += "%-15s" % intent.get( 'state' )
2890 main.log.warn( row )
2891 # End table view
2892
2893 utilities.assert_equals(
2894 expect=True,
2895 actual=consistentIntents,
2896 onpass="Intents are consistent across all ONOS nodes",
2897 onfail="ONOS nodes have different views of intents" )
2898 intentStates = []
2899 for node in ONOSIntents: # Iter through ONOS nodes
2900 nodeStates = []
2901 # Iter through intents of a node
2902 try:
2903 for intent in json.loads( node ):
2904 nodeStates.append( intent[ 'state' ] )
2905 except ( ValueError, TypeError ):
2906 main.log.exception( "Error in parsing intents" )
2907 main.log.error( repr( node ) )
2908 intentStates.append( nodeStates )
2909 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2910 main.log.info( dict( out ) )
2911
2912 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002913 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002914 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002915 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002916 main.log.warn( json.dumps(
2917 json.loads( ONOSIntents[ i ] ),
2918 sort_keys=True,
2919 indent=4,
2920 separators=( ',', ': ' ) ) )
2921 elif intentsResults and consistentIntents:
2922 intentCheck = main.TRUE
2923
2924 # NOTE: Store has no durability, so intents are lost across system
2925 # restarts
2926 if not isRestart:
2927 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2928 # NOTE: this requires case 5 to pass for intentState to be set.
2929 # maybe we should stop the test if that fails?
2930 sameIntents = main.FALSE
2931 try:
2932 intentState
2933 except NameError:
2934 main.log.warn( "No previous intent state was saved" )
2935 else:
2936 if intentState and intentState == ONOSIntents[ 0 ]:
2937 sameIntents = main.TRUE
2938 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2939 # TODO: possibly the states have changed? we may need to figure out
2940 # what the acceptable states are
2941 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2942 sameIntents = main.TRUE
2943 try:
2944 before = json.loads( intentState )
2945 after = json.loads( ONOSIntents[ 0 ] )
2946 for intent in before:
2947 if intent not in after:
2948 sameIntents = main.FALSE
2949 main.log.debug( "Intent is not currently in ONOS " +
2950 "(at least in the same form):" )
2951 main.log.debug( json.dumps( intent ) )
2952 except ( ValueError, TypeError ):
2953 main.log.exception( "Exception printing intents" )
2954 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2955 main.log.debug( repr( intentState ) )
2956 if sameIntents == main.FALSE:
2957 try:
2958 main.log.debug( "ONOS intents before: " )
2959 main.log.debug( json.dumps( json.loads( intentState ),
2960 sort_keys=True, indent=4,
2961 separators=( ',', ': ' ) ) )
2962 main.log.debug( "Current ONOS intents: " )
2963 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2964 sort_keys=True, indent=4,
2965 separators=( ',', ': ' ) ) )
2966 except ( ValueError, TypeError ):
2967 main.log.exception( "Exception printing intents" )
2968 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2969 main.log.debug( repr( intentState ) )
2970 utilities.assert_equals(
2971 expect=main.TRUE,
2972 actual=sameIntents,
2973 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ] ,
2974 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
2975 intentCheck = intentCheck and sameIntents
2976
2977 main.step( "Get the OF Table entries and compare to before " +
2978 "component " + OnosAfterWhich[ afterWhich ] )
2979 FlowTables = main.TRUE
2980 for i in range( 28 ):
2981 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2982 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2983 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
2984 FlowTables = FlowTables and curSwitch
2985 if curSwitch == main.FALSE:
2986 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2987 utilities.assert_equals(
2988 expect=main.TRUE,
2989 actual=FlowTables,
2990 onpass="No changes were found in the flow tables",
2991 onfail="Changes were found in the flow tables" )
2992
Jon Hallca319892017-06-15 15:25:22 -07002993 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07002994 """
2995 main.step( "Check the continuous pings to ensure that no packets " +
2996 "were dropped during component failure" )
2997 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2998 main.params[ 'TESTONIP' ] )
2999 LossInPings = main.FALSE
3000 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3001 for i in range( 8, 18 ):
3002 main.log.info(
3003 "Checking for a loss in pings along flow from s" +
3004 str( i ) )
3005 LossInPings = main.Mininet2.checkForLoss(
3006 "/tmp/ping.h" +
3007 str( i ) ) or LossInPings
3008 if LossInPings == main.TRUE:
3009 main.log.info( "Loss in ping detected" )
3010 elif LossInPings == main.ERROR:
3011 main.log.info( "There are multiple mininet process running" )
3012 elif LossInPings == main.FALSE:
3013 main.log.info( "No Loss in the pings" )
3014 main.log.info( "No loss of dataplane connectivity" )
3015 utilities.assert_equals(
3016 expect=main.FALSE,
3017 actual=LossInPings,
3018 onpass="No Loss of connectivity",
3019 onfail="Loss of dataplane connectivity detected" )
3020 # NOTE: Since intents are not persisted with IntnentStore,
3021 # we expect loss in dataplane connectivity
3022 LossInPings = main.FALSE
3023 """
3024
3025 def compareTopo( self, main ):
3026 """
3027 Compare topo
3028 """
3029 import json
3030 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003031 assert main, "main not defined"
3032 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003033 try:
3034 from tests.dependencies.topology import Topology
3035 except ImportError:
3036 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003037 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003038 try:
3039 main.topoRelated
3040 except ( NameError, AttributeError ):
3041 main.topoRelated = Topology()
3042 main.case( "Compare ONOS Topology view to Mininet topology" )
3043 main.caseExplanation = "Compare topology objects between Mininet" +\
3044 " and ONOS"
3045 topoResult = main.FALSE
3046 topoFailMsg = "ONOS topology don't match Mininet"
3047 elapsed = 0
3048 count = 0
3049 main.step( "Comparing ONOS topology to MN topology" )
3050 startTime = time.time()
3051 # Give time for Gossip to work
3052 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3053 devicesResults = main.TRUE
3054 linksResults = main.TRUE
3055 hostsResults = main.TRUE
3056 hostAttachmentResults = True
3057 count += 1
3058 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003059 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003060 kwargs={ 'sleep': 5, 'attempts': 5,
3061 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003062 ipResult = main.TRUE
3063
Devin Lim142b5342017-07-20 15:22:39 -07003064 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003065 kwargs={ 'sleep': 5, 'attempts': 5,
3066 'randomTime': True },
3067 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003068
3069 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003070 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003071 if hosts[ controller ]:
3072 for host in hosts[ controller ]:
3073 if host is None or host.get( 'ipAddresses', [] ) == []:
3074 main.log.error(
3075 "Error with host ipAddresses on controller" +
3076 controllerStr + ": " + str( host ) )
3077 ipResult = main.FALSE
Devin Lim142b5342017-07-20 15:22:39 -07003078 ports = main.topoRelated.getAll( "ports" , True,
Jon Hallca319892017-06-15 15:25:22 -07003079 kwargs={ 'sleep': 5, 'attempts': 5,
3080 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003081 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003082 kwargs={ 'sleep': 5, 'attempts': 5,
3083 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003084 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003085 kwargs={ 'sleep': 5, 'attempts': 5,
3086 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003087
3088 elapsed = time.time() - startTime
3089 cliTime = time.time() - cliStart
3090 print "Elapsed time: " + str( elapsed )
3091 print "CLI time: " + str( cliTime )
3092
3093 if all( e is None for e in devices ) and\
3094 all( e is None for e in hosts ) and\
3095 all( e is None for e in ports ) and\
3096 all( e is None for e in links ) and\
3097 all( e is None for e in clusters ):
3098 topoFailMsg = "Could not get topology from ONOS"
3099 main.log.error( topoFailMsg )
3100 continue # Try again, No use trying to compare
3101
3102 mnSwitches = main.Mininet1.getSwitches()
3103 mnLinks = main.Mininet1.getLinks()
3104 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003105 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003106 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003107 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1, controller,
3108 mnSwitches,
3109 devices, ports )
3110 utilities.assert_equals( expect=main.TRUE,
3111 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003112 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003113 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003114 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003115 " Switches view is incorrect" )
3116
3117
3118 currentLinksResult = main.topoRelated.compareBase( links, controller,
3119 main.Mininet1.compareLinks,
3120 [mnSwitches, mnLinks] )
3121 utilities.assert_equals( expect=main.TRUE,
3122 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003123 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003124 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003125 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003126 " links view is incorrect" )
3127 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3128 currentHostsResult = main.Mininet1.compareHosts(
3129 mnHosts,
3130 hosts[ controller ] )
3131 elif hosts[ controller ] == []:
3132 currentHostsResult = main.TRUE
3133 else:
3134 currentHostsResult = main.FALSE
3135 utilities.assert_equals( expect=main.TRUE,
3136 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003137 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003138 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003139 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003140 " hosts don't match Mininet" )
3141 # CHECKING HOST ATTACHMENT POINTS
3142 hostAttachment = True
3143 zeroHosts = False
3144 # FIXME: topo-HA/obelisk specific mappings:
3145 # key is mac and value is dpid
3146 mappings = {}
3147 for i in range( 1, 29 ): # hosts 1 through 28
3148 # set up correct variables:
3149 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
3150 if i == 1:
3151 deviceId = "1000".zfill( 16 )
3152 elif i == 2:
3153 deviceId = "2000".zfill( 16 )
3154 elif i == 3:
3155 deviceId = "3000".zfill( 16 )
3156 elif i == 4:
3157 deviceId = "3004".zfill( 16 )
3158 elif i == 5:
3159 deviceId = "5000".zfill( 16 )
3160 elif i == 6:
3161 deviceId = "6000".zfill( 16 )
3162 elif i == 7:
3163 deviceId = "6007".zfill( 16 )
3164 elif i >= 8 and i <= 17:
3165 dpid = '3' + str( i ).zfill( 3 )
3166 deviceId = dpid.zfill( 16 )
3167 elif i >= 18 and i <= 27:
3168 dpid = '6' + str( i ).zfill( 3 )
3169 deviceId = dpid.zfill( 16 )
3170 elif i == 28:
3171 deviceId = "2800".zfill( 16 )
3172 mappings[ macId ] = deviceId
3173 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3174 if hosts[ controller ] == []:
3175 main.log.warn( "There are no hosts discovered" )
3176 zeroHosts = True
3177 else:
3178 for host in hosts[ controller ]:
3179 mac = None
3180 location = None
3181 device = None
3182 port = None
3183 try:
3184 mac = host.get( 'mac' )
3185 assert mac, "mac field could not be found for this host object"
3186
3187 location = host.get( 'locations' )[ 0 ]
3188 assert location, "location field could not be found for this host object"
3189
3190 # Trim the protocol identifier off deviceId
3191 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
3192 assert device, "elementId field could not be found for this host location object"
3193
3194 port = location.get( 'port' )
3195 assert port, "port field could not be found for this host location object"
3196
3197 # Now check if this matches where they should be
3198 if mac and device and port:
3199 if str( port ) != "1":
3200 main.log.error( "The attachment port is incorrect for " +
3201 "host " + str( mac ) +
3202 ". Expected: 1 Actual: " + str( port ) )
3203 hostAttachment = False
3204 if device != mappings[ str( mac ) ]:
3205 main.log.error( "The attachment device is incorrect for " +
3206 "host " + str( mac ) +
3207 ". Expected: " + mappings[ str( mac ) ] +
3208 " Actual: " + device )
3209 hostAttachment = False
3210 else:
3211 hostAttachment = False
3212 except AssertionError:
3213 main.log.exception( "Json object not as expected" )
3214 main.log.error( repr( host ) )
3215 hostAttachment = False
3216 else:
3217 main.log.error( "No hosts json output or \"Error\"" +
3218 " in output. hosts = " +
3219 repr( hosts[ controller ] ) )
3220 if zeroHosts is False:
3221 # TODO: Find a way to know if there should be hosts in a
3222 # given point of the test
3223 hostAttachment = True
3224
3225 # END CHECKING HOST ATTACHMENT POINTS
3226 devicesResults = devicesResults and currentDevicesResult
3227 linksResults = linksResults and currentLinksResult
3228 hostsResults = hostsResults and currentHostsResult
3229 hostAttachmentResults = hostAttachmentResults and\
3230 hostAttachment
3231 topoResult = ( devicesResults and linksResults
3232 and hostsResults and ipResult and
3233 hostAttachmentResults )
3234 utilities.assert_equals( expect=True,
3235 actual=topoResult,
3236 onpass="ONOS topology matches Mininet",
3237 onfail=topoFailMsg )
3238 # End of While loop to pull ONOS state
3239
3240 # Compare json objects for hosts and dataplane clusters
3241
3242 # hosts
3243 main.step( "Hosts view is consistent across all ONOS nodes" )
3244 consistentHostsResult = main.TRUE
3245 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003246 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003247 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3248 if hosts[ controller ] == hosts[ 0 ]:
3249 continue
3250 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003251 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003252 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003253 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003254 consistentHostsResult = main.FALSE
3255
3256 else:
Jon Hallca319892017-06-15 15:25:22 -07003257 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003258 controllerStr )
3259 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003260 main.log.debug( controllerStr +
3261 " hosts response: " +
3262 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003263 utilities.assert_equals(
3264 expect=main.TRUE,
3265 actual=consistentHostsResult,
3266 onpass="Hosts view is consistent across all ONOS nodes",
3267 onfail="ONOS nodes have different views of hosts" )
3268
3269 main.step( "Hosts information is correct" )
3270 hostsResults = hostsResults and ipResult
3271 utilities.assert_equals(
3272 expect=main.TRUE,
3273 actual=hostsResults,
3274 onpass="Host information is correct",
3275 onfail="Host information is incorrect" )
3276
3277 main.step( "Host attachment points to the network" )
3278 utilities.assert_equals(
3279 expect=True,
3280 actual=hostAttachmentResults,
3281 onpass="Hosts are correctly attached to the network",
3282 onfail="ONOS did not correctly attach hosts to the network" )
3283
3284 # Strongly connected clusters of devices
3285 main.step( "Clusters view is consistent across all ONOS nodes" )
3286 consistentClustersResult = main.TRUE
3287 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003288 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003289 if "Error" not in clusters[ controller ]:
3290 if clusters[ controller ] == clusters[ 0 ]:
3291 continue
3292 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003293 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003294 controllerStr +
3295 " is inconsistent with ONOS1" )
3296 consistentClustersResult = main.FALSE
3297 else:
3298 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003299 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003300 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003301 main.log.debug( controllerStr +
3302 " clusters response: " +
3303 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003304 utilities.assert_equals(
3305 expect=main.TRUE,
3306 actual=consistentClustersResult,
3307 onpass="Clusters view is consistent across all ONOS nodes",
3308 onfail="ONOS nodes have different views of clusters" )
3309 if not consistentClustersResult:
3310 main.log.debug( clusters )
3311 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003312 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003313
3314 main.step( "There is only one SCC" )
3315 # there should always only be one cluster
3316 try:
3317 numClusters = len( json.loads( clusters[ 0 ] ) )
3318 except ( ValueError, TypeError ):
3319 main.log.exception( "Error parsing clusters[0]: " +
3320 repr( clusters[ 0 ] ) )
3321 numClusters = "ERROR"
3322 clusterResults = main.FALSE
3323 if numClusters == 1:
3324 clusterResults = main.TRUE
3325 utilities.assert_equals(
3326 expect=1,
3327 actual=numClusters,
3328 onpass="ONOS shows 1 SCC",
3329 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3330
3331 topoResult = ( devicesResults and linksResults
3332 and hostsResults and consistentHostsResult
3333 and consistentClustersResult and clusterResults
3334 and ipResult and hostAttachmentResults )
3335
3336 topoResult = topoResult and int( count <= 2 )
3337 note = "note it takes about " + str( int( cliTime ) ) + \
3338 " seconds for the test to make all the cli calls to fetch " +\
3339 "the topology from each ONOS instance"
3340 main.log.info(
3341 "Very crass estimate for topology discovery/convergence( " +
3342 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3343 str( count ) + " tries" )
3344
3345 main.step( "Device information is correct" )
3346 utilities.assert_equals(
3347 expect=main.TRUE,
3348 actual=devicesResults,
3349 onpass="Device information is correct",
3350 onfail="Device information is incorrect" )
3351
3352 main.step( "Links are correct" )
3353 utilities.assert_equals(
3354 expect=main.TRUE,
3355 actual=linksResults,
3356 onpass="Link are correct",
3357 onfail="Links are incorrect" )
3358
3359 main.step( "Hosts are correct" )
3360 utilities.assert_equals(
3361 expect=main.TRUE,
3362 actual=hostsResults,
3363 onpass="Hosts are correct",
3364 onfail="Hosts are incorrect" )
3365
3366 # FIXME: move this to an ONOS state case
3367 main.step( "Checking ONOS nodes" )
3368 nodeResults = utilities.retry( self.nodesCheck,
3369 False,
Jon Hallca319892017-06-15 15:25:22 -07003370 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07003371 attempts=5 )
3372 utilities.assert_equals( expect=True, actual=nodeResults,
3373 onpass="Nodes check successful",
3374 onfail="Nodes check NOT successful" )
3375 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003376 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003377 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003378 ctrl.name,
3379 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003380
3381 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003382 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003383
Devin Lim58046fa2017-07-05 16:55:00 -07003384 def linkDown( self, main, fromS="s3", toS="s28" ):
3385 """
3386 Link fromS-toS down
3387 """
3388 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003389 assert main, "main not defined"
3390 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003391 # NOTE: You should probably run a topology check after this
3392
3393 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3394
3395 description = "Turn off a link to ensure that Link Discovery " +\
3396 "is working properly"
3397 main.case( description )
3398
3399 main.step( "Kill Link between " + fromS + " and " + toS )
3400 LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
3401 main.log.info( "Waiting " + str( linkSleep ) +
3402 " seconds for link down to be discovered" )
3403 time.sleep( linkSleep )
3404 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3405 onpass="Link down successful",
3406 onfail="Failed to bring link down" )
3407 # TODO do some sort of check here
3408
3409 def linkUp( self, main, fromS="s3", toS="s28" ):
3410 """
3411 Link fromS-toS up
3412 """
3413 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003414 assert main, "main not defined"
3415 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003416 # NOTE: You should probably run a topology check after this
3417
3418 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3419
3420 description = "Restore a link to ensure that Link Discovery is " + \
3421 "working properly"
3422 main.case( description )
3423
3424 main.step( "Bring link between " + fromS + " and " + toS +" back up" )
3425 LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
3426 main.log.info( "Waiting " + str( linkSleep ) +
3427 " seconds for link up to be discovered" )
3428 time.sleep( linkSleep )
3429 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3430 onpass="Link up successful",
3431 onfail="Failed to bring link up" )
3432
3433 def switchDown( self, main ):
3434 """
3435 Switch Down
3436 """
3437 # NOTE: You should probably run a topology check after this
3438 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003439 assert main, "main not defined"
3440 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003441
3442 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3443
3444 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003445 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003446 main.case( description )
3447 switch = main.params[ 'kill' ][ 'switch' ]
3448 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3449
3450 # TODO: Make this switch parameterizable
3451 main.step( "Kill " + switch )
3452 main.log.info( "Deleting " + switch )
3453 main.Mininet1.delSwitch( switch )
3454 main.log.info( "Waiting " + str( switchSleep ) +
3455 " seconds for switch down to be discovered" )
3456 time.sleep( switchSleep )
3457 device = onosCli.getDevice( dpid=switchDPID )
3458 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003459 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003460 result = main.FALSE
3461 if device and device[ 'available' ] is False:
3462 result = main.TRUE
3463 utilities.assert_equals( expect=main.TRUE, actual=result,
3464 onpass="Kill switch successful",
3465 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003466
Devin Lim58046fa2017-07-05 16:55:00 -07003467 def switchUp( self, main ):
3468 """
3469 Switch Up
3470 """
3471 # NOTE: You should probably run a topology check after this
3472 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003473 assert main, "main not defined"
3474 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003475
3476 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3477 switch = main.params[ 'kill' ][ 'switch' ]
3478 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3479 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003480 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003481 description = "Adding a switch to ensure it is discovered correctly"
3482 main.case( description )
3483
3484 main.step( "Add back " + switch )
3485 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3486 for peer in links:
3487 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003488 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003489 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3490 main.log.info( "Waiting " + str( switchSleep ) +
3491 " seconds for switch up to be discovered" )
3492 time.sleep( switchSleep )
3493 device = onosCli.getDevice( dpid=switchDPID )
3494 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003495 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003496 result = main.FALSE
3497 if device and device[ 'available' ]:
3498 result = main.TRUE
3499 utilities.assert_equals( expect=main.TRUE, actual=result,
3500 onpass="add switch successful",
3501 onfail="Failed to add switch?" )
3502
3503 def startElectionApp( self, main ):
3504 """
3505 start election app on all onos nodes
3506 """
Devin Lim58046fa2017-07-05 16:55:00 -07003507 assert main, "main not defined"
3508 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003509
3510 main.case( "Start Leadership Election app" )
3511 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003512 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -07003513 appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003514 utilities.assert_equals(
3515 expect=main.TRUE,
3516 actual=appResult,
3517 onpass="Election app installed",
3518 onfail="Something went wrong with installing Leadership election" )
3519
3520 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003521 onosCli.electionTestRun()
3522 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003523 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003524 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003525 utilities.assert_equals(
3526 expect=True,
3527 actual=sameResult,
3528 onpass="All nodes see the same leaderboards",
3529 onfail="Inconsistent leaderboards" )
3530
3531 if sameResult:
3532 leader = leaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003533 if onosCli.ipAddress in leader:
Devin Lim58046fa2017-07-05 16:55:00 -07003534 correctLeader = True
3535 else:
3536 correctLeader = False
3537 main.step( "First node was elected leader" )
3538 utilities.assert_equals(
3539 expect=True,
3540 actual=correctLeader,
3541 onpass="Correct leader was elected",
3542 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003543 main.Cluster.testLeader = leader
3544
Devin Lim58046fa2017-07-05 16:55:00 -07003545 def isElectionFunctional( self, main ):
3546 """
3547 Check that Leadership Election is still functional
3548 15.1 Run election on each node
3549 15.2 Check that each node has the same leaders and candidates
3550 15.3 Find current leader and withdraw
3551 15.4 Check that a new node was elected leader
3552 15.5 Check that that new leader was the candidate of old leader
3553 15.6 Run for election on old leader
3554 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3555 15.8 Make sure that the old leader was added to the candidate list
3556
3557 old and new variable prefixes refer to data from before vs after
3558 withdrawl and later before withdrawl vs after re-election
3559 """
3560 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003561 assert main, "main not defined"
3562 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003563
3564 description = "Check that Leadership Election is still functional"
3565 main.case( description )
3566 # NOTE: Need to re-run after restarts since being a canidate is not persistant
3567
3568 oldLeaders = [] # list of lists of each nodes' candidates before
3569 newLeaders = [] # list of lists of each nodes' candidates after
3570 oldLeader = '' # the old leader from oldLeaders, None if not same
3571 newLeader = '' # the new leaders fron newLoeaders, None if not same
3572 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3573 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003574 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003575 expectNoLeader = True
3576
3577 main.step( "Run for election on each node" )
Devin Lim142b5342017-07-20 15:22:39 -07003578 electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003579 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003580 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003581 actual=electionResult,
3582 onpass="All nodes successfully ran for leadership",
3583 onfail="At least one node failed to run for leadership" )
3584
3585 if electionResult == main.FALSE:
3586 main.log.error(
3587 "Skipping Test Case because Election Test App isn't loaded" )
3588 main.skipCase()
3589
3590 main.step( "Check that each node shows the same leader and candidates" )
3591 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003592 activeCLIs = main.Cluster.active()
3593 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Devin Lim58046fa2017-07-05 16:55:00 -07003594 if sameResult:
3595 oldLeader = oldLeaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003596 main.log.info( "Old leader: " + oldLeader )
Devin Lim58046fa2017-07-05 16:55:00 -07003597 else:
3598 oldLeader = None
3599 utilities.assert_equals(
3600 expect=True,
3601 actual=sameResult,
3602 onpass="Leaderboards are consistent for the election topic",
3603 onfail=failMessage )
3604
3605 main.step( "Find current leader and withdraw" )
3606 withdrawResult = main.TRUE
3607 # do some sanity checking on leader before using it
3608 if oldLeader is None:
3609 main.log.error( "Leadership isn't consistent." )
3610 withdrawResult = main.FALSE
3611 # Get the CLI of the oldLeader
Jon Hallca319892017-06-15 15:25:22 -07003612 for ctrl in main.Cluster.active():
3613 if oldLeader == ctrl.ipAddress:
3614 oldLeaderCLI = ctrl
Devin Lim58046fa2017-07-05 16:55:00 -07003615 break
3616 else: # FOR/ELSE statement
3617 main.log.error( "Leader election, could not find current leader" )
3618 if oldLeader:
3619 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3620 utilities.assert_equals(
3621 expect=main.TRUE,
3622 actual=withdrawResult,
3623 onpass="Node was withdrawn from election",
3624 onfail="Node was not withdrawn from election" )
3625
3626 main.step( "Check that a new node was elected leader" )
3627 failMessage = "Nodes have different leaders"
3628 # Get new leaders and candidates
3629 newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
3630 newLeader = None
3631 if newLeaderResult:
3632 if newLeaders[ 0 ][ 0 ] == 'none':
3633 main.log.error( "No leader was elected on at least 1 node" )
3634 if not expectNoLeader:
3635 newLeaderResult = False
3636 newLeader = newLeaders[ 0 ][ 0 ]
3637
3638 # Check that the new leader is not the older leader, which was withdrawn
3639 if newLeader == oldLeader:
3640 newLeaderResult = False
3641 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3642 " as the current leader" )
3643 utilities.assert_equals(
3644 expect=True,
3645 actual=newLeaderResult,
3646 onpass="Leadership election passed",
3647 onfail="Something went wrong with Leadership election" )
3648
3649 main.step( "Check that that new leader was the candidate of old leader" )
3650 # candidates[ 2 ] should become the top candidate after withdrawl
3651 correctCandidateResult = main.TRUE
3652 if expectNoLeader:
3653 if newLeader == 'none':
3654 main.log.info( "No leader expected. None found. Pass" )
3655 correctCandidateResult = main.TRUE
3656 else:
3657 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3658 correctCandidateResult = main.FALSE
3659 elif len( oldLeaders[ 0 ] ) >= 3:
3660 if newLeader == oldLeaders[ 0 ][ 2 ]:
3661 # correct leader was elected
3662 correctCandidateResult = main.TRUE
3663 else:
3664 correctCandidateResult = main.FALSE
3665 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3666 newLeader, oldLeaders[ 0 ][ 2 ] ) )
3667 else:
3668 main.log.warn( "Could not determine who should be the correct leader" )
3669 main.log.debug( oldLeaders[ 0 ] )
3670 correctCandidateResult = main.FALSE
3671 utilities.assert_equals(
3672 expect=main.TRUE,
3673 actual=correctCandidateResult,
3674 onpass="Correct Candidate Elected",
3675 onfail="Incorrect Candidate Elected" )
3676
3677 main.step( "Run for election on old leader( just so everyone " +
3678 "is in the hat )" )
3679 if oldLeaderCLI is not None:
3680 runResult = oldLeaderCLI.electionTestRun()
3681 else:
3682 main.log.error( "No old leader to re-elect" )
3683 runResult = main.FALSE
3684 utilities.assert_equals(
3685 expect=main.TRUE,
3686 actual=runResult,
3687 onpass="App re-ran for election",
3688 onfail="App failed to run for election" )
3689
3690 main.step(
3691 "Check that oldLeader is a candidate, and leader if only 1 node" )
3692 # verify leader didn't just change
3693 # Get new leaders and candidates
3694 reRunLeaders = []
3695 time.sleep( 5 ) # Paremterize
3696 positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3697
3698 # Check that the re-elected node is last on the candidate List
3699 if not reRunLeaders[ 0 ]:
3700 positionResult = main.FALSE
3701 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
3702 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
3703 str( reRunLeaders[ 0 ] ) ) )
3704 positionResult = main.FALSE
3705 utilities.assert_equals(
3706 expect=True,
3707 actual=positionResult,
3708 onpass="Old leader successfully re-ran for election",
3709 onfail="Something went wrong with Leadership election after " +
3710 "the old leader re-ran for election" )
Jon Hallca319892017-06-15 15:25:22 -07003711
Devin Lim58046fa2017-07-05 16:55:00 -07003712 def installDistributedPrimitiveApp( self, main ):
3713 """
3714 Install Distributed Primitives app
3715 """
3716 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003717 assert main, "main not defined"
3718 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003719
3720 # Variables for the distributed primitives tests
3721 main.pCounterName = "TestON-Partitions"
3722 main.pCounterValue = 0
3723 main.onosSet = set( [] )
3724 main.onosSetName = "TestON-set"
3725
3726 description = "Install Primitives app"
3727 main.case( description )
3728 main.step( "Install Primitives app" )
3729 appName = "org.onosproject.distributedprimitives"
Devin Lime9f0ccf2017-08-11 17:25:12 -07003730 appResults = main.Cluster.next().CLI.activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003731 utilities.assert_equals( expect=main.TRUE,
3732 actual=appResults,
3733 onpass="Primitives app activated",
3734 onfail="Primitives app not activated" )
3735 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003736 time.sleep( 5 ) # To allow all nodes to activate