blob: 3d18eaabedfacd323381b514fe5f3c130f91fab0 [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
2Copyright 2015 Open Networking Foundation (ONF)
3
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
11 (at your option) any later version.
12
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
21
Jon Halla440e872016-03-31 15:15:50 -070022import json
Jon Hall41d39f12016-04-11 22:54:35 -070023import time
Jon Halle1a3b752015-07-22 13:02:46 -070024
Jon Hallf37d44d2017-05-24 10:37:30 -070025
Jon Hall41d39f12016-04-11 22:54:35 -070026class HA():
Jon Hall57b50432015-10-22 10:20:10 -070027
Jon Halla440e872016-03-31 15:15:50 -070028 def __init__( self ):
29 self.default = ''
Jon Hall57b50432015-10-22 10:20:10 -070030
Devin Lim58046fa2017-07-05 16:55:00 -070031 def customizeOnosGenPartitions( self ):
Devin Lim58046fa2017-07-05 16:55:00 -070032 # copy gen-partions file to ONOS
33 # NOTE: this assumes TestON and ONOS are on the same machine
34 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
35 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
36 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
37 main.ONOSbench.ip_address,
38 srcFile,
39 dstDir,
40 pwd=main.ONOSbench.pwd,
41 direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070042
    def cleanUpGenPartition( self ):
        """Revert the customized onos-gen-partitions script.

        Runs `git checkout -- tools/test/bin/onos-gen-partitions` inside the
        ONOS home directory on the bench node; on a pexpect failure the whole
        test is aborted via main.cleanAndExit().

        NOTE(review): `pexpect` is not among this module's visible imports
        (only json and time) — presumably the TestON framework injects it;
        otherwise evaluating the except clause raises NameError. Confirm.
        """
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070056
Devin Lim58046fa2017-07-05 16:55:00 -070057 def startingMininet( self ):
58 main.step( "Starting Mininet" )
59 # scp topo file to mininet
60 # TODO: move to params?
61 topoName = "obelisk.py"
62 filePath = main.ONOSbench.home + "/tools/test/topos/"
63 main.ONOSbench.scp( main.Mininet1,
64 filePath + topoName,
65 main.Mininet1.home,
66 direction="to" )
67 mnResult = main.Mininet1.startNet()
68 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
69 onpass="Mininet Started",
70 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070071
Devin Lim58046fa2017-07-05 16:55:00 -070072 def scalingMetadata( self ):
73 import re
Devin Lim142b5342017-07-20 15:22:39 -070074 main.step( "Generate initial metadata file" )
Devin Lim58046fa2017-07-05 16:55:00 -070075 main.scaling = main.params[ 'scaling' ].split( "," )
76 main.log.debug( main.scaling )
77 scale = main.scaling.pop( 0 )
78 main.log.debug( scale )
79 if "e" in scale:
80 equal = True
81 else:
82 equal = False
83 main.log.debug( equal )
Devin Lim142b5342017-07-20 15:22:39 -070084 main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
85 genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
Devin Lim58046fa2017-07-05 16:55:00 -070086 utilities.assert_equals( expect=main.TRUE, actual=genResult,
87 onpass="New cluster metadata file generated",
88 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070089
Devin Lim58046fa2017-07-05 16:55:00 -070090 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070091 main.step( "Generate initial metadata file" )
92 if main.Cluster.numCtrls >= 5:
93 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070094 else:
95 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim142b5342017-07-20 15:22:39 -070096 genResult = main.Server.generateFile( main.Cluster.numCtrls )
Devin Lim58046fa2017-07-05 16:55:00 -070097 utilities.assert_equals( expect=main.TRUE, actual=genResult,
98 onpass="New cluster metadata file generated",
99 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -0700100
Devin Lim142b5342017-07-20 15:22:39 -0700101 def setServerForCluster( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700102 import os
103 main.step( "Setup server for cluster metadata file" )
104 main.serverPort = main.params[ 'server' ][ 'port' ]
105 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
106 main.log.debug( "Root dir: {}".format( rootDir ) )
107 status = main.Server.start( main.ONOSbench,
108 rootDir,
109 port=main.serverPort,
110 logDir=main.logdir + "/server.log" )
111 utilities.assert_equals( expect=main.TRUE, actual=status,
112 onpass="Server started",
113 onfail="Failled to start SimpleHTTPServer" )
114
    def copyingBackupConfig( self ):
        """Back up onos-service, then patch it to fetch remote cluster metadata.

        Saves a .backup copy of tools/package/bin/onos-service, then uses sed
        to insert a JAVA_OPTS export pointing onos.cluster.metadata.uri at the
        HTTP server started by setServerForCluster(). Must run after
        setServerForCluster() (reads main.serverPort); cleanUpOnosService()
        restores the backup.
        """
        main.step( "Copying backup config files" )
        main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 main.onosServicepath,
                                 main.onosServicepath + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # the URI's slashes are backslash-escaped because they will sit inside
        # a sed s/// replacement that uses '/' as its delimiter
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # insert 'export JAVA_OPTS=...' after the line matching 'bash';
        # '${{' / '}}' are literal braces escaped for str.format
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs, main.onosServicepath )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        # wait for the echoed command (contains 'cluster.json'), then the prompt
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )
145
146 def cleanUpOnosService( self ):
147 # Cleanup custom onos-service file
148 main.ONOSbench.scp( main.ONOSbench,
149 main.onosServicepath + ".backup",
150 main.onosServicepath,
151 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700152
Jon Halla440e872016-03-31 15:15:50 -0700153 def consistentCheck( self ):
154 """
155 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700156
Jon Hallf37d44d2017-05-24 10:37:30 -0700157 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700158 - onosCounters is the parsed json output of the counters command on
159 all nodes
160 - consistent is main.TRUE if all "TestON" counters are consitent across
161 all nodes or main.FALSE
Jon Halla440e872016-03-31 15:15:50 -0700162 """
Jon Halle1a3b752015-07-22 13:02:46 -0700163 try:
Jon Halla440e872016-03-31 15:15:50 -0700164 # Get onos counters results
165 onosCountersRaw = []
166 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700167 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700168 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700169 name="counters-" + str( ctrl ),
170 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700171 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700172 'randomTime': True } )
173 threads.append( t )
174 t.start()
175 for t in threads:
176 t.join()
177 onosCountersRaw.append( t.result )
178 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700179 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700180 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700181 onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700182 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700183 main.log.error( "Could not parse counters response from " +
Devin Lim142b5342017-07-20 15:22:39 -0700184 str( main.Cluster.active( i ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700185 main.log.warn( repr( onosCountersRaw[ i ] ) )
186 onosCounters.append( [] )
187
188 testCounters = {}
189 # make a list of all the "TestON-*" counters in ONOS
Jon Hall41d39f12016-04-11 22:54:35 -0700190 # lookes like a dict whose keys are the name of the ONOS node and
191 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700192 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700193 # }
194 # NOTE: There is an assumtion that all nodes are active
195 # based on the above for loops
196 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700197 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700198 if 'TestON' in key:
Devin Lim142b5342017-07-20 15:22:39 -0700199 node = str( main.Cluster.active( controller[ 0 ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700200 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700201 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700202 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700203 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700204 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700205 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700206 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
207 if all( tmp ):
208 consistent = main.TRUE
209 else:
210 consistent = main.FALSE
211 main.log.error( "ONOS nodes have different values for counters:\n" +
212 testCounters )
213 return ( onosCounters, consistent )
214 except Exception:
215 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700216 main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700217
    def counterCheck( self, counterName, counterValue ):
        """
        Checks that TestON counters are consistent across all nodes and that
        specified counter is in ONOS with the given value

        counterName: name of the distributed counter to check
        counterValue: the exact value every node is expected to report
        Returns main.TRUE only if the counters are consistent across nodes
        AND every node reports counterName == counterValue.
        """
        try:
            correctResults = main.TRUE
            # Get onos counters results and consistentCheck
            onosCounters, consistent = self.consistentCheck()
            # Check for correct values
            for i in range( len( main.Cluster.active() ) ):
                current = onosCounters[ i ]
                onosValue = None
                try:
                    # AttributeError here means this node's entry was not a
                    # dict (e.g. the [] placeholder for an unparsable reply)
                    onosValue = current.get( counterName )
                except AttributeError:
                    node = str( main.Cluster.active( i ) )
                    main.log.exception( node + " counters result " +
                                        "is not as expected" )
                    correctResults = main.FALSE
                if onosValue == counterValue:
                    main.log.info( counterName + " counter value is correct" )
                else:
                    main.log.error( counterName +
                                    " counter value is incorrect," +
                                    " expected value: " + str( counterValue ) +
                                    " current value: " + str( onosValue ) )
                    correctResults = main.FALSE
            return consistent and correctResults
        except Exception:
            main.log.exception( "" )
            main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700250
251 def consistentLeaderboards( self, nodes ):
252 TOPIC = 'org.onosproject.election'
253 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700254 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700255 for n in range( 5 ): # Retry in case election is still happening
256 leaderList = []
257 # Get all leaderboards
258 for cli in nodes:
259 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
260 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700261 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700262 leaderList is not None
263 main.log.debug( leaderList )
264 main.log.warn( result )
265 if result:
266 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700267 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700268 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
269 return ( result, leaderList )
270
    def nodesCheck( self, nodes ):
        """Check that every queried node sees the whole cluster as READY.

        nodes: iterable of node objects; each one's `nodes` command is run
               in its own thread.
        Returns True only if every node's output parses as JSON and lists
        exactly the active-cluster IPs in the 'READY' state; a parse error
        counts as failure for that node.
        """
        nodesOutput = []
        results = True
        threads = []
        # fan out: one thread per queried node
        for node in nodes:
            t = main.Thread( target=node.nodes,
                             name="nodes-" + str( node ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            nodesOutput.append( t.result )
        # expected membership: IPs of all currently-active cluster members
        ips = sorted( main.Cluster.getIps( activeOnly=True ) )
        for i in nodesOutput:
            try:
                current = json.loads( i )
                activeIps = []
                currentResult = False
                for node in current:
                    if node[ 'state' ] == 'READY':
                        activeIps.append( node[ 'ip' ] )
                activeIps.sort()
                # exact match required: no missing and no extra READY nodes
                if ips == activeIps:
                    currentResult = True
            except ( ValueError, TypeError ):
                main.log.error( "Error parsing nodes output" )
                main.log.warn( repr( i ) )
                currentResult = False
            results = results and currentResult
        return results
Jon Hallca319892017-06-15 15:25:22 -0700303
Devin Lim58046fa2017-07-05 16:55:00 -0700304 def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
305 # GRAPHS
306 # NOTE: important params here:
307 # job = name of Jenkins job
308 # Plot Name = Plot-HA, only can be used if multiple plots
309 # index = The number of the graph under plot name
310 job = testName
311 graphs = '<ac:structured-macro ac:name="html">\n'
312 graphs += '<ac:plain-text-body><![CDATA[\n'
313 graphs += '<iframe src="https://onos-jenkins.onlab.us/job/' + job +\
314 '/plot/' + plotName + '/getPlot?index=' + str( index ) +\
315 '&width=500&height=300"' +\
316 'noborder="0" width="500" height="300" scrolling="yes" ' +\
317 'seamless="seamless"></iframe>\n'
318 graphs += ']]></ac:plain-text-body>\n'
319 graphs += '</ac:structured-macro>\n'
320 main.log.wiki( graphs )
Jon Hallca319892017-06-15 15:25:22 -0700321
    def initialSetUp( self, serviceClean=False ):
        """
        Rest of the initial test setup, run after ONOS is installed:
        optionally start a Mininet packet capture, optionally revert
        onos.conf/onos.service customizations, verify all nodes are READY
        (aborting the test if not), activate the apps listed in the params
        file, apply ONOS_Configuration settings, and check app IDs.

        serviceClean: when True, `git checkout` the onos.conf and
                      onos.service files on the bench to undo local edits.
        """

        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        # retry nodesCheck up to 5 times; it needs every node READY
        nodeResults = utilities.retry( self.nodesCheck,
                                       False,
                                       args=[ main.Cluster.active() ],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # dump each node's inactive components to aid debugging, then abort
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This should be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700410
Jon Hallca319892017-06-15 15:25:22 -0700411 def commonChecks( self ):
412 # TODO: make this assertable or assert in here?
413 self.topicsCheck()
414 self.partitionsCheck()
415 self.pendingMapCheck()
416 self.appCheck()
417
418 def topicsCheck( self, extraTopics=[] ):
419 """
420 Check for work partition topics in leaders output
421 """
422 leaders = main.Cluster.next().leaders()
423 missing = False
424 try:
425 if leaders:
426 parsedLeaders = json.loads( leaders )
427 output = json.dumps( parsedLeaders,
428 sort_keys=True,
429 indent=4,
430 separators=( ',', ': ' ) )
431 main.log.debug( "Leaders: " + output )
432 # check for all intent partitions
433 topics = []
434 for i in range( 14 ):
435 topics.append( "work-partition-" + str( i ) )
436 topics += extraTopics
437 main.log.debug( topics )
438 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
439 for topic in topics:
440 if topic not in ONOStopics:
441 main.log.error( "Error: " + topic +
442 " not in leaders" )
443 missing = True
444 else:
445 main.log.error( "leaders() returned None" )
446 except ( ValueError, TypeError ):
447 main.log.exception( "Error parsing leaders" )
448 main.log.error( repr( leaders ) )
449 if missing:
450 #NOTE Can we refactor this into the Cluster class? Maybe an option to print the output of a command from each node?
451 for ctrl in main.Cluster.active():
452 response = ctrl.CLI.leaders( jsonFormat=False )
453 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
454 str( response ) )
455 return missing
456
457 def partitionsCheck( self ):
458 # TODO: return something assertable
459 partitions = main.Cluster.next().partitions()
460 try:
461 if partitions:
462 parsedPartitions = json.loads( partitions )
463 output = json.dumps( parsedPartitions,
464 sort_keys=True,
465 indent=4,
466 separators=( ',', ': ' ) )
467 main.log.debug( "Partitions: " + output )
468 # TODO check for a leader in all paritions
469 # TODO check for consistency among nodes
470 else:
471 main.log.error( "partitions() returned None" )
472 except ( ValueError, TypeError ):
473 main.log.exception( "Error parsing partitions" )
474 main.log.error( repr( partitions ) )
475
476 def pendingMapCheck( self ):
477 pendingMap = main.Cluster.next().pendingMap()
478 try:
479 if pendingMap:
480 parsedPending = json.loads( pendingMap )
481 output = json.dumps( parsedPending,
482 sort_keys=True,
483 indent=4,
484 separators=( ',', ': ' ) )
485 main.log.debug( "Pending map: " + output )
486 # TODO check something here?
487 else:
488 main.log.error( "pendingMap() returned None" )
489 except ( ValueError, TypeError ):
490 main.log.exception( "Error parsing pending map" )
491 main.log.error( repr( pendingMap ) )
492
493 def appCheck( self ):
494 """
495 Check App IDs on all nodes
496 """
497 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
498 appResults = main.Cluster.command( "appToIDCheck" )
499 appCheck = all( i == main.TRUE for i in appResults )
500 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700501 ctrl = main.Cluster.active( 0 )
Jon Hallca319892017-06-15 15:25:22 -0700502 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
503 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
504 return appCheck
505
Jon Halle0f0b342017-04-18 11:43:47 -0700506 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
507 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700508 completedValues = main.Cluster.command( "workQueueTotalCompleted",
509 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700510 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700511 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700512 completedResult = all( completedResults )
513 if not completedResult:
514 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
515 workQueueName, completed, completedValues ) )
516
517 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700518 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
519 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700520 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700521 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700522 inProgressResult = all( inProgressResults )
523 if not inProgressResult:
524 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
525 workQueueName, inProgress, inProgressValues ) )
526
527 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700528 pendingValues = main.Cluster.command( "workQueueTotalPending",
529 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700530 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700531 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700532 pendingResult = all( pendingResults )
533 if not pendingResult:
534 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
535 workQueueName, pending, pendingValues ) )
536 return completedResult and inProgressResult and pendingResult
537
Devin Lim58046fa2017-07-05 16:55:00 -0700538 def assignDevices( self, main ):
539 """
540 Assign devices to controllers
541 """
542 import re
Devin Lim58046fa2017-07-05 16:55:00 -0700543 assert main, "main not defined"
544 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700545
546 main.case( "Assigning devices to controllers" )
547 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
548 "and check that an ONOS node becomes the " + \
549 "master of the device."
550 main.step( "Assign switches to controllers" )
551
Jon Hallca319892017-06-15 15:25:22 -0700552 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -0700553 swList = []
554 for i in range( 1, 29 ):
555 swList.append( "s" + str( i ) )
556 main.Mininet1.assignSwController( sw=swList, ip=ipList )
557
558 mastershipCheck = main.TRUE
559 for i in range( 1, 29 ):
560 response = main.Mininet1.getSwController( "s" + str( i ) )
561 try:
562 main.log.info( str( response ) )
563 except Exception:
564 main.log.info( repr( response ) )
Devin Lim142b5342017-07-20 15:22:39 -0700565 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -0700566 if re.search( "tcp:" + ctrl.ipAddress, response ):
Devin Lim58046fa2017-07-05 16:55:00 -0700567 mastershipCheck = mastershipCheck and main.TRUE
568 else:
Jon Hallca319892017-06-15 15:25:22 -0700569 main.log.error( "Error, node " + repr( ctrl )+ " is " +
Devin Lim58046fa2017-07-05 16:55:00 -0700570 "not in the list of controllers s" +
571 str( i ) + " is connecting to." )
572 mastershipCheck = main.FALSE
573 utilities.assert_equals(
574 expect=main.TRUE,
575 actual=mastershipCheck,
576 onpass="Switch mastership assigned correctly",
577 onfail="Switches not assigned correctly to controllers" )
Jon Hallca319892017-06-15 15:25:22 -0700578
Devin Lim58046fa2017-07-05 16:55:00 -0700579 def assignIntents( self, main ):
580 """
581 Assign intents
582 """
583 import time
584 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700585 assert main, "main not defined"
586 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700587 try:
588 main.HAlabels
589 except ( NameError, AttributeError ):
590 main.log.error( "main.HAlabels not defined, setting to []" )
591 main.HAlabels = []
592 try:
593 main.HAdata
594 except ( NameError, AttributeError ):
595 main.log.error( "data not defined, setting to []" )
596 main.HAdata = []
597 main.case( "Adding host Intents" )
598 main.caseExplanation = "Discover hosts by using pingall then " +\
599 "assign predetermined host-to-host intents." +\
600 " After installation, check that the intent" +\
601 " is distributed to all nodes and the state" +\
602 " is INSTALLED"
603
604 # install onos-app-fwd
605 main.step( "Install reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700606 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -0700607 installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700608 utilities.assert_equals( expect=main.TRUE, actual=installResults,
609 onpass="Install fwd successful",
610 onfail="Install fwd failed" )
611
612 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700613 appCheck = self.appCheck()
614 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700615 onpass="App Ids seem to be correct",
616 onfail="Something is wrong with app Ids" )
617
618 main.step( "Discovering Hosts( Via pingall for now )" )
619 # FIXME: Once we have a host discovery mechanism, use that instead
620 # REACTIVE FWD test
621 pingResult = main.FALSE
622 passMsg = "Reactive Pingall test passed"
623 time1 = time.time()
624 pingResult = main.Mininet1.pingall()
625 time2 = time.time()
626 if not pingResult:
627 main.log.warn( "First pingall failed. Trying again..." )
628 pingResult = main.Mininet1.pingall()
629 passMsg += " on the second try"
630 utilities.assert_equals(
631 expect=main.TRUE,
632 actual=pingResult,
633 onpass=passMsg,
634 onfail="Reactive Pingall failed, " +
635 "one or more ping pairs failed" )
636 main.log.info( "Time for pingall: %2f seconds" %
637 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700638 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700639 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700640 # timeout for fwd flows
641 time.sleep( 11 )
642 # uninstall onos-app-fwd
643 main.step( "Uninstall reactive forwarding app" )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700644 uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700645 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
646 onpass="Uninstall fwd successful",
647 onfail="Uninstall fwd failed" )
648
649 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700650 appCheck2 = self.appCheck()
651 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700652 onpass="App Ids seem to be correct",
653 onfail="Something is wrong with app Ids" )
654
655 main.step( "Add host intents via cli" )
656 intentIds = []
657 # TODO: move the host numbers to params
658 # Maybe look at all the paths we ping?
659 intentAddResult = True
660 hostResult = main.TRUE
661 for i in range( 8, 18 ):
662 main.log.info( "Adding host intent between h" + str( i ) +
663 " and h" + str( i + 10 ) )
664 host1 = "00:00:00:00:00:" + \
665 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
666 host2 = "00:00:00:00:00:" + \
667 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
668 # NOTE: getHost can return None
Jon Hallca319892017-06-15 15:25:22 -0700669 host1Dict = onosCli.CLI.getHost( host1 )
670 host2Dict = onosCli.CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700671 host1Id = None
672 host2Id = None
673 if host1Dict and host2Dict:
674 host1Id = host1Dict.get( 'id', None )
675 host2Id = host2Dict.get( 'id', None )
676 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700677 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700678 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700679 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700680 if tmpId:
681 main.log.info( "Added intent with id: " + tmpId )
682 intentIds.append( tmpId )
683 else:
684 main.log.error( "addHostIntent returned: " +
685 repr( tmpId ) )
686 else:
687 main.log.error( "Error, getHost() failed for h" + str( i ) +
688 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700689 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700690 try:
Jon Hallca319892017-06-15 15:25:22 -0700691 output = json.dumps( json.loads( hosts ),
692 sort_keys=True,
693 indent=4,
694 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700695 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700696 output = repr( hosts )
697 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700698 hostResult = main.FALSE
699 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
700 onpass="Found a host id for each host",
701 onfail="Error looking up host ids" )
702
703 intentStart = time.time()
704 onosIds = onosCli.getAllIntentsId()
705 main.log.info( "Submitted intents: " + str( intentIds ) )
706 main.log.info( "Intents in ONOS: " + str( onosIds ) )
707 for intent in intentIds:
708 if intent in onosIds:
709 pass # intent submitted is in onos
710 else:
711 intentAddResult = False
712 if intentAddResult:
713 intentStop = time.time()
714 else:
715 intentStop = None
716 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700717 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700718 intentStates = []
719 installedCheck = True
720 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
721 count = 0
722 try:
723 for intent in json.loads( intents ):
724 state = intent.get( 'state', None )
725 if "INSTALLED" not in state:
726 installedCheck = False
727 intentId = intent.get( 'id', None )
728 intentStates.append( ( intentId, state ) )
729 except ( ValueError, TypeError ):
730 main.log.exception( "Error parsing intents" )
731 # add submitted intents not in the store
732 tmplist = [ i for i, s in intentStates ]
733 missingIntents = False
734 for i in intentIds:
735 if i not in tmplist:
736 intentStates.append( ( i, " - " ) )
737 missingIntents = True
738 intentStates.sort()
739 for i, s in intentStates:
740 count += 1
741 main.log.info( "%-6s%-15s%-15s" %
742 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700743 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700744
745 intentAddResult = bool( intentAddResult and not missingIntents and
746 installedCheck )
747 if not intentAddResult:
748 main.log.error( "Error in pushing host intents to ONOS" )
749
750 main.step( "Intent Anti-Entropy dispersion" )
751 for j in range( 100 ):
752 correct = True
753 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700754 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700755 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700756 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700757 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700758 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700759 str( sorted( onosIds ) ) )
760 if sorted( ids ) != sorted( intentIds ):
761 main.log.warn( "Set of intent IDs doesn't match" )
762 correct = False
763 break
764 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700765 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700766 for intent in intents:
767 if intent[ 'state' ] != "INSTALLED":
768 main.log.warn( "Intent " + intent[ 'id' ] +
769 " is " + intent[ 'state' ] )
770 correct = False
771 break
772 if correct:
773 break
774 else:
775 time.sleep( 1 )
776 if not intentStop:
777 intentStop = time.time()
778 global gossipTime
779 gossipTime = intentStop - intentStart
780 main.log.info( "It took about " + str( gossipTime ) +
781 " seconds for all intents to appear in each node" )
782 append = False
783 title = "Gossip Intents"
784 count = 1
785 while append is False:
786 curTitle = title + str( count )
787 if curTitle not in main.HAlabels:
788 main.HAlabels.append( curTitle )
789 main.HAdata.append( str( gossipTime ) )
790 append = True
791 else:
792 count += 1
793 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700794 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700795 utilities.assert_greater_equals(
796 expect=maxGossipTime, actual=gossipTime,
797 onpass="ECM anti-entropy for intents worked within " +
798 "expected time",
799 onfail="Intent ECM anti-entropy took too long. " +
800 "Expected time:{}, Actual time:{}".format( maxGossipTime,
801 gossipTime ) )
802 if gossipTime <= maxGossipTime:
803 intentAddResult = True
804
Jon Hallca319892017-06-15 15:25:22 -0700805 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700806 if not intentAddResult or "key" in pendingMap:
807 import time
808 installedCheck = True
809 main.log.info( "Sleeping 60 seconds to see if intents are found" )
810 time.sleep( 60 )
811 onosIds = onosCli.getAllIntentsId()
812 main.log.info( "Submitted intents: " + str( intentIds ) )
813 main.log.info( "Intents in ONOS: " + str( onosIds ) )
814 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700815 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700816 intentStates = []
817 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
818 count = 0
819 try:
820 for intent in json.loads( intents ):
821 # Iter through intents of a node
822 state = intent.get( 'state', None )
823 if "INSTALLED" not in state:
824 installedCheck = False
825 intentId = intent.get( 'id', None )
826 intentStates.append( ( intentId, state ) )
827 except ( ValueError, TypeError ):
828 main.log.exception( "Error parsing intents" )
829 # add submitted intents not in the store
830 tmplist = [ i for i, s in intentStates ]
831 for i in intentIds:
832 if i not in tmplist:
833 intentStates.append( ( i, " - " ) )
834 intentStates.sort()
835 for i, s in intentStates:
836 count += 1
837 main.log.info( "%-6s%-15s%-15s" %
838 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700839 self.topicsCheck( [ "org.onosproject.election" ] )
840 self.partitionsCheck()
841 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700842
Jon Hallca319892017-06-15 15:25:22 -0700843 def pingAcrossHostIntent( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -0700844 """
845 Ping across added host intents
846 """
847 import json
848 import time
Devin Lim58046fa2017-07-05 16:55:00 -0700849 assert main, "main not defined"
850 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700851 main.case( "Verify connectivity by sending traffic across Intents" )
852 main.caseExplanation = "Ping across added host intents to check " +\
853 "functionality and check the state of " +\
854 "the intent"
855
Jon Hallca319892017-06-15 15:25:22 -0700856 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700857 main.step( "Check Intent state" )
858 installedCheck = False
859 loopCount = 0
860 while not installedCheck and loopCount < 40:
861 installedCheck = True
862 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700863 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700864 intentStates = []
865 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
866 count = 0
867 # Iter through intents of a node
868 try:
869 for intent in json.loads( intents ):
870 state = intent.get( 'state', None )
871 if "INSTALLED" not in state:
872 installedCheck = False
Jon Hall8bafdc02017-09-05 11:36:26 -0700873 main.log.debug( "Failed intent: " + str( intent ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700874 intentId = intent.get( 'id', None )
875 intentStates.append( ( intentId, state ) )
876 except ( ValueError, TypeError ):
877 main.log.exception( "Error parsing intents." )
878 # Print states
879 intentStates.sort()
880 for i, s in intentStates:
881 count += 1
882 main.log.info( "%-6s%-15s%-15s" %
883 ( str( count ), str( i ), str( s ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700884 if not installedCheck:
885 time.sleep( 1 )
886 loopCount += 1
887 utilities.assert_equals( expect=True, actual=installedCheck,
888 onpass="Intents are all INSTALLED",
889 onfail="Intents are not all in " +
890 "INSTALLED state" )
891
892 main.step( "Ping across added host intents" )
893 PingResult = main.TRUE
894 for i in range( 8, 18 ):
895 ping = main.Mininet1.pingHost( src="h" + str( i ),
896 target="h" + str( i + 10 ) )
897 PingResult = PingResult and ping
898 if ping == main.FALSE:
899 main.log.warn( "Ping failed between h" + str( i ) +
900 " and h" + str( i + 10 ) )
901 elif ping == main.TRUE:
902 main.log.info( "Ping test passed!" )
903 # Don't set PingResult or you'd override failures
904 if PingResult == main.FALSE:
905 main.log.error(
906 "Intents have not been installed correctly, pings failed." )
907 # TODO: pretty print
Devin Lim58046fa2017-07-05 16:55:00 -0700908 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700909 tmpIntents = onosCli.CLI.intents()
Jon Hallca319892017-06-15 15:25:22 -0700910 output = json.dumps( json.loads( tmpIntents ),
911 sort_keys=True,
912 indent=4,
913 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700914 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700915 output = repr( tmpIntents )
916 main.log.debug( "ONOS1 intents: " + output )
Devin Lim58046fa2017-07-05 16:55:00 -0700917 utilities.assert_equals(
918 expect=main.TRUE,
919 actual=PingResult,
920 onpass="Intents have been installed correctly and pings work",
921 onfail="Intents have not been installed correctly, pings failed." )
922
923 main.step( "Check leadership of topics" )
Jon Hallca319892017-06-15 15:25:22 -0700924 topicsCheck = self.topicsCheck()
925 utilities.assert_equals( expect=False, actual=topicsCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700926 onpass="intent Partitions is in leaders",
Jon Hallca319892017-06-15 15:25:22 -0700927 onfail="Some topics were lost" )
928 self.partitionsCheck()
929 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700930
931 if not installedCheck:
932 main.log.info( "Waiting 60 seconds to see if the state of " +
933 "intents change" )
934 time.sleep( 60 )
935 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700936 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700937 intentStates = []
938 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
939 count = 0
940 # Iter through intents of a node
941 try:
942 for intent in json.loads( intents ):
943 state = intent.get( 'state', None )
944 if "INSTALLED" not in state:
945 installedCheck = False
946 intentId = intent.get( 'id', None )
947 intentStates.append( ( intentId, state ) )
948 except ( ValueError, TypeError ):
949 main.log.exception( "Error parsing intents." )
950 intentStates.sort()
951 for i, s in intentStates:
952 count += 1
953 main.log.info( "%-6s%-15s%-15s" %
954 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700955 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700956
Devin Lim58046fa2017-07-05 16:55:00 -0700957 # Print flowrules
Devin Lime9f0ccf2017-08-11 17:25:12 -0700958 main.log.debug( onosCli.CLI.flows() )
Devin Lim58046fa2017-07-05 16:55:00 -0700959 main.step( "Wait a minute then ping again" )
960 # the wait is above
961 PingResult = main.TRUE
962 for i in range( 8, 18 ):
963 ping = main.Mininet1.pingHost( src="h" + str( i ),
964 target="h" + str( i + 10 ) )
965 PingResult = PingResult and ping
966 if ping == main.FALSE:
967 main.log.warn( "Ping failed between h" + str( i ) +
968 " and h" + str( i + 10 ) )
969 elif ping == main.TRUE:
970 main.log.info( "Ping test passed!" )
971 # Don't set PingResult or you'd override failures
972 if PingResult == main.FALSE:
973 main.log.error(
974 "Intents have not been installed correctly, pings failed." )
975 # TODO: pretty print
Jon Hallca319892017-06-15 15:25:22 -0700976 main.log.warn( str( onosCli.name ) + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -0700977 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700978 tmpIntents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700979 main.log.warn( json.dumps( json.loads( tmpIntents ),
980 sort_keys=True,
981 indent=4,
982 separators=( ',', ': ' ) ) )
983 except ( ValueError, TypeError ):
984 main.log.warn( repr( tmpIntents ) )
985 utilities.assert_equals(
986 expect=main.TRUE,
987 actual=PingResult,
988 onpass="Intents have been installed correctly and pings work",
989 onfail="Intents have not been installed correctly, pings failed." )
990
Devin Lim142b5342017-07-20 15:22:39 -0700991 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700992 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700993 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700994 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700995 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700996 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -0700997 actual=rolesNotNull,
998 onpass="Each device has a master",
999 onfail="Some devices don't have a master assigned" )
1000
Devin Lim142b5342017-07-20 15:22:39 -07001001 def checkTheRole( self ):
1002 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -07001003 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -07001004 consistentMastership = True
1005 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001006 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001007 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001008 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001009 main.log.error( "Error in getting " + node + " roles" )
1010 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001011 repr( ONOSMastership[ i ] ) )
1012 rolesResults = False
1013 utilities.assert_equals(
1014 expect=True,
1015 actual=rolesResults,
1016 onpass="No error in reading roles output",
1017 onfail="Error in reading roles from ONOS" )
1018
1019 main.step( "Check for consistency in roles from each controller" )
1020 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1021 main.log.info(
1022 "Switch roles are consistent across all ONOS nodes" )
1023 else:
1024 consistentMastership = False
1025 utilities.assert_equals(
1026 expect=True,
1027 actual=consistentMastership,
1028 onpass="Switch roles are consistent across all ONOS nodes",
1029 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -07001030 return ONOSMastership, rolesResults, consistentMastership
1031
1032 def checkingIntents( self ):
1033 main.step( "Get the intents from each controller" )
1034 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
1035 intentsResults = True
1036 for i in range( len( ONOSIntents ) ):
1037 node = str( main.Cluster.active( i ) )
1038 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1039 main.log.error( "Error in getting " + node + " intents" )
1040 main.log.warn( node + " intents response: " +
1041 repr( ONOSIntents[ i ] ) )
1042 intentsResults = False
1043 utilities.assert_equals(
1044 expect=True,
1045 actual=intentsResults,
1046 onpass="No error in reading intents output",
1047 onfail="Error in reading intents from ONOS" )
1048 return ONOSIntents, intentsResults
1049
1050 def readingState( self, main ):
1051 """
1052 Reading state of ONOS
1053 """
1054 import json
1055 import time
1056 assert main, "main not defined"
1057 assert utilities.assert_equals, "utilities.assert_equals not defined"
1058 try:
1059 from tests.dependencies.topology import Topology
1060 except ImportError:
1061 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001062 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001063 try:
1064 main.topoRelated
1065 except ( NameError, AttributeError ):
1066 main.topoRelated = Topology()
1067 main.case( "Setting up and gathering data for current state" )
1068 # The general idea for this test case is to pull the state of
1069 # ( intents,flows, topology,... ) from each ONOS node
1070 # We can then compare them with each other and also with past states
1071
1072 global mastershipState
1073 mastershipState = '[]'
1074
1075 self.checkRoleNotNull()
1076
1077 main.step( "Get the Mastership of each switch from each controller" )
1078 mastershipCheck = main.FALSE
1079
1080 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001081
1082 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001083 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001084 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001085 try:
1086 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001087 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001088 json.dumps(
1089 json.loads( ONOSMastership[ i ] ),
1090 sort_keys=True,
1091 indent=4,
1092 separators=( ',', ': ' ) ) )
1093 except ( ValueError, TypeError ):
1094 main.log.warn( repr( ONOSMastership[ i ] ) )
1095 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001096 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001097 mastershipState = ONOSMastership[ 0 ]
1098
Devin Lim142b5342017-07-20 15:22:39 -07001099
Devin Lim58046fa2017-07-05 16:55:00 -07001100 global intentState
1101 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001102 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001103 intentCheck = main.FALSE
1104 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001105
Devin Lim58046fa2017-07-05 16:55:00 -07001106
1107 main.step( "Check for consistency in Intents from each controller" )
1108 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1109 main.log.info( "Intents are consistent across all ONOS " +
1110 "nodes" )
1111 else:
1112 consistentIntents = False
1113 main.log.error( "Intents not consistent" )
1114 utilities.assert_equals(
1115 expect=True,
1116 actual=consistentIntents,
1117 onpass="Intents are consistent across all ONOS nodes",
1118 onfail="ONOS nodes have different views of intents" )
1119
1120 if intentsResults:
1121 # Try to make it easy to figure out what is happening
1122 #
1123 # Intent ONOS1 ONOS2 ...
1124 # 0x01 INSTALLED INSTALLING
1125 # ... ... ...
1126 # ... ... ...
1127 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001128 for ctrl in main.Cluster.active():
1129 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001130 main.log.warn( title )
1131 # get all intent keys in the cluster
1132 keys = []
1133 try:
1134 # Get the set of all intent keys
1135 for nodeStr in ONOSIntents:
1136 node = json.loads( nodeStr )
1137 for intent in node:
1138 keys.append( intent.get( 'id' ) )
1139 keys = set( keys )
1140 # For each intent key, print the state on each node
1141 for key in keys:
1142 row = "%-13s" % key
1143 for nodeStr in ONOSIntents:
1144 node = json.loads( nodeStr )
1145 for intent in node:
1146 if intent.get( 'id', "Error" ) == key:
1147 row += "%-15s" % intent.get( 'state' )
1148 main.log.warn( row )
1149 # End of intent state table
1150 except ValueError as e:
1151 main.log.exception( e )
1152 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1153
1154 if intentsResults and not consistentIntents:
1155 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001156 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001157 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1158 sort_keys=True,
1159 indent=4,
1160 separators=( ',', ': ' ) ) )
1161 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001162 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001163 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001164 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001165 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1166 sort_keys=True,
1167 indent=4,
1168 separators=( ',', ': ' ) ) )
1169 else:
Jon Hallca319892017-06-15 15:25:22 -07001170 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001171 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001172 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001173 intentState = ONOSIntents[ 0 ]
1174
1175 main.step( "Get the flows from each controller" )
1176 global flowState
1177 flowState = []
Devin Lim142b5342017-07-20 15:22:39 -07001178 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001179 ONOSFlowsJson = []
1180 flowCheck = main.FALSE
1181 consistentFlows = True
1182 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001183 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001184 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001185 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001186 main.log.error( "Error in getting " + node + " flows" )
1187 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001188 repr( ONOSFlows[ i ] ) )
1189 flowsResults = False
1190 ONOSFlowsJson.append( None )
1191 else:
1192 try:
1193 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1194 except ( ValueError, TypeError ):
1195 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001196 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001197 " response as json." )
1198 main.log.error( repr( ONOSFlows[ i ] ) )
1199 ONOSFlowsJson.append( None )
1200 flowsResults = False
1201 utilities.assert_equals(
1202 expect=True,
1203 actual=flowsResults,
1204 onpass="No error in reading flows output",
1205 onfail="Error in reading flows from ONOS" )
1206
1207 main.step( "Check for consistency in Flows from each controller" )
1208 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1209 if all( tmp ):
1210 main.log.info( "Flow count is consistent across all ONOS nodes" )
1211 else:
1212 consistentFlows = False
1213 utilities.assert_equals(
1214 expect=True,
1215 actual=consistentFlows,
1216 onpass="The flow count is consistent across all ONOS nodes",
1217 onfail="ONOS nodes have different flow counts" )
1218
1219 if flowsResults and not consistentFlows:
1220 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001221 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001222 try:
1223 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001224 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001225 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1226 indent=4, separators=( ',', ': ' ) ) )
1227 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001228 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001229 repr( ONOSFlows[ i ] ) )
1230 elif flowsResults and consistentFlows:
1231 flowCheck = main.TRUE
1232 flowState = ONOSFlows[ 0 ]
1233
1234 main.step( "Get the OF Table entries" )
1235 global flows
1236 flows = []
1237 for i in range( 1, 29 ):
1238 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1239 if flowCheck == main.FALSE:
1240 for table in flows:
1241 main.log.warn( table )
1242 # TODO: Compare switch flow tables with ONOS flow tables
1243
1244 main.step( "Start continuous pings" )
1245 main.Mininet2.pingLong(
1246 src=main.params[ 'PING' ][ 'source1' ],
1247 target=main.params[ 'PING' ][ 'target1' ],
1248 pingTime=500 )
1249 main.Mininet2.pingLong(
1250 src=main.params[ 'PING' ][ 'source2' ],
1251 target=main.params[ 'PING' ][ 'target2' ],
1252 pingTime=500 )
1253 main.Mininet2.pingLong(
1254 src=main.params[ 'PING' ][ 'source3' ],
1255 target=main.params[ 'PING' ][ 'target3' ],
1256 pingTime=500 )
1257 main.Mininet2.pingLong(
1258 src=main.params[ 'PING' ][ 'source4' ],
1259 target=main.params[ 'PING' ][ 'target4' ],
1260 pingTime=500 )
1261 main.Mininet2.pingLong(
1262 src=main.params[ 'PING' ][ 'source5' ],
1263 target=main.params[ 'PING' ][ 'target5' ],
1264 pingTime=500 )
1265 main.Mininet2.pingLong(
1266 src=main.params[ 'PING' ][ 'source6' ],
1267 target=main.params[ 'PING' ][ 'target6' ],
1268 pingTime=500 )
1269 main.Mininet2.pingLong(
1270 src=main.params[ 'PING' ][ 'source7' ],
1271 target=main.params[ 'PING' ][ 'target7' ],
1272 pingTime=500 )
1273 main.Mininet2.pingLong(
1274 src=main.params[ 'PING' ][ 'source8' ],
1275 target=main.params[ 'PING' ][ 'target8' ],
1276 pingTime=500 )
1277 main.Mininet2.pingLong(
1278 src=main.params[ 'PING' ][ 'source9' ],
1279 target=main.params[ 'PING' ][ 'target9' ],
1280 pingTime=500 )
1281 main.Mininet2.pingLong(
1282 src=main.params[ 'PING' ][ 'source10' ],
1283 target=main.params[ 'PING' ][ 'target10' ],
1284 pingTime=500 )
1285
1286 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001287 devices = main.topoRelated.getAll( "devices" )
1288 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1289 ports = main.topoRelated.getAll( "ports" )
1290 links = main.topoRelated.getAll( "links" )
1291 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001292 # Compare json objects for hosts and dataplane clusters
1293
1294 # hosts
1295 main.step( "Host view is consistent across ONOS nodes" )
1296 consistentHostsResult = main.TRUE
1297 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001298 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001299 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1300 if hosts[ controller ] == hosts[ 0 ]:
1301 continue
1302 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001303 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001304 controllerStr +
1305 " is inconsistent with ONOS1" )
1306 main.log.warn( repr( hosts[ controller ] ) )
1307 consistentHostsResult = main.FALSE
1308
1309 else:
Jon Hallca319892017-06-15 15:25:22 -07001310 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001311 controllerStr )
1312 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001313 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001314 " hosts response: " +
1315 repr( hosts[ controller ] ) )
1316 utilities.assert_equals(
1317 expect=main.TRUE,
1318 actual=consistentHostsResult,
1319 onpass="Hosts view is consistent across all ONOS nodes",
1320 onfail="ONOS nodes have different views of hosts" )
1321
1322 main.step( "Each host has an IP address" )
1323 ipResult = main.TRUE
1324 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001325 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001326 if hosts[ controller ]:
1327 for host in hosts[ controller ]:
1328 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001329 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001330 controllerStr + ": " + str( host ) )
1331 ipResult = main.FALSE
1332 utilities.assert_equals(
1333 expect=main.TRUE,
1334 actual=ipResult,
1335 onpass="The ips of the hosts aren't empty",
1336 onfail="The ip of at least one host is missing" )
1337
1338 # Strongly connected clusters of devices
1339 main.step( "Cluster view is consistent across ONOS nodes" )
1340 consistentClustersResult = main.TRUE
1341 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001342 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001343 if "Error" not in clusters[ controller ]:
1344 if clusters[ controller ] == clusters[ 0 ]:
1345 continue
1346 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001347 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001348 " is inconsistent with ONOS1" )
1349 consistentClustersResult = main.FALSE
1350
1351 else:
1352 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001353 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001354 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001355 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001356 " clusters response: " +
1357 repr( clusters[ controller ] ) )
1358 utilities.assert_equals(
1359 expect=main.TRUE,
1360 actual=consistentClustersResult,
1361 onpass="Clusters view is consistent across all ONOS nodes",
1362 onfail="ONOS nodes have different views of clusters" )
1363 if not consistentClustersResult:
1364 main.log.debug( clusters )
1365
1366 # there should always only be one cluster
1367 main.step( "Cluster view correct across ONOS nodes" )
1368 try:
1369 numClusters = len( json.loads( clusters[ 0 ] ) )
1370 except ( ValueError, TypeError ):
1371 main.log.exception( "Error parsing clusters[0]: " +
1372 repr( clusters[ 0 ] ) )
1373 numClusters = "ERROR"
1374 utilities.assert_equals(
1375 expect=1,
1376 actual=numClusters,
1377 onpass="ONOS shows 1 SCC",
1378 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1379
1380 main.step( "Comparing ONOS topology to MN" )
1381 devicesResults = main.TRUE
1382 linksResults = main.TRUE
1383 hostsResults = main.TRUE
1384 mnSwitches = main.Mininet1.getSwitches()
1385 mnLinks = main.Mininet1.getLinks()
1386 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001387 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001388 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001389 currentDevicesResult = main.topoRelated.compareDevicePort(
1390 main.Mininet1, controller,
1391 mnSwitches, devices, ports )
1392 utilities.assert_equals( expect=main.TRUE,
1393 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001394 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001395 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001396 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001397 " Switches view is incorrect" )
1398
1399 currentLinksResult = main.topoRelated.compareBase( links, controller,
1400 main.Mininet1.compareLinks,
1401 [ mnSwitches, mnLinks ] )
1402 utilities.assert_equals( expect=main.TRUE,
1403 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001404 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001405 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001406 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001407 " links view is incorrect" )
1408
1409 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1410 currentHostsResult = main.Mininet1.compareHosts(
1411 mnHosts,
1412 hosts[ controller ] )
1413 else:
1414 currentHostsResult = main.FALSE
1415 utilities.assert_equals( expect=main.TRUE,
1416 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001417 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001418 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001419 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001420 " hosts don't match Mininet" )
1421
1422 devicesResults = devicesResults and currentDevicesResult
1423 linksResults = linksResults and currentLinksResult
1424 hostsResults = hostsResults and currentHostsResult
1425
1426 main.step( "Device information is correct" )
1427 utilities.assert_equals(
1428 expect=main.TRUE,
1429 actual=devicesResults,
1430 onpass="Device information is correct",
1431 onfail="Device information is incorrect" )
1432
1433 main.step( "Links are correct" )
1434 utilities.assert_equals(
1435 expect=main.TRUE,
1436 actual=linksResults,
1437 onpass="Link are correct",
1438 onfail="Links are incorrect" )
1439
1440 main.step( "Hosts are correct" )
1441 utilities.assert_equals(
1442 expect=main.TRUE,
1443 actual=hostsResults,
1444 onpass="Hosts are correct",
1445 onfail="Hosts are incorrect" )
1446
1447 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001448 """
1449 Check for basic functionality with distributed primitives
1450 """
Jon Halle0f0b342017-04-18 11:43:47 -07001451 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001452 try:
1453 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001454 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001455 assert main.pCounterName, "main.pCounterName not defined"
1456 assert main.onosSetName, "main.onosSetName not defined"
1457 # NOTE: assert fails if value is 0/None/Empty/False
1458 try:
1459 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001460 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001461 main.log.error( "main.pCounterValue not defined, setting to 0" )
1462 main.pCounterValue = 0
1463 try:
1464 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001465 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001466 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001467 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001468 # Variables for the distributed primitives tests. These are local only
1469 addValue = "a"
1470 addAllValue = "a b c d e f"
1471 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001472 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001473 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001474 workQueueName = "TestON-Queue"
1475 workQueueCompleted = 0
1476 workQueueInProgress = 0
1477 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001478
1479 description = "Check for basic functionality with distributed " +\
1480 "primitives"
1481 main.case( description )
1482 main.caseExplanation = "Test the methods of the distributed " +\
1483 "primitives (counters and sets) throught the cli"
1484 # DISTRIBUTED ATOMIC COUNTERS
1485 # Partitioned counters
1486 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001487 pCounters = main.Cluster.command( "counterTestAddAndGet",
1488 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001489 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001490 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001491 main.pCounterValue += 1
1492 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001493 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001494 pCounterResults = True
1495 for i in addedPValues:
1496 tmpResult = i in pCounters
1497 pCounterResults = pCounterResults and tmpResult
1498 if not tmpResult:
1499 main.log.error( str( i ) + " is not in partitioned "
1500 "counter incremented results" )
1501 utilities.assert_equals( expect=True,
1502 actual=pCounterResults,
1503 onpass="Default counter incremented",
1504 onfail="Error incrementing default" +
1505 " counter" )
1506
1507 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001508 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1509 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001510 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001511 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001512 addedPValues.append( main.pCounterValue )
1513 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001514 # Check that counter incremented numController times
1515 pCounterResults = True
1516 for i in addedPValues:
1517 tmpResult = i in pCounters
1518 pCounterResults = pCounterResults and tmpResult
1519 if not tmpResult:
1520 main.log.error( str( i ) + " is not in partitioned "
1521 "counter incremented results" )
1522 utilities.assert_equals( expect=True,
1523 actual=pCounterResults,
1524 onpass="Default counter incremented",
1525 onfail="Error incrementing default" +
1526 " counter" )
1527
1528 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001529 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001530 utilities.assert_equals( expect=main.TRUE,
1531 actual=incrementCheck,
1532 onpass="Added counters are correct",
1533 onfail="Added counters are incorrect" )
1534
1535 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001536 pCounters = main.Cluster.command( "counterTestAddAndGet",
1537 args=[ main.pCounterName ],
1538 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001539 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001540 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001541 main.pCounterValue += -8
1542 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001543 # Check that counter incremented numController times
1544 pCounterResults = True
1545 for i in addedPValues:
1546 tmpResult = i in pCounters
1547 pCounterResults = pCounterResults and tmpResult
1548 if not tmpResult:
1549 main.log.error( str( i ) + " is not in partitioned "
1550 "counter incremented results" )
1551 utilities.assert_equals( expect=True,
1552 actual=pCounterResults,
1553 onpass="Default counter incremented",
1554 onfail="Error incrementing default" +
1555 " counter" )
1556
1557 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001558 pCounters = main.Cluster.command( "counterTestAddAndGet",
1559 args=[ main.pCounterName ],
1560 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001561 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001562 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001563 main.pCounterValue += 5
1564 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001565
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001566 # Check that counter incremented numController times
1567 pCounterResults = True
1568 for i in addedPValues:
1569 tmpResult = i in pCounters
1570 pCounterResults = pCounterResults and tmpResult
1571 if not tmpResult:
1572 main.log.error( str( i ) + " is not in partitioned "
1573 "counter incremented results" )
1574 utilities.assert_equals( expect=True,
1575 actual=pCounterResults,
1576 onpass="Default counter incremented",
1577 onfail="Error incrementing default" +
1578 " counter" )
1579
1580 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001581 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1582 args=[ main.pCounterName ],
1583 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001584 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001585 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001586 addedPValues.append( main.pCounterValue )
1587 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001588 # Check that counter incremented numController times
1589 pCounterResults = True
1590 for i in addedPValues:
1591 tmpResult = i in pCounters
1592 pCounterResults = pCounterResults and tmpResult
1593 if not tmpResult:
1594 main.log.error( str( i ) + " is not in partitioned "
1595 "counter incremented results" )
1596 utilities.assert_equals( expect=True,
1597 actual=pCounterResults,
1598 onpass="Default counter incremented",
1599 onfail="Error incrementing default" +
1600 " counter" )
1601
1602 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001603 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001604 utilities.assert_equals( expect=main.TRUE,
1605 actual=incrementCheck,
1606 onpass="Added counters are correct",
1607 onfail="Added counters are incorrect" )
1608
1609 # DISTRIBUTED SETS
1610 main.step( "Distributed Set get" )
1611 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001612 getResponses = main.Cluster.command( "setTestGet",
1613 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001614 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001615 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001616 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001617 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001618 current = set( getResponses[ i ] )
1619 if len( current ) == len( getResponses[ i ] ):
1620 # no repeats
1621 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001622 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001623 " has incorrect view" +
1624 " of set " + main.onosSetName + ":\n" +
1625 str( getResponses[ i ] ) )
1626 main.log.debug( "Expected: " + str( main.onosSet ) )
1627 main.log.debug( "Actual: " + str( current ) )
1628 getResults = main.FALSE
1629 else:
1630 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001631 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001632 " has repeat elements in" +
1633 " set " + main.onosSetName + ":\n" +
1634 str( getResponses[ i ] ) )
1635 getResults = main.FALSE
1636 elif getResponses[ i ] == main.ERROR:
1637 getResults = main.FALSE
1638 utilities.assert_equals( expect=main.TRUE,
1639 actual=getResults,
1640 onpass="Set elements are correct",
1641 onfail="Set elements are incorrect" )
1642
1643 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001644 sizeResponses = main.Cluster.command( "setTestSize",
1645 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001646 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001647 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001648 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001649 if size != sizeResponses[ i ]:
1650 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001651 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001652 " expected a size of " + str( size ) +
1653 " for set " + main.onosSetName +
1654 " but got " + str( sizeResponses[ i ] ) )
1655 utilities.assert_equals( expect=main.TRUE,
1656 actual=sizeResults,
1657 onpass="Set sizes are correct",
1658 onfail="Set sizes are incorrect" )
1659
1660 main.step( "Distributed Set add()" )
1661 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001662 addResponses = main.Cluster.command( "setTestAdd",
1663 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001664 # main.TRUE = successfully changed the set
1665 # main.FALSE = action resulted in no change in set
1666 # main.ERROR - Some error in executing the function
1667 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001668 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001669 if addResponses[ i ] == main.TRUE:
1670 # All is well
1671 pass
1672 elif addResponses[ i ] == main.FALSE:
1673 # Already in set, probably fine
1674 pass
1675 elif addResponses[ i ] == main.ERROR:
1676 # Error in execution
1677 addResults = main.FALSE
1678 else:
1679 # unexpected result
1680 addResults = main.FALSE
1681 if addResults != main.TRUE:
1682 main.log.error( "Error executing set add" )
1683
1684 # Check if set is still correct
1685 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001686 getResponses = main.Cluster.command( "setTestGet",
1687 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001688 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001689 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001690 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001691 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001692 current = set( getResponses[ i ] )
1693 if len( current ) == len( getResponses[ i ] ):
1694 # no repeats
1695 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001696 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001697 " of set " + main.onosSetName + ":\n" +
1698 str( getResponses[ i ] ) )
1699 main.log.debug( "Expected: " + str( main.onosSet ) )
1700 main.log.debug( "Actual: " + str( current ) )
1701 getResults = main.FALSE
1702 else:
1703 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001704 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001705 " set " + main.onosSetName + ":\n" +
1706 str( getResponses[ i ] ) )
1707 getResults = main.FALSE
1708 elif getResponses[ i ] == main.ERROR:
1709 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001710 sizeResponses = main.Cluster.command( "setTestSize",
1711 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001712 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001713 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001714 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001715 if size != sizeResponses[ i ]:
1716 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001717 main.log.error( node + " expected a size of " +
1718 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001719 " but got " + str( sizeResponses[ i ] ) )
1720 addResults = addResults and getResults and sizeResults
1721 utilities.assert_equals( expect=main.TRUE,
1722 actual=addResults,
1723 onpass="Set add correct",
1724 onfail="Set add was incorrect" )
1725
1726 main.step( "Distributed Set addAll()" )
1727 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001728 addResponses = main.Cluster.command( "setTestAdd",
1729 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001730 # main.TRUE = successfully changed the set
1731 # main.FALSE = action resulted in no change in set
1732 # main.ERROR - Some error in executing the function
1733 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001734 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001735 if addResponses[ i ] == main.TRUE:
1736 # All is well
1737 pass
1738 elif addResponses[ i ] == main.FALSE:
1739 # Already in set, probably fine
1740 pass
1741 elif addResponses[ i ] == main.ERROR:
1742 # Error in execution
1743 addAllResults = main.FALSE
1744 else:
1745 # unexpected result
1746 addAllResults = main.FALSE
1747 if addAllResults != main.TRUE:
1748 main.log.error( "Error executing set addAll" )
1749
1750 # Check if set is still correct
1751 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001752 getResponses = main.Cluster.command( "setTestGet",
1753 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001754 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001755 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001756 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001757 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001758 current = set( getResponses[ i ] )
1759 if len( current ) == len( getResponses[ i ] ):
1760 # no repeats
1761 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001762 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001763 " of set " + main.onosSetName + ":\n" +
1764 str( getResponses[ i ] ) )
1765 main.log.debug( "Expected: " + str( main.onosSet ) )
1766 main.log.debug( "Actual: " + str( current ) )
1767 getResults = main.FALSE
1768 else:
1769 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001770 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001771 " set " + main.onosSetName + ":\n" +
1772 str( getResponses[ i ] ) )
1773 getResults = main.FALSE
1774 elif getResponses[ i ] == main.ERROR:
1775 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001776 sizeResponses = main.Cluster.command( "setTestSize",
1777 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001778 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001779 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001780 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001781 if size != sizeResponses[ i ]:
1782 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001783 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001784 " for set " + main.onosSetName +
1785 " but got " + str( sizeResponses[ i ] ) )
1786 addAllResults = addAllResults and getResults and sizeResults
1787 utilities.assert_equals( expect=main.TRUE,
1788 actual=addAllResults,
1789 onpass="Set addAll correct",
1790 onfail="Set addAll was incorrect" )
1791
1792 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001793 containsResponses = main.Cluster.command( "setTestGet",
1794 args=[ main.onosSetName ],
1795 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001796 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001797 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001798 if containsResponses[ i ] == main.ERROR:
1799 containsResults = main.FALSE
1800 else:
1801 containsResults = containsResults and\
1802 containsResponses[ i ][ 1 ]
1803 utilities.assert_equals( expect=main.TRUE,
1804 actual=containsResults,
1805 onpass="Set contains is functional",
1806 onfail="Set contains failed" )
1807
1808 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001809 containsAllResponses = main.Cluster.command( "setTestGet",
1810 args=[ main.onosSetName ],
1811 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001812 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001813 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001814 if containsResponses[ i ] == main.ERROR:
1815 containsResults = main.FALSE
1816 else:
1817 containsResults = containsResults and\
1818 containsResponses[ i ][ 1 ]
1819 utilities.assert_equals( expect=main.TRUE,
1820 actual=containsAllResults,
1821 onpass="Set containsAll is functional",
1822 onfail="Set containsAll failed" )
1823
1824 main.step( "Distributed Set remove()" )
1825 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001826 removeResponses = main.Cluster.command( "setTestRemove",
1827 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001828 # main.TRUE = successfully changed the set
1829 # main.FALSE = action resulted in no change in set
1830 # main.ERROR - Some error in executing the function
1831 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001832 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001833 if removeResponses[ i ] == main.TRUE:
1834 # All is well
1835 pass
1836 elif removeResponses[ i ] == main.FALSE:
1837 # not in set, probably fine
1838 pass
1839 elif removeResponses[ i ] == main.ERROR:
1840 # Error in execution
1841 removeResults = main.FALSE
1842 else:
1843 # unexpected result
1844 removeResults = main.FALSE
1845 if removeResults != main.TRUE:
1846 main.log.error( "Error executing set remove" )
1847
1848 # Check if set is still correct
1849 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001850 getResponses = main.Cluster.command( "setTestGet",
1851 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001852 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001853 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001854 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001855 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001856 current = set( getResponses[ i ] )
1857 if len( current ) == len( getResponses[ i ] ):
1858 # no repeats
1859 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001860 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001861 " of set " + main.onosSetName + ":\n" +
1862 str( getResponses[ i ] ) )
1863 main.log.debug( "Expected: " + str( main.onosSet ) )
1864 main.log.debug( "Actual: " + str( current ) )
1865 getResults = main.FALSE
1866 else:
1867 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001868 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001869 " set " + main.onosSetName + ":\n" +
1870 str( getResponses[ i ] ) )
1871 getResults = main.FALSE
1872 elif getResponses[ i ] == main.ERROR:
1873 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001874 sizeResponses = main.Cluster.command( "setTestSize",
1875 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001876 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001877 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001878 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001879 if size != sizeResponses[ i ]:
1880 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001881 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001882 " for set " + main.onosSetName +
1883 " but got " + str( sizeResponses[ i ] ) )
1884 removeResults = removeResults and getResults and sizeResults
1885 utilities.assert_equals( expect=main.TRUE,
1886 actual=removeResults,
1887 onpass="Set remove correct",
1888 onfail="Set remove was incorrect" )
1889
1890 main.step( "Distributed Set removeAll()" )
1891 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001892 removeAllResponses = main.Cluster.command( "setTestRemove",
1893 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001894 # main.TRUE = successfully changed the set
1895 # main.FALSE = action resulted in no change in set
1896 # main.ERROR - Some error in executing the function
1897 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001898 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001899 if removeAllResponses[ i ] == main.TRUE:
1900 # All is well
1901 pass
1902 elif removeAllResponses[ i ] == main.FALSE:
1903 # not in set, probably fine
1904 pass
1905 elif removeAllResponses[ i ] == main.ERROR:
1906 # Error in execution
1907 removeAllResults = main.FALSE
1908 else:
1909 # unexpected result
1910 removeAllResults = main.FALSE
1911 if removeAllResults != main.TRUE:
1912 main.log.error( "Error executing set removeAll" )
1913
1914 # Check if set is still correct
1915 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001916 getResponses = main.Cluster.command( "setTestGet",
1917 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001918 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001919 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001920 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001921 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001922 current = set( getResponses[ i ] )
1923 if len( current ) == len( getResponses[ i ] ):
1924 # no repeats
1925 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001926 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001927 " of set " + main.onosSetName + ":\n" +
1928 str( getResponses[ i ] ) )
1929 main.log.debug( "Expected: " + str( main.onosSet ) )
1930 main.log.debug( "Actual: " + str( current ) )
1931 getResults = main.FALSE
1932 else:
1933 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001934 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001935 " set " + main.onosSetName + ":\n" +
1936 str( getResponses[ i ] ) )
1937 getResults = main.FALSE
1938 elif getResponses[ i ] == main.ERROR:
1939 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001940 sizeResponses = main.Cluster.command( "setTestSize",
1941 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001942 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001943 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001944 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001945 if size != sizeResponses[ i ]:
1946 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001947 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001948 " for set " + main.onosSetName +
1949 " but got " + str( sizeResponses[ i ] ) )
1950 removeAllResults = removeAllResults and getResults and sizeResults
1951 utilities.assert_equals( expect=main.TRUE,
1952 actual=removeAllResults,
1953 onpass="Set removeAll correct",
1954 onfail="Set removeAll was incorrect" )
1955
1956 main.step( "Distributed Set addAll()" )
1957 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001958 addResponses = main.Cluster.command( "setTestAdd",
1959 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001960 # main.TRUE = successfully changed the set
1961 # main.FALSE = action resulted in no change in set
1962 # main.ERROR - Some error in executing the function
1963 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001964 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001965 if addResponses[ i ] == main.TRUE:
1966 # All is well
1967 pass
1968 elif addResponses[ i ] == main.FALSE:
1969 # Already in set, probably fine
1970 pass
1971 elif addResponses[ i ] == main.ERROR:
1972 # Error in execution
1973 addAllResults = main.FALSE
1974 else:
1975 # unexpected result
1976 addAllResults = main.FALSE
1977 if addAllResults != main.TRUE:
1978 main.log.error( "Error executing set addAll" )
1979
1980 # Check if set is still correct
1981 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001982 getResponses = main.Cluster.command( "setTestGet",
1983 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001984 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001985 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001986 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001987 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001988 current = set( getResponses[ i ] )
1989 if len( current ) == len( getResponses[ i ] ):
1990 # no repeats
1991 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001992 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001993 " of set " + main.onosSetName + ":\n" +
1994 str( getResponses[ i ] ) )
1995 main.log.debug( "Expected: " + str( main.onosSet ) )
1996 main.log.debug( "Actual: " + str( current ) )
1997 getResults = main.FALSE
1998 else:
1999 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002000 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002001 " set " + main.onosSetName + ":\n" +
2002 str( getResponses[ i ] ) )
2003 getResults = main.FALSE
2004 elif getResponses[ i ] == main.ERROR:
2005 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002006 sizeResponses = main.Cluster.command( "setTestSize",
2007 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002008 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002009 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002010 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002011 if size != sizeResponses[ i ]:
2012 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002013 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002014 " for set " + main.onosSetName +
2015 " but got " + str( sizeResponses[ i ] ) )
2016 addAllResults = addAllResults and getResults and sizeResults
2017 utilities.assert_equals( expect=main.TRUE,
2018 actual=addAllResults,
2019 onpass="Set addAll correct",
2020 onfail="Set addAll was incorrect" )
2021
2022 main.step( "Distributed Set clear()" )
2023 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07002024 clearResponses = main.Cluster.command( "setTestRemove",
2025 args=[ main.onosSetName, " " ], # Values doesn't matter
2026 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002027 # main.TRUE = successfully changed the set
2028 # main.FALSE = action resulted in no change in set
2029 # main.ERROR - Some error in executing the function
2030 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002031 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002032 if clearResponses[ i ] == main.TRUE:
2033 # All is well
2034 pass
2035 elif clearResponses[ i ] == main.FALSE:
2036 # Nothing set, probably fine
2037 pass
2038 elif clearResponses[ i ] == main.ERROR:
2039 # Error in execution
2040 clearResults = main.FALSE
2041 else:
2042 # unexpected result
2043 clearResults = main.FALSE
2044 if clearResults != main.TRUE:
2045 main.log.error( "Error executing set clear" )
2046
2047 # Check if set is still correct
2048 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002049 getResponses = main.Cluster.command( "setTestGet",
2050 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002051 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002052 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002053 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002054 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002055 current = set( getResponses[ i ] )
2056 if len( current ) == len( getResponses[ i ] ):
2057 # no repeats
2058 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002059 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002060 " of set " + main.onosSetName + ":\n" +
2061 str( getResponses[ i ] ) )
2062 main.log.debug( "Expected: " + str( main.onosSet ) )
2063 main.log.debug( "Actual: " + str( current ) )
2064 getResults = main.FALSE
2065 else:
2066 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002067 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002068 " set " + main.onosSetName + ":\n" +
2069 str( getResponses[ i ] ) )
2070 getResults = main.FALSE
2071 elif getResponses[ i ] == main.ERROR:
2072 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002073 sizeResponses = main.Cluster.command( "setTestSize",
2074 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002075 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002076 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002077 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002078 if size != sizeResponses[ i ]:
2079 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002080 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002081 " for set " + main.onosSetName +
2082 " but got " + str( sizeResponses[ i ] ) )
2083 clearResults = clearResults and getResults and sizeResults
2084 utilities.assert_equals( expect=main.TRUE,
2085 actual=clearResults,
2086 onpass="Set clear correct",
2087 onfail="Set clear was incorrect" )
2088
2089 main.step( "Distributed Set addAll()" )
2090 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002091 addResponses = main.Cluster.command( "setTestAdd",
2092 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002093 # main.TRUE = successfully changed the set
2094 # main.FALSE = action resulted in no change in set
2095 # main.ERROR - Some error in executing the function
2096 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002097 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002098 if addResponses[ i ] == main.TRUE:
2099 # All is well
2100 pass
2101 elif addResponses[ i ] == main.FALSE:
2102 # Already in set, probably fine
2103 pass
2104 elif addResponses[ i ] == main.ERROR:
2105 # Error in execution
2106 addAllResults = main.FALSE
2107 else:
2108 # unexpected result
2109 addAllResults = main.FALSE
2110 if addAllResults != main.TRUE:
2111 main.log.error( "Error executing set addAll" )
2112
2113 # Check if set is still correct
2114 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002115 getResponses = main.Cluster.command( "setTestGet",
2116 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002117 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002118 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002119 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002120 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002121 current = set( getResponses[ i ] )
2122 if len( current ) == len( getResponses[ i ] ):
2123 # no repeats
2124 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002125 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002126 " of set " + main.onosSetName + ":\n" +
2127 str( getResponses[ i ] ) )
2128 main.log.debug( "Expected: " + str( main.onosSet ) )
2129 main.log.debug( "Actual: " + str( current ) )
2130 getResults = main.FALSE
2131 else:
2132 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002133 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002134 " set " + main.onosSetName + ":\n" +
2135 str( getResponses[ i ] ) )
2136 getResults = main.FALSE
2137 elif getResponses[ i ] == main.ERROR:
2138 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002139 sizeResponses = main.Cluster.command( "setTestSize",
2140 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002141 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002142 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002143 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002144 if size != sizeResponses[ i ]:
2145 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002146 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002147 " for set " + main.onosSetName +
2148 " but got " + str( sizeResponses[ i ] ) )
2149 addAllResults = addAllResults and getResults and sizeResults
2150 utilities.assert_equals( expect=main.TRUE,
2151 actual=addAllResults,
2152 onpass="Set addAll correct",
2153 onfail="Set addAll was incorrect" )
2154
2155 main.step( "Distributed Set retain()" )
2156 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002157 retainResponses = main.Cluster.command( "setTestRemove",
2158 args=[ main.onosSetName, retainValue ],
2159 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002160 # main.TRUE = successfully changed the set
2161 # main.FALSE = action resulted in no change in set
2162 # main.ERROR - Some error in executing the function
2163 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002164 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002165 if retainResponses[ i ] == main.TRUE:
2166 # All is well
2167 pass
2168 elif retainResponses[ i ] == main.FALSE:
2169 # Already in set, probably fine
2170 pass
2171 elif retainResponses[ i ] == main.ERROR:
2172 # Error in execution
2173 retainResults = main.FALSE
2174 else:
2175 # unexpected result
2176 retainResults = main.FALSE
2177 if retainResults != main.TRUE:
2178 main.log.error( "Error executing set retain" )
2179
2180 # Check if set is still correct
2181 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002182 getResponses = main.Cluster.command( "setTestGet",
2183 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002184 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002185 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002186 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002187 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002188 current = set( getResponses[ i ] )
2189 if len( current ) == len( getResponses[ i ] ):
2190 # no repeats
2191 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002192 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002193 " of set " + main.onosSetName + ":\n" +
2194 str( getResponses[ i ] ) )
2195 main.log.debug( "Expected: " + str( main.onosSet ) )
2196 main.log.debug( "Actual: " + str( current ) )
2197 getResults = main.FALSE
2198 else:
2199 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002200 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002201 " set " + main.onosSetName + ":\n" +
2202 str( getResponses[ i ] ) )
2203 getResults = main.FALSE
2204 elif getResponses[ i ] == main.ERROR:
2205 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002206 sizeResponses = main.Cluster.command( "setTestSize",
2207 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002208 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002209 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002210 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002211 if size != sizeResponses[ i ]:
2212 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002213 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002214 str( size ) + " for set " + main.onosSetName +
2215 " but got " + str( sizeResponses[ i ] ) )
2216 retainResults = retainResults and getResults and sizeResults
2217 utilities.assert_equals( expect=main.TRUE,
2218 actual=retainResults,
2219 onpass="Set retain correct",
2220 onfail="Set retain was incorrect" )
2221
2222 # Transactional maps
2223 main.step( "Partitioned Transactional maps put" )
2224 tMapValue = "Testing"
2225 numKeys = 100
2226 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002227 ctrl = main.Cluster.next()
2228 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002229 if putResponses and len( putResponses ) == 100:
2230 for i in putResponses:
2231 if putResponses[ i ][ 'value' ] != tMapValue:
2232 putResult = False
2233 else:
2234 putResult = False
2235 if not putResult:
2236 main.log.debug( "Put response values: " + str( putResponses ) )
2237 utilities.assert_equals( expect=True,
2238 actual=putResult,
2239 onpass="Partitioned Transactional Map put successful",
2240 onfail="Partitioned Transactional Map put values are incorrect" )
2241
2242 main.step( "Partitioned Transactional maps get" )
2243 # FIXME: is this sleep needed?
2244 time.sleep( 5 )
2245
2246 getCheck = True
2247 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002248 getResponses = main.Cluster.command( "transactionalMapGet",
2249 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002250 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002251 for node in getResponses:
2252 if node != tMapValue:
2253 valueCheck = False
2254 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002255 main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002256 main.log.warn( getResponses )
2257 getCheck = getCheck and valueCheck
2258 utilities.assert_equals( expect=True,
2259 actual=getCheck,
2260 onpass="Partitioned Transactional Map get values were correct",
2261 onfail="Partitioned Transactional Map values incorrect" )
2262
2263 # DISTRIBUTED ATOMIC VALUE
2264 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002265 getValues = main.Cluster.command( "valueTestGet",
2266 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002267 main.log.debug( getValues )
2268 # Check the results
2269 atomicValueGetResult = True
2270 expected = valueValue if valueValue is not None else "null"
2271 main.log.debug( "Checking for value of " + expected )
2272 for i in getValues:
2273 if i != expected:
2274 atomicValueGetResult = False
2275 utilities.assert_equals( expect=True,
2276 actual=atomicValueGetResult,
2277 onpass="Atomic Value get successful",
2278 onfail="Error getting atomic Value " +
2279 str( valueValue ) + ", found: " +
2280 str( getValues ) )
2281
2282 main.step( "Atomic Value set()" )
2283 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002284 setValues = main.Cluster.command( "valueTestSet",
2285 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002286 main.log.debug( setValues )
2287 # Check the results
2288 atomicValueSetResults = True
2289 for i in setValues:
2290 if i != main.TRUE:
2291 atomicValueSetResults = False
2292 utilities.assert_equals( expect=True,
2293 actual=atomicValueSetResults,
2294 onpass="Atomic Value set successful",
2295 onfail="Error setting atomic Value" +
2296 str( setValues ) )
2297
2298 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002299 getValues = main.Cluster.command( "valueTestGet",
2300 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002301 main.log.debug( getValues )
2302 # Check the results
2303 atomicValueGetResult = True
2304 expected = valueValue if valueValue is not None else "null"
2305 main.log.debug( "Checking for value of " + expected )
2306 for i in getValues:
2307 if i != expected:
2308 atomicValueGetResult = False
2309 utilities.assert_equals( expect=True,
2310 actual=atomicValueGetResult,
2311 onpass="Atomic Value get successful",
2312 onfail="Error getting atomic Value " +
2313 str( valueValue ) + ", found: " +
2314 str( getValues ) )
2315
2316 main.step( "Atomic Value compareAndSet()" )
2317 oldValue = valueValue
2318 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002319 ctrl = main.Cluster.next()
2320 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002321 main.log.debug( CASValue )
2322 utilities.assert_equals( expect=main.TRUE,
2323 actual=CASValue,
2324 onpass="Atomic Value comapreAndSet successful",
2325 onfail="Error setting atomic Value:" +
2326 str( CASValue ) )
2327
2328 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002329 getValues = main.Cluster.command( "valueTestGet",
2330 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002331 main.log.debug( getValues )
2332 # Check the results
2333 atomicValueGetResult = True
2334 expected = valueValue if valueValue is not None else "null"
2335 main.log.debug( "Checking for value of " + expected )
2336 for i in getValues:
2337 if i != expected:
2338 atomicValueGetResult = False
2339 utilities.assert_equals( expect=True,
2340 actual=atomicValueGetResult,
2341 onpass="Atomic Value get successful",
2342 onfail="Error getting atomic Value " +
2343 str( valueValue ) + ", found: " +
2344 str( getValues ) )
2345
2346 main.step( "Atomic Value getAndSet()" )
2347 oldValue = valueValue
2348 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002349 ctrl = main.Cluster.next()
2350 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002351 main.log.debug( GASValue )
2352 expected = oldValue if oldValue is not None else "null"
2353 utilities.assert_equals( expect=expected,
2354 actual=GASValue,
2355 onpass="Atomic Value GAS successful",
2356 onfail="Error with GetAndSet atomic Value: expected " +
2357 str( expected ) + ", found: " +
2358 str( GASValue ) )
2359
2360 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002361 getValues = main.Cluster.command( "valueTestGet",
2362 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002363 main.log.debug( getValues )
2364 # Check the results
2365 atomicValueGetResult = True
2366 expected = valueValue if valueValue is not None else "null"
2367 main.log.debug( "Checking for value of " + expected )
2368 for i in getValues:
2369 if i != expected:
2370 atomicValueGetResult = False
2371 utilities.assert_equals( expect=True,
2372 actual=atomicValueGetResult,
2373 onpass="Atomic Value get successful",
2374 onfail="Error getting atomic Value: expected " +
2375 str( valueValue ) + ", found: " +
2376 str( getValues ) )
2377
2378 main.step( "Atomic Value destory()" )
2379 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002380 ctrl = main.Cluster.next()
2381 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002382 main.log.debug( destroyResult )
2383 # Check the results
2384 utilities.assert_equals( expect=main.TRUE,
2385 actual=destroyResult,
2386 onpass="Atomic Value destroy successful",
2387 onfail="Error destroying atomic Value" )
2388
2389 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002390 getValues = main.Cluster.command( "valueTestGet",
2391 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002392 main.log.debug( getValues )
2393 # Check the results
2394 atomicValueGetResult = True
2395 expected = valueValue if valueValue is not None else "null"
2396 main.log.debug( "Checking for value of " + expected )
2397 for i in getValues:
2398 if i != expected:
2399 atomicValueGetResult = False
2400 utilities.assert_equals( expect=True,
2401 actual=atomicValueGetResult,
2402 onpass="Atomic Value get successful",
2403 onfail="Error getting atomic Value " +
2404 str( valueValue ) + ", found: " +
2405 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002406
2407 # WORK QUEUES
2408 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002409 ctrl = main.Cluster.next()
2410 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002411 workQueuePending += 1
2412 main.log.debug( addResult )
2413 # Check the results
2414 utilities.assert_equals( expect=main.TRUE,
2415 actual=addResult,
2416 onpass="Work Queue add successful",
2417 onfail="Error adding to Work Queue" )
2418
2419 main.step( "Check the work queue stats" )
2420 statsResults = self.workQueueStatsCheck( workQueueName,
2421 workQueueCompleted,
2422 workQueueInProgress,
2423 workQueuePending )
2424 utilities.assert_equals( expect=True,
2425 actual=statsResults,
2426 onpass="Work Queue stats correct",
2427 onfail="Work Queue stats incorrect " )
2428
2429 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002430 ctrl = main.Cluster.next()
2431 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002432 workQueuePending += 2
2433 main.log.debug( addMultipleResult )
2434 # Check the results
2435 utilities.assert_equals( expect=main.TRUE,
2436 actual=addMultipleResult,
2437 onpass="Work Queue add multiple successful",
2438 onfail="Error adding multiple items to Work Queue" )
2439
2440 main.step( "Check the work queue stats" )
2441 statsResults = self.workQueueStatsCheck( workQueueName,
2442 workQueueCompleted,
2443 workQueueInProgress,
2444 workQueuePending )
2445 utilities.assert_equals( expect=True,
2446 actual=statsResults,
2447 onpass="Work Queue stats correct",
2448 onfail="Work Queue stats incorrect " )
2449
2450 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002451 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002452 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002453 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002454 workQueuePending -= number
2455 workQueueCompleted += number
2456 main.log.debug( take1Result )
2457 # Check the results
2458 utilities.assert_equals( expect=main.TRUE,
2459 actual=take1Result,
2460 onpass="Work Queue takeAndComplete 1 successful",
2461 onfail="Error taking 1 from Work Queue" )
2462
2463 main.step( "Check the work queue stats" )
2464 statsResults = self.workQueueStatsCheck( workQueueName,
2465 workQueueCompleted,
2466 workQueueInProgress,
2467 workQueuePending )
2468 utilities.assert_equals( expect=True,
2469 actual=statsResults,
2470 onpass="Work Queue stats correct",
2471 onfail="Work Queue stats incorrect " )
2472
2473 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002474 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002475 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002476 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002477 workQueuePending -= number
2478 workQueueCompleted += number
2479 main.log.debug( take2Result )
2480 # Check the results
2481 utilities.assert_equals( expect=main.TRUE,
2482 actual=take2Result,
2483 onpass="Work Queue takeAndComplete 2 successful",
2484 onfail="Error taking 2 from Work Queue" )
2485
2486 main.step( "Check the work queue stats" )
2487 statsResults = self.workQueueStatsCheck( workQueueName,
2488 workQueueCompleted,
2489 workQueueInProgress,
2490 workQueuePending )
2491 utilities.assert_equals( expect=True,
2492 actual=statsResults,
2493 onpass="Work Queue stats correct",
2494 onfail="Work Queue stats incorrect " )
2495
2496 main.step( "Work Queue destroy()" )
2497 valueValue = None
2498 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002499 ctrl = main.Cluster.next()
2500 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002501 workQueueCompleted = 0
2502 workQueueInProgress = 0
2503 workQueuePending = 0
2504 main.log.debug( destroyResult )
2505 # Check the results
2506 utilities.assert_equals( expect=main.TRUE,
2507 actual=destroyResult,
2508 onpass="Work Queue destroy successful",
2509 onfail="Error destroying Work Queue" )
2510
2511 main.step( "Check the work queue stats" )
2512 statsResults = self.workQueueStatsCheck( workQueueName,
2513 workQueueCompleted,
2514 workQueueInProgress,
2515 workQueuePending )
2516 utilities.assert_equals( expect=True,
2517 actual=statsResults,
2518 onpass="Work Queue stats correct",
2519 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002520 except Exception as e:
2521 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002522
2523 def cleanUp( self, main ):
2524 """
2525 Clean up
2526 """
2527 import os
2528 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002529 assert main, "main not defined"
2530 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002531
2532 # printing colors to terminal
2533 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2534 'blue': '\033[94m', 'green': '\033[92m',
2535 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
2536 main.case( "Test Cleanup" )
2537 main.step( "Killing tcpdumps" )
2538 main.Mininet2.stopTcpdump()
2539
2540 testname = main.TEST
2541 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2542 main.step( "Copying MN pcap and ONOS log files to test station" )
2543 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2544 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2545 # NOTE: MN Pcap file is being saved to logdir.
2546 # We scp this file as MN and TestON aren't necessarily the same vm
2547
2548 # FIXME: To be replaced with a Jenkin's post script
2549 # TODO: Load these from params
2550 # NOTE: must end in /
2551 logFolder = "/opt/onos/log/"
2552 logFiles = [ "karaf.log", "karaf.log.1" ]
2553 # NOTE: must end in /
2554 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002555 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002556 dstName = main.logdir + "/" + ctrl.name + "-" + f
2557 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002558 logFolder + f, dstName )
2559 # std*.log's
2560 # NOTE: must end in /
2561 logFolder = "/opt/onos/var/"
2562 logFiles = [ "stderr.log", "stdout.log" ]
2563 # NOTE: must end in /
2564 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002565 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002566 dstName = main.logdir + "/" + ctrl.name + "-" + f
2567 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002568 logFolder + f, dstName )
2569 else:
2570 main.log.debug( "skipping saving log files" )
2571
2572 main.step( "Stopping Mininet" )
2573 mnResult = main.Mininet1.stopNet()
2574 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2575 onpass="Mininet stopped",
2576 onfail="MN cleanup NOT successful" )
2577
2578 main.step( "Checking ONOS Logs for errors" )
Devin Lim142b5342017-07-20 15:22:39 -07002579 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002580 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2581 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002582
2583 try:
2584 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2585 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2586 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2587 timerLog.close()
2588 except NameError as e:
2589 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002590
Devin Lim58046fa2017-07-05 16:55:00 -07002591 def assignMastership( self, main ):
2592 """
2593 Assign mastership to controllers
2594 """
2595 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002596 assert main, "main not defined"
2597 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002598
2599 main.case( "Assigning Controller roles for switches" )
2600 main.caseExplanation = "Check that ONOS is connected to each " +\
2601 "device. Then manually assign" +\
2602 " mastership to specific ONOS nodes using" +\
2603 " 'device-role'"
2604 main.step( "Assign mastership of switches to specific controllers" )
2605 # Manually assign mastership to the controller we want
2606 roleCall = main.TRUE
2607
2608 ipList = []
2609 deviceList = []
Jon Hallca319892017-06-15 15:25:22 -07002610 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07002611 try:
2612 # Assign mastership to specific controllers. This assignment was
2613 # determined for a 7 node cluser, but will work with any sized
2614 # cluster
2615 for i in range( 1, 29 ): # switches 1 through 28
2616 # set up correct variables:
2617 if i == 1:
2618 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002619 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002620 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
2621 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002622 c = 1 % main.Cluster.numCtrls
2623 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002624 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
2625 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002626 c = 1 % main.Cluster.numCtrls
2627 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002628 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
2629 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002630 c = 3 % main.Cluster.numCtrls
2631 ip = main.Cluster.active( c ).ip_address # ONOS4
Devin Lim58046fa2017-07-05 16:55:00 -07002632 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
2633 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002634 c = 2 % main.Cluster.numCtrls
2635 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002636 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
2637 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002638 c = 2 % main.Cluster.numCtrls
2639 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002640 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
2641 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002642 c = 5 % main.Cluster.numCtrls
2643 ip = main.Cluster.active( c ).ip_address # ONOS6
Devin Lim58046fa2017-07-05 16:55:00 -07002644 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
2645 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002646 c = 4 % main.Cluster.numCtrls
2647 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002648 dpid = '3' + str( i ).zfill( 3 )
2649 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2650 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002651 c = 6 % main.Cluster.numCtrls
2652 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002653 dpid = '6' + str( i ).zfill( 3 )
2654 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2655 elif i == 28:
2656 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002657 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002658 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
2659 else:
2660 main.log.error( "You didn't write an else statement for " +
2661 "switch s" + str( i ) )
2662 roleCall = main.FALSE
2663 # Assign switch
2664 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2665 # TODO: make this controller dynamic
2666 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
2667 ipList.append( ip )
2668 deviceList.append( deviceId )
2669 except ( AttributeError, AssertionError ):
2670 main.log.exception( "Something is wrong with ONOS device view" )
2671 main.log.info( onosCli.devices() )
2672 utilities.assert_equals(
2673 expect=main.TRUE,
2674 actual=roleCall,
2675 onpass="Re-assigned switch mastership to designated controller",
2676 onfail="Something wrong with deviceRole calls" )
2677
2678 main.step( "Check mastership was correctly assigned" )
2679 roleCheck = main.TRUE
2680 # NOTE: This is due to the fact that device mastership change is not
2681 # atomic and is actually a multi step process
2682 time.sleep( 5 )
2683 for i in range( len( ipList ) ):
2684 ip = ipList[ i ]
2685 deviceId = deviceList[ i ]
2686 # Check assignment
2687 master = onosCli.getRole( deviceId ).get( 'master' )
2688 if ip in master:
2689 roleCheck = roleCheck and main.TRUE
2690 else:
2691 roleCheck = roleCheck and main.FALSE
2692 main.log.error( "Error, controller " + ip + " is not" +
2693 " master " + "of device " +
2694 str( deviceId ) + ". Master is " +
2695 repr( master ) + "." )
2696 utilities.assert_equals(
2697 expect=main.TRUE,
2698 actual=roleCheck,
2699 onpass="Switches were successfully reassigned to designated " +
2700 "controller",
2701 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002702
Devin Lim58046fa2017-07-05 16:55:00 -07002703 def bringUpStoppedNode( self, main ):
2704 """
2705 The bring up stopped nodes
2706 """
2707 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002708 assert main, "main not defined"
2709 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002710 assert main.kill, "main.kill not defined"
2711 main.case( "Restart minority of ONOS nodes" )
2712
2713 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2714 startResults = main.TRUE
2715 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002716 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002717 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002718 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002719 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2720 onpass="ONOS nodes started successfully",
2721 onfail="ONOS nodes NOT successfully started" )
2722
2723 main.step( "Checking if ONOS is up yet" )
2724 count = 0
2725 onosIsupResult = main.FALSE
2726 while onosIsupResult == main.FALSE and count < 10:
2727 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002728 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002729 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002730 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002731 count = count + 1
2732 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2733 onpass="ONOS restarted successfully",
2734 onfail="ONOS restart NOT successful" )
2735
Jon Hallca319892017-06-15 15:25:22 -07002736 main.step( "Restarting ONOS nodes" )
Devin Lim58046fa2017-07-05 16:55:00 -07002737 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002738 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002739 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002740 ctrl.startOnosCli( ctrl.ipAddress )
2741 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002742 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002743 onpass="ONOS node(s) restarted",
2744 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002745
2746 # Grab the time of restart so we chan check how long the gossip
2747 # protocol has had time to work
2748 main.restartTime = time.time() - restartTime
2749 main.log.debug( "Restart time: " + str( main.restartTime ) )
2750 # TODO: MAke this configurable. Also, we are breaking the above timer
2751 main.step( "Checking ONOS nodes" )
2752 nodeResults = utilities.retry( self.nodesCheck,
2753 False,
Jon Hallca319892017-06-15 15:25:22 -07002754 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07002755 sleep=15,
2756 attempts=5 )
2757
2758 utilities.assert_equals( expect=True, actual=nodeResults,
2759 onpass="Nodes check successful",
2760 onfail="Nodes check NOT successful" )
2761
2762 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002763 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002764 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002765 ctrl.name,
2766 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002767 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -07002768 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002769
Jon Hallca319892017-06-15 15:25:22 -07002770 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002771
2772 main.step( "Rerun for election on the node(s) that were killed" )
2773 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002774 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002775 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002776 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002777 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2778 onpass="ONOS nodes reran for election topic",
2779 onfail="Errror rerunning for election" )
Devin Lim142b5342017-07-20 15:22:39 -07002780 def tempCell( self, cellName, ipList ):
2781 main.step( "Create cell file" )
2782 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002783
Devin Lim142b5342017-07-20 15:22:39 -07002784
2785 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2786 main.Mininet1.ip_address,
2787 cellAppString, ipList , main.ONOScli1.karafUser )
2788 main.step( "Applying cell variable to environment" )
2789 cellResult = main.ONOSbench.setCell( cellName )
2790 verifyResult = main.ONOSbench.verifyCell()
2791
2792
2793 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002794 """
2795 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002796 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002797 1: scaling
2798 """
2799 """
2800 Check state after ONOS failure/scaling
2801 """
2802 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002803 assert main, "main not defined"
2804 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002805 main.case( "Running ONOS Constant State Tests" )
2806
2807 OnosAfterWhich = [ "failure" , "scaliing" ]
2808
Devin Lim58046fa2017-07-05 16:55:00 -07002809 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002810 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002811
Devin Lim142b5342017-07-20 15:22:39 -07002812 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002813 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002814
2815 if rolesResults and not consistentMastership:
2816 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002817 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002818 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002819 json.dumps( json.loads( ONOSMastership[ i ] ),
2820 sort_keys=True,
2821 indent=4,
2822 separators=( ',', ': ' ) ) )
2823
2824 if compareSwitch:
2825 description2 = "Compare switch roles from before failure"
2826 main.step( description2 )
2827 try:
2828 currentJson = json.loads( ONOSMastership[ 0 ] )
2829 oldJson = json.loads( mastershipState )
2830 except ( ValueError, TypeError ):
2831 main.log.exception( "Something is wrong with parsing " +
2832 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002833 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2834 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002835 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002836 mastershipCheck = main.TRUE
2837 for i in range( 1, 29 ):
2838 switchDPID = str(
2839 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
2840 current = [ switch[ 'master' ] for switch in currentJson
2841 if switchDPID in switch[ 'id' ] ]
2842 old = [ switch[ 'master' ] for switch in oldJson
2843 if switchDPID in switch[ 'id' ] ]
2844 if current == old:
2845 mastershipCheck = mastershipCheck and main.TRUE
2846 else:
2847 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2848 mastershipCheck = main.FALSE
2849 utilities.assert_equals(
2850 expect=main.TRUE,
2851 actual=mastershipCheck,
2852 onpass="Mastership of Switches was not changed",
2853 onfail="Mastership of some switches changed" )
2854
2855 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002856 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002857 intentCheck = main.FALSE
2858 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002859
2860 main.step( "Check for consistency in Intents from each controller" )
2861 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2862 main.log.info( "Intents are consistent across all ONOS " +
2863 "nodes" )
2864 else:
2865 consistentIntents = False
2866
2867 # Try to make it easy to figure out what is happening
2868 #
2869 # Intent ONOS1 ONOS2 ...
2870 # 0x01 INSTALLED INSTALLING
2871 # ... ... ...
2872 # ... ... ...
2873 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002874 for ctrl in main.Cluster.active():
2875 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002876 main.log.warn( title )
2877 # get all intent keys in the cluster
2878 keys = []
2879 for nodeStr in ONOSIntents:
2880 node = json.loads( nodeStr )
2881 for intent in node:
2882 keys.append( intent.get( 'id' ) )
2883 keys = set( keys )
2884 for key in keys:
2885 row = "%-13s" % key
2886 for nodeStr in ONOSIntents:
2887 node = json.loads( nodeStr )
2888 for intent in node:
2889 if intent.get( 'id' ) == key:
2890 row += "%-15s" % intent.get( 'state' )
2891 main.log.warn( row )
2892 # End table view
2893
2894 utilities.assert_equals(
2895 expect=True,
2896 actual=consistentIntents,
2897 onpass="Intents are consistent across all ONOS nodes",
2898 onfail="ONOS nodes have different views of intents" )
2899 intentStates = []
2900 for node in ONOSIntents: # Iter through ONOS nodes
2901 nodeStates = []
2902 # Iter through intents of a node
2903 try:
2904 for intent in json.loads( node ):
2905 nodeStates.append( intent[ 'state' ] )
2906 except ( ValueError, TypeError ):
2907 main.log.exception( "Error in parsing intents" )
2908 main.log.error( repr( node ) )
2909 intentStates.append( nodeStates )
2910 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2911 main.log.info( dict( out ) )
2912
2913 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002914 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002915 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002916 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002917 main.log.warn( json.dumps(
2918 json.loads( ONOSIntents[ i ] ),
2919 sort_keys=True,
2920 indent=4,
2921 separators=( ',', ': ' ) ) )
2922 elif intentsResults and consistentIntents:
2923 intentCheck = main.TRUE
2924
2925 # NOTE: Store has no durability, so intents are lost across system
2926 # restarts
2927 if not isRestart:
2928 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2929 # NOTE: this requires case 5 to pass for intentState to be set.
2930 # maybe we should stop the test if that fails?
2931 sameIntents = main.FALSE
2932 try:
2933 intentState
2934 except NameError:
2935 main.log.warn( "No previous intent state was saved" )
2936 else:
2937 if intentState and intentState == ONOSIntents[ 0 ]:
2938 sameIntents = main.TRUE
2939 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2940 # TODO: possibly the states have changed? we may need to figure out
2941 # what the acceptable states are
2942 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2943 sameIntents = main.TRUE
2944 try:
2945 before = json.loads( intentState )
2946 after = json.loads( ONOSIntents[ 0 ] )
2947 for intent in before:
2948 if intent not in after:
2949 sameIntents = main.FALSE
2950 main.log.debug( "Intent is not currently in ONOS " +
2951 "(at least in the same form):" )
2952 main.log.debug( json.dumps( intent ) )
2953 except ( ValueError, TypeError ):
2954 main.log.exception( "Exception printing intents" )
2955 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2956 main.log.debug( repr( intentState ) )
2957 if sameIntents == main.FALSE:
2958 try:
2959 main.log.debug( "ONOS intents before: " )
2960 main.log.debug( json.dumps( json.loads( intentState ),
2961 sort_keys=True, indent=4,
2962 separators=( ',', ': ' ) ) )
2963 main.log.debug( "Current ONOS intents: " )
2964 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2965 sort_keys=True, indent=4,
2966 separators=( ',', ': ' ) ) )
2967 except ( ValueError, TypeError ):
2968 main.log.exception( "Exception printing intents" )
2969 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2970 main.log.debug( repr( intentState ) )
2971 utilities.assert_equals(
2972 expect=main.TRUE,
2973 actual=sameIntents,
2974 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ] ,
2975 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
2976 intentCheck = intentCheck and sameIntents
2977
2978 main.step( "Get the OF Table entries and compare to before " +
2979 "component " + OnosAfterWhich[ afterWhich ] )
2980 FlowTables = main.TRUE
2981 for i in range( 28 ):
2982 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2983 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2984 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
2985 FlowTables = FlowTables and curSwitch
2986 if curSwitch == main.FALSE:
2987 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2988 utilities.assert_equals(
2989 expect=main.TRUE,
2990 actual=FlowTables,
2991 onpass="No changes were found in the flow tables",
2992 onfail="Changes were found in the flow tables" )
2993
Jon Hallca319892017-06-15 15:25:22 -07002994 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07002995 """
2996 main.step( "Check the continuous pings to ensure that no packets " +
2997 "were dropped during component failure" )
2998 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2999 main.params[ 'TESTONIP' ] )
3000 LossInPings = main.FALSE
3001 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3002 for i in range( 8, 18 ):
3003 main.log.info(
3004 "Checking for a loss in pings along flow from s" +
3005 str( i ) )
3006 LossInPings = main.Mininet2.checkForLoss(
3007 "/tmp/ping.h" +
3008 str( i ) ) or LossInPings
3009 if LossInPings == main.TRUE:
3010 main.log.info( "Loss in ping detected" )
3011 elif LossInPings == main.ERROR:
3012 main.log.info( "There are multiple mininet process running" )
3013 elif LossInPings == main.FALSE:
3014 main.log.info( "No Loss in the pings" )
3015 main.log.info( "No loss of dataplane connectivity" )
3016 utilities.assert_equals(
3017 expect=main.FALSE,
3018 actual=LossInPings,
3019 onpass="No Loss of connectivity",
3020 onfail="Loss of dataplane connectivity detected" )
3021 # NOTE: Since intents are not persisted with IntnentStore,
3022 # we expect loss in dataplane connectivity
3023 LossInPings = main.FALSE
3024 """
3025
3026 def compareTopo( self, main ):
3027 """
3028 Compare topo
3029 """
3030 import json
3031 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003032 assert main, "main not defined"
3033 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003034 try:
3035 from tests.dependencies.topology import Topology
3036 except ImportError:
3037 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003038 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003039 try:
3040 main.topoRelated
3041 except ( NameError, AttributeError ):
3042 main.topoRelated = Topology()
3043 main.case( "Compare ONOS Topology view to Mininet topology" )
3044 main.caseExplanation = "Compare topology objects between Mininet" +\
3045 " and ONOS"
3046 topoResult = main.FALSE
3047 topoFailMsg = "ONOS topology don't match Mininet"
3048 elapsed = 0
3049 count = 0
3050 main.step( "Comparing ONOS topology to MN topology" )
3051 startTime = time.time()
3052 # Give time for Gossip to work
3053 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3054 devicesResults = main.TRUE
3055 linksResults = main.TRUE
3056 hostsResults = main.TRUE
3057 hostAttachmentResults = True
3058 count += 1
3059 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003060 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003061 kwargs={ 'sleep': 5, 'attempts': 5,
3062 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003063 ipResult = main.TRUE
3064
Devin Lim142b5342017-07-20 15:22:39 -07003065 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003066 kwargs={ 'sleep': 5, 'attempts': 5,
3067 'randomTime': True },
3068 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003069
3070 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003071 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003072 if hosts[ controller ]:
3073 for host in hosts[ controller ]:
3074 if host is None or host.get( 'ipAddresses', [] ) == []:
3075 main.log.error(
3076 "Error with host ipAddresses on controller" +
3077 controllerStr + ": " + str( host ) )
3078 ipResult = main.FALSE
Devin Lim142b5342017-07-20 15:22:39 -07003079 ports = main.topoRelated.getAll( "ports" , True,
Jon Hallca319892017-06-15 15:25:22 -07003080 kwargs={ 'sleep': 5, 'attempts': 5,
3081 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003082 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003083 kwargs={ 'sleep': 5, 'attempts': 5,
3084 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003085 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003086 kwargs={ 'sleep': 5, 'attempts': 5,
3087 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003088
3089 elapsed = time.time() - startTime
3090 cliTime = time.time() - cliStart
3091 print "Elapsed time: " + str( elapsed )
3092 print "CLI time: " + str( cliTime )
3093
3094 if all( e is None for e in devices ) and\
3095 all( e is None for e in hosts ) and\
3096 all( e is None for e in ports ) and\
3097 all( e is None for e in links ) and\
3098 all( e is None for e in clusters ):
3099 topoFailMsg = "Could not get topology from ONOS"
3100 main.log.error( topoFailMsg )
3101 continue # Try again, No use trying to compare
3102
3103 mnSwitches = main.Mininet1.getSwitches()
3104 mnLinks = main.Mininet1.getLinks()
3105 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003106 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003107 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003108 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1, controller,
3109 mnSwitches,
3110 devices, ports )
3111 utilities.assert_equals( expect=main.TRUE,
3112 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003113 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003114 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003115 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003116 " Switches view is incorrect" )
3117
3118
3119 currentLinksResult = main.topoRelated.compareBase( links, controller,
3120 main.Mininet1.compareLinks,
3121 [mnSwitches, mnLinks] )
3122 utilities.assert_equals( expect=main.TRUE,
3123 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003124 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003125 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003126 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003127 " links view is incorrect" )
3128 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3129 currentHostsResult = main.Mininet1.compareHosts(
3130 mnHosts,
3131 hosts[ controller ] )
3132 elif hosts[ controller ] == []:
3133 currentHostsResult = main.TRUE
3134 else:
3135 currentHostsResult = main.FALSE
3136 utilities.assert_equals( expect=main.TRUE,
3137 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003138 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003139 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003140 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003141 " hosts don't match Mininet" )
3142 # CHECKING HOST ATTACHMENT POINTS
3143 hostAttachment = True
3144 zeroHosts = False
3145 # FIXME: topo-HA/obelisk specific mappings:
3146 # key is mac and value is dpid
3147 mappings = {}
3148 for i in range( 1, 29 ): # hosts 1 through 28
3149 # set up correct variables:
3150 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
3151 if i == 1:
3152 deviceId = "1000".zfill( 16 )
3153 elif i == 2:
3154 deviceId = "2000".zfill( 16 )
3155 elif i == 3:
3156 deviceId = "3000".zfill( 16 )
3157 elif i == 4:
3158 deviceId = "3004".zfill( 16 )
3159 elif i == 5:
3160 deviceId = "5000".zfill( 16 )
3161 elif i == 6:
3162 deviceId = "6000".zfill( 16 )
3163 elif i == 7:
3164 deviceId = "6007".zfill( 16 )
3165 elif i >= 8 and i <= 17:
3166 dpid = '3' + str( i ).zfill( 3 )
3167 deviceId = dpid.zfill( 16 )
3168 elif i >= 18 and i <= 27:
3169 dpid = '6' + str( i ).zfill( 3 )
3170 deviceId = dpid.zfill( 16 )
3171 elif i == 28:
3172 deviceId = "2800".zfill( 16 )
3173 mappings[ macId ] = deviceId
3174 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3175 if hosts[ controller ] == []:
3176 main.log.warn( "There are no hosts discovered" )
3177 zeroHosts = True
3178 else:
3179 for host in hosts[ controller ]:
3180 mac = None
3181 location = None
3182 device = None
3183 port = None
3184 try:
3185 mac = host.get( 'mac' )
3186 assert mac, "mac field could not be found for this host object"
Devin Limefaf3062017-08-14 16:18:19 -07003187 print host
3188 if 'locations' in host:
3189 location = host.get( 'locations' )[ 0 ]
3190 elif 'location' in host:
3191 location = host.get( 'location' )
Devin Lim58046fa2017-07-05 16:55:00 -07003192 assert location, "location field could not be found for this host object"
3193
3194 # Trim the protocol identifier off deviceId
3195 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
3196 assert device, "elementId field could not be found for this host location object"
3197
3198 port = location.get( 'port' )
3199 assert port, "port field could not be found for this host location object"
3200
3201 # Now check if this matches where they should be
3202 if mac and device and port:
3203 if str( port ) != "1":
3204 main.log.error( "The attachment port is incorrect for " +
3205 "host " + str( mac ) +
3206 ". Expected: 1 Actual: " + str( port ) )
3207 hostAttachment = False
3208 if device != mappings[ str( mac ) ]:
3209 main.log.error( "The attachment device is incorrect for " +
3210 "host " + str( mac ) +
3211 ". Expected: " + mappings[ str( mac ) ] +
3212 " Actual: " + device )
3213 hostAttachment = False
3214 else:
3215 hostAttachment = False
Devin Limefaf3062017-08-14 16:18:19 -07003216 except ( AssertionError, TypeError ):
Devin Lim58046fa2017-07-05 16:55:00 -07003217 main.log.exception( "Json object not as expected" )
3218 main.log.error( repr( host ) )
3219 hostAttachment = False
3220 else:
3221 main.log.error( "No hosts json output or \"Error\"" +
3222 " in output. hosts = " +
3223 repr( hosts[ controller ] ) )
3224 if zeroHosts is False:
3225 # TODO: Find a way to know if there should be hosts in a
3226 # given point of the test
3227 hostAttachment = True
3228
3229 # END CHECKING HOST ATTACHMENT POINTS
3230 devicesResults = devicesResults and currentDevicesResult
3231 linksResults = linksResults and currentLinksResult
3232 hostsResults = hostsResults and currentHostsResult
3233 hostAttachmentResults = hostAttachmentResults and\
3234 hostAttachment
3235 topoResult = ( devicesResults and linksResults
3236 and hostsResults and ipResult and
3237 hostAttachmentResults )
3238 utilities.assert_equals( expect=True,
3239 actual=topoResult,
3240 onpass="ONOS topology matches Mininet",
3241 onfail=topoFailMsg )
3242 # End of While loop to pull ONOS state
3243
3244 # Compare json objects for hosts and dataplane clusters
3245
3246 # hosts
3247 main.step( "Hosts view is consistent across all ONOS nodes" )
3248 consistentHostsResult = main.TRUE
3249 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003250 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003251 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3252 if hosts[ controller ] == hosts[ 0 ]:
3253 continue
3254 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003255 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003256 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003257 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003258 consistentHostsResult = main.FALSE
3259
3260 else:
Jon Hallca319892017-06-15 15:25:22 -07003261 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003262 controllerStr )
3263 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003264 main.log.debug( controllerStr +
3265 " hosts response: " +
3266 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003267 utilities.assert_equals(
3268 expect=main.TRUE,
3269 actual=consistentHostsResult,
3270 onpass="Hosts view is consistent across all ONOS nodes",
3271 onfail="ONOS nodes have different views of hosts" )
3272
3273 main.step( "Hosts information is correct" )
3274 hostsResults = hostsResults and ipResult
3275 utilities.assert_equals(
3276 expect=main.TRUE,
3277 actual=hostsResults,
3278 onpass="Host information is correct",
3279 onfail="Host information is incorrect" )
3280
3281 main.step( "Host attachment points to the network" )
3282 utilities.assert_equals(
3283 expect=True,
3284 actual=hostAttachmentResults,
3285 onpass="Hosts are correctly attached to the network",
3286 onfail="ONOS did not correctly attach hosts to the network" )
3287
3288 # Strongly connected clusters of devices
3289 main.step( "Clusters view is consistent across all ONOS nodes" )
3290 consistentClustersResult = main.TRUE
3291 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003292 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003293 if "Error" not in clusters[ controller ]:
3294 if clusters[ controller ] == clusters[ 0 ]:
3295 continue
3296 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003297 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003298 controllerStr +
3299 " is inconsistent with ONOS1" )
3300 consistentClustersResult = main.FALSE
3301 else:
3302 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003303 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003304 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003305 main.log.debug( controllerStr +
3306 " clusters response: " +
3307 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003308 utilities.assert_equals(
3309 expect=main.TRUE,
3310 actual=consistentClustersResult,
3311 onpass="Clusters view is consistent across all ONOS nodes",
3312 onfail="ONOS nodes have different views of clusters" )
3313 if not consistentClustersResult:
3314 main.log.debug( clusters )
3315 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003316 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003317
3318 main.step( "There is only one SCC" )
3319 # there should always only be one cluster
3320 try:
3321 numClusters = len( json.loads( clusters[ 0 ] ) )
3322 except ( ValueError, TypeError ):
3323 main.log.exception( "Error parsing clusters[0]: " +
3324 repr( clusters[ 0 ] ) )
3325 numClusters = "ERROR"
3326 clusterResults = main.FALSE
3327 if numClusters == 1:
3328 clusterResults = main.TRUE
3329 utilities.assert_equals(
3330 expect=1,
3331 actual=numClusters,
3332 onpass="ONOS shows 1 SCC",
3333 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3334
3335 topoResult = ( devicesResults and linksResults
3336 and hostsResults and consistentHostsResult
3337 and consistentClustersResult and clusterResults
3338 and ipResult and hostAttachmentResults )
3339
3340 topoResult = topoResult and int( count <= 2 )
3341 note = "note it takes about " + str( int( cliTime ) ) + \
3342 " seconds for the test to make all the cli calls to fetch " +\
3343 "the topology from each ONOS instance"
3344 main.log.info(
3345 "Very crass estimate for topology discovery/convergence( " +
3346 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3347 str( count ) + " tries" )
3348
3349 main.step( "Device information is correct" )
3350 utilities.assert_equals(
3351 expect=main.TRUE,
3352 actual=devicesResults,
3353 onpass="Device information is correct",
3354 onfail="Device information is incorrect" )
3355
3356 main.step( "Links are correct" )
3357 utilities.assert_equals(
3358 expect=main.TRUE,
3359 actual=linksResults,
3360 onpass="Link are correct",
3361 onfail="Links are incorrect" )
3362
3363 main.step( "Hosts are correct" )
3364 utilities.assert_equals(
3365 expect=main.TRUE,
3366 actual=hostsResults,
3367 onpass="Hosts are correct",
3368 onfail="Hosts are incorrect" )
3369
3370 # FIXME: move this to an ONOS state case
3371 main.step( "Checking ONOS nodes" )
3372 nodeResults = utilities.retry( self.nodesCheck,
3373 False,
Jon Hallca319892017-06-15 15:25:22 -07003374 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07003375 attempts=5 )
3376 utilities.assert_equals( expect=True, actual=nodeResults,
3377 onpass="Nodes check successful",
3378 onfail="Nodes check NOT successful" )
3379 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003380 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003381 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003382 ctrl.name,
3383 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003384
3385 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003386 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003387
Devin Lim58046fa2017-07-05 16:55:00 -07003388 def linkDown( self, main, fromS="s3", toS="s28" ):
3389 """
3390 Link fromS-toS down
3391 """
3392 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003393 assert main, "main not defined"
3394 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003395 # NOTE: You should probably run a topology check after this
3396
3397 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3398
3399 description = "Turn off a link to ensure that Link Discovery " +\
3400 "is working properly"
3401 main.case( description )
3402
3403 main.step( "Kill Link between " + fromS + " and " + toS )
3404 LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
3405 main.log.info( "Waiting " + str( linkSleep ) +
3406 " seconds for link down to be discovered" )
3407 time.sleep( linkSleep )
3408 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3409 onpass="Link down successful",
3410 onfail="Failed to bring link down" )
3411 # TODO do some sort of check here
3412
3413 def linkUp( self, main, fromS="s3", toS="s28" ):
3414 """
3415 Link fromS-toS up
3416 """
3417 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003418 assert main, "main not defined"
3419 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003420 # NOTE: You should probably run a topology check after this
3421
3422 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3423
3424 description = "Restore a link to ensure that Link Discovery is " + \
3425 "working properly"
3426 main.case( description )
3427
3428 main.step( "Bring link between " + fromS + " and " + toS +" back up" )
3429 LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
3430 main.log.info( "Waiting " + str( linkSleep ) +
3431 " seconds for link up to be discovered" )
3432 time.sleep( linkSleep )
3433 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3434 onpass="Link up successful",
3435 onfail="Failed to bring link up" )
3436
3437 def switchDown( self, main ):
3438 """
3439 Switch Down
3440 """
3441 # NOTE: You should probably run a topology check after this
3442 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003443 assert main, "main not defined"
3444 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003445
3446 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3447
3448 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003449 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003450 main.case( description )
3451 switch = main.params[ 'kill' ][ 'switch' ]
3452 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3453
3454 # TODO: Make this switch parameterizable
3455 main.step( "Kill " + switch )
3456 main.log.info( "Deleting " + switch )
3457 main.Mininet1.delSwitch( switch )
3458 main.log.info( "Waiting " + str( switchSleep ) +
3459 " seconds for switch down to be discovered" )
3460 time.sleep( switchSleep )
3461 device = onosCli.getDevice( dpid=switchDPID )
3462 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003463 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003464 result = main.FALSE
3465 if device and device[ 'available' ] is False:
3466 result = main.TRUE
3467 utilities.assert_equals( expect=main.TRUE, actual=result,
3468 onpass="Kill switch successful",
3469 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003470
Devin Lim58046fa2017-07-05 16:55:00 -07003471 def switchUp( self, main ):
3472 """
3473 Switch Up
3474 """
3475 # NOTE: You should probably run a topology check after this
3476 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003477 assert main, "main not defined"
3478 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003479
3480 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3481 switch = main.params[ 'kill' ][ 'switch' ]
3482 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3483 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003484 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003485 description = "Adding a switch to ensure it is discovered correctly"
3486 main.case( description )
3487
3488 main.step( "Add back " + switch )
3489 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3490 for peer in links:
3491 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003492 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003493 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3494 main.log.info( "Waiting " + str( switchSleep ) +
3495 " seconds for switch up to be discovered" )
3496 time.sleep( switchSleep )
3497 device = onosCli.getDevice( dpid=switchDPID )
3498 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003499 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003500 result = main.FALSE
3501 if device and device[ 'available' ]:
3502 result = main.TRUE
3503 utilities.assert_equals( expect=main.TRUE, actual=result,
3504 onpass="add switch successful",
3505 onfail="Failed to add switch?" )
3506
3507 def startElectionApp( self, main ):
3508 """
3509 start election app on all onos nodes
3510 """
Devin Lim58046fa2017-07-05 16:55:00 -07003511 assert main, "main not defined"
3512 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003513
3514 main.case( "Start Leadership Election app" )
3515 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003516 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -07003517 appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003518 utilities.assert_equals(
3519 expect=main.TRUE,
3520 actual=appResult,
3521 onpass="Election app installed",
3522 onfail="Something went wrong with installing Leadership election" )
3523
3524 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003525 onosCli.electionTestRun()
3526 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003527 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003528 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003529 utilities.assert_equals(
3530 expect=True,
3531 actual=sameResult,
3532 onpass="All nodes see the same leaderboards",
3533 onfail="Inconsistent leaderboards" )
3534
3535 if sameResult:
3536 leader = leaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003537 if onosCli.ipAddress in leader:
Devin Lim58046fa2017-07-05 16:55:00 -07003538 correctLeader = True
3539 else:
3540 correctLeader = False
3541 main.step( "First node was elected leader" )
3542 utilities.assert_equals(
3543 expect=True,
3544 actual=correctLeader,
3545 onpass="Correct leader was elected",
3546 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003547 main.Cluster.testLeader = leader
3548
Devin Lim58046fa2017-07-05 16:55:00 -07003549 def isElectionFunctional( self, main ):
3550 """
3551 Check that Leadership Election is still functional
3552 15.1 Run election on each node
3553 15.2 Check that each node has the same leaders and candidates
3554 15.3 Find current leader and withdraw
3555 15.4 Check that a new node was elected leader
3556 15.5 Check that that new leader was the candidate of old leader
3557 15.6 Run for election on old leader
3558 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3559 15.8 Make sure that the old leader was added to the candidate list
3560
3561 old and new variable prefixes refer to data from before vs after
3562 withdrawl and later before withdrawl vs after re-election
3563 """
3564 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003565 assert main, "main not defined"
3566 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003567
3568 description = "Check that Leadership Election is still functional"
3569 main.case( description )
3570 # NOTE: Need to re-run after restarts since being a canidate is not persistant
3571
3572 oldLeaders = [] # list of lists of each nodes' candidates before
3573 newLeaders = [] # list of lists of each nodes' candidates after
3574 oldLeader = '' # the old leader from oldLeaders, None if not same
3575 newLeader = '' # the new leaders fron newLoeaders, None if not same
3576 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3577 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003578 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003579 expectNoLeader = True
3580
3581 main.step( "Run for election on each node" )
Devin Lim142b5342017-07-20 15:22:39 -07003582 electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003583 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003584 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003585 actual=electionResult,
3586 onpass="All nodes successfully ran for leadership",
3587 onfail="At least one node failed to run for leadership" )
3588
3589 if electionResult == main.FALSE:
3590 main.log.error(
3591 "Skipping Test Case because Election Test App isn't loaded" )
3592 main.skipCase()
3593
3594 main.step( "Check that each node shows the same leader and candidates" )
3595 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003596 activeCLIs = main.Cluster.active()
3597 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Devin Lim58046fa2017-07-05 16:55:00 -07003598 if sameResult:
3599 oldLeader = oldLeaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003600 main.log.info( "Old leader: " + oldLeader )
Devin Lim58046fa2017-07-05 16:55:00 -07003601 else:
3602 oldLeader = None
3603 utilities.assert_equals(
3604 expect=True,
3605 actual=sameResult,
3606 onpass="Leaderboards are consistent for the election topic",
3607 onfail=failMessage )
3608
3609 main.step( "Find current leader and withdraw" )
3610 withdrawResult = main.TRUE
3611 # do some sanity checking on leader before using it
3612 if oldLeader is None:
3613 main.log.error( "Leadership isn't consistent." )
3614 withdrawResult = main.FALSE
3615 # Get the CLI of the oldLeader
Jon Hallca319892017-06-15 15:25:22 -07003616 for ctrl in main.Cluster.active():
3617 if oldLeader == ctrl.ipAddress:
3618 oldLeaderCLI = ctrl
Devin Lim58046fa2017-07-05 16:55:00 -07003619 break
3620 else: # FOR/ELSE statement
3621 main.log.error( "Leader election, could not find current leader" )
3622 if oldLeader:
3623 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3624 utilities.assert_equals(
3625 expect=main.TRUE,
3626 actual=withdrawResult,
3627 onpass="Node was withdrawn from election",
3628 onfail="Node was not withdrawn from election" )
3629
3630 main.step( "Check that a new node was elected leader" )
3631 failMessage = "Nodes have different leaders"
3632 # Get new leaders and candidates
3633 newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
3634 newLeader = None
3635 if newLeaderResult:
3636 if newLeaders[ 0 ][ 0 ] == 'none':
3637 main.log.error( "No leader was elected on at least 1 node" )
3638 if not expectNoLeader:
3639 newLeaderResult = False
3640 newLeader = newLeaders[ 0 ][ 0 ]
3641
3642 # Check that the new leader is not the older leader, which was withdrawn
3643 if newLeader == oldLeader:
3644 newLeaderResult = False
3645 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3646 " as the current leader" )
3647 utilities.assert_equals(
3648 expect=True,
3649 actual=newLeaderResult,
3650 onpass="Leadership election passed",
3651 onfail="Something went wrong with Leadership election" )
3652
3653 main.step( "Check that that new leader was the candidate of old leader" )
3654 # candidates[ 2 ] should become the top candidate after withdrawl
3655 correctCandidateResult = main.TRUE
3656 if expectNoLeader:
3657 if newLeader == 'none':
3658 main.log.info( "No leader expected. None found. Pass" )
3659 correctCandidateResult = main.TRUE
3660 else:
3661 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3662 correctCandidateResult = main.FALSE
3663 elif len( oldLeaders[ 0 ] ) >= 3:
3664 if newLeader == oldLeaders[ 0 ][ 2 ]:
3665 # correct leader was elected
3666 correctCandidateResult = main.TRUE
3667 else:
3668 correctCandidateResult = main.FALSE
3669 main.log.error( "Candidate {} was elected. {} should have had priority.".format(
3670 newLeader, oldLeaders[ 0 ][ 2 ] ) )
3671 else:
3672 main.log.warn( "Could not determine who should be the correct leader" )
3673 main.log.debug( oldLeaders[ 0 ] )
3674 correctCandidateResult = main.FALSE
3675 utilities.assert_equals(
3676 expect=main.TRUE,
3677 actual=correctCandidateResult,
3678 onpass="Correct Candidate Elected",
3679 onfail="Incorrect Candidate Elected" )
3680
3681 main.step( "Run for election on old leader( just so everyone " +
3682 "is in the hat )" )
3683 if oldLeaderCLI is not None:
3684 runResult = oldLeaderCLI.electionTestRun()
3685 else:
3686 main.log.error( "No old leader to re-elect" )
3687 runResult = main.FALSE
3688 utilities.assert_equals(
3689 expect=main.TRUE,
3690 actual=runResult,
3691 onpass="App re-ran for election",
3692 onfail="App failed to run for election" )
3693
3694 main.step(
3695 "Check that oldLeader is a candidate, and leader if only 1 node" )
3696 # verify leader didn't just change
3697 # Get new leaders and candidates
3698 reRunLeaders = []
3699 time.sleep( 5 ) # Paremterize
3700 positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3701
3702 # Check that the re-elected node is last on the candidate List
3703 if not reRunLeaders[ 0 ]:
3704 positionResult = main.FALSE
3705 elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
3706 main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
3707 str( reRunLeaders[ 0 ] ) ) )
3708 positionResult = main.FALSE
3709 utilities.assert_equals(
3710 expect=True,
3711 actual=positionResult,
3712 onpass="Old leader successfully re-ran for election",
3713 onfail="Something went wrong with Leadership election after " +
3714 "the old leader re-ran for election" )
Jon Hallca319892017-06-15 15:25:22 -07003715
Devin Lim58046fa2017-07-05 16:55:00 -07003716 def installDistributedPrimitiveApp( self, main ):
3717 """
3718 Install Distributed Primitives app
3719 """
3720 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003721 assert main, "main not defined"
3722 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003723
3724 # Variables for the distributed primitives tests
3725 main.pCounterName = "TestON-Partitions"
3726 main.pCounterValue = 0
3727 main.onosSet = set( [] )
3728 main.onosSetName = "TestON-set"
3729
3730 description = "Install Primitives app"
3731 main.case( description )
3732 main.step( "Install Primitives app" )
3733 appName = "org.onosproject.distributedprimitives"
Devin Lime9f0ccf2017-08-11 17:25:12 -07003734 appResults = main.Cluster.next().CLI.activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003735 utilities.assert_equals( expect=main.TRUE,
3736 actual=appResults,
3737 onpass="Primitives app activated",
3738 onfail="Primitives app not activated" )
3739 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003740 time.sleep( 5 ) # To allow all nodes to activate