blob: 9e7e2d5ca782c251572582dfefe1dd4991a90e23 [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002Copyright 2015 Open Networking Foundation ( ONF )
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -070011 ( at your option ) any later version.
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -070012
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
Jon Halla440e872016-03-31 15:15:50 -070021import json
Jon Hall41d39f12016-04-11 22:54:35 -070022import time
Jon Halle1a3b752015-07-22 13:02:46 -070023
Jon Hallf37d44d2017-05-24 10:37:30 -070024
Jon Hall41d39f12016-04-11 22:54:35 -070025class HA():
Jon Hall57b50432015-10-22 10:20:10 -070026
    def __init__( self ):
        """
        HA test helper; instances are stateless apart from a placeholder
        default value.
        """
        self.default = ''
Jon Hall57b50432015-10-22 10:20:10 -070029
    def customizeOnosGenPartitions( self ):
        """
        Copy the test's custom onos-gen-partitions script over the stock one
        on the ONOS bench so packaged clusters use the test's partition
        layout.  Undone by cleanUpGenPartition().
        """
        # copy gen-partions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        # NOTE(review): cpResult is never checked, so a failed copy goes
        #               unnoticed here -- verify this is intentional
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070041
    def cleanUpGenPartition( self ):
        """
        Restore the stock onos-gen-partitions script on the ONOS bench via
        ``git checkout`` of the file customizeOnosGenPartitions() replaced.
        Exits the test on a pexpect timeout/EOF.
        """
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            # NOTE(review): pexpect is not imported in this file's visible
            #               imports -- presumably injected by the TestON
            #               framework; verify before relying on this handler
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070055
Devin Lim58046fa2017-07-05 16:55:00 -070056 def startingMininet( self ):
57 main.step( "Starting Mininet" )
58 # scp topo file to mininet
59 # TODO: move to params?
60 topoName = "obelisk.py"
61 filePath = main.ONOSbench.home + "/tools/test/topos/"
62 main.ONOSbench.scp( main.Mininet1,
63 filePath + topoName,
64 main.Mininet1.home,
65 direction="to" )
66 mnResult = main.Mininet1.startNet()
67 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
68 onpass="Mininet Started",
69 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070070
Devin Lim58046fa2017-07-05 16:55:00 -070071 def scalingMetadata( self ):
72 import re
Devin Lim142b5342017-07-20 15:22:39 -070073 main.step( "Generate initial metadata file" )
Devin Lim58046fa2017-07-05 16:55:00 -070074 main.scaling = main.params[ 'scaling' ].split( "," )
75 main.log.debug( main.scaling )
76 scale = main.scaling.pop( 0 )
77 main.log.debug( scale )
78 if "e" in scale:
79 equal = True
80 else:
81 equal = False
82 main.log.debug( equal )
Devin Lim142b5342017-07-20 15:22:39 -070083 main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
84 genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
Devin Lim58046fa2017-07-05 16:55:00 -070085 utilities.assert_equals( expect=main.TRUE, actual=genResult,
86 onpass="New cluster metadata file generated",
87 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070088
Devin Lim58046fa2017-07-05 16:55:00 -070089 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070090 main.step( "Generate initial metadata file" )
91 if main.Cluster.numCtrls >= 5:
92 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070093 else:
94 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim142b5342017-07-20 15:22:39 -070095 genResult = main.Server.generateFile( main.Cluster.numCtrls )
Devin Lim58046fa2017-07-05 16:55:00 -070096 utilities.assert_equals( expect=main.TRUE, actual=genResult,
97 onpass="New cluster metadata file generated",
98 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070099
Devin Lim142b5342017-07-20 15:22:39 -0700100 def setServerForCluster( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700101 import os
102 main.step( "Setup server for cluster metadata file" )
103 main.serverPort = main.params[ 'server' ][ 'port' ]
104 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
105 main.log.debug( "Root dir: {}".format( rootDir ) )
106 status = main.Server.start( main.ONOSbench,
107 rootDir,
108 port=main.serverPort,
109 logDir=main.logdir + "/server.log" )
110 utilities.assert_equals( expect=main.TRUE, actual=status,
111 onpass="Server started",
112 onfail="Failled to start SimpleHTTPServer" )
113
Jon Hall4f360bc2017-09-07 10:19:52 -0700114 def copyBackupConfig( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700115 main.step( "Copying backup config files" )
116 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
117 cp = main.ONOSbench.scp( main.ONOSbench,
118 main.onosServicepath,
119 main.onosServicepath + ".backup",
120 direction="to" )
121
122 utilities.assert_equals( expect=main.TRUE,
123 actual=cp,
124 onpass="Copy backup config file succeeded",
125 onfail="Copy backup config file failed" )
Jon Hall4f360bc2017-09-07 10:19:52 -0700126
    def setMetadataUrl( self ):
        """
        Rewrite the packaged onos-service script ( via sed over the bench
        shell ) so ONOS fetches its cluster metadata from the HTTP server
        started by setServerForCluster() instead of a local file.

        Relies on main.serverPort and main.onosServicepath being set by the
        earlier setup steps.
        """
        # NOTE: You should probably backup the config before and reset the config after the test
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # Slashes are backslash-escaped because this string is substituted
        # into the sed s/// expression below
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # Insert an export of JAVA_OPTS right after the "bash" shebang line
        # of the onos-service script ( doubled {{ }} survive .format() )
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
                                                                                        main.onosServicepath )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        # The echoed command contains the metadata file name; consume it,
        # then wait for the prompt and log everything seen
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )
148
149 def cleanUpOnosService( self ):
150 # Cleanup custom onos-service file
151 main.ONOSbench.scp( main.ONOSbench,
152 main.onosServicepath + ".backup",
153 main.onosServicepath,
154 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700155
Jon Halla440e872016-03-31 15:15:50 -0700156 def consistentCheck( self ):
157 """
158 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700159
Jon Hallf37d44d2017-05-24 10:37:30 -0700160 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700161 - onosCounters is the parsed json output of the counters command on
162 all nodes
163 - consistent is main.TRUE if all "TestON" counters are consitent across
164 all nodes or main.FALSE
Jon Halla440e872016-03-31 15:15:50 -0700165 """
Jon Halle1a3b752015-07-22 13:02:46 -0700166 try:
Jon Halla440e872016-03-31 15:15:50 -0700167 # Get onos counters results
168 onosCountersRaw = []
169 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700170 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700171 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700172 name="counters-" + str( ctrl ),
173 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700174 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700175 'randomTime': True } )
176 threads.append( t )
177 t.start()
178 for t in threads:
179 t.join()
180 onosCountersRaw.append( t.result )
181 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700182 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700183 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700184 onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700185 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700186 main.log.error( "Could not parse counters response from " +
Devin Lim142b5342017-07-20 15:22:39 -0700187 str( main.Cluster.active( i ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700188 main.log.warn( repr( onosCountersRaw[ i ] ) )
189 onosCounters.append( [] )
190
191 testCounters = {}
192 # make a list of all the "TestON-*" counters in ONOS
Jon Hall41d39f12016-04-11 22:54:35 -0700193 # lookes like a dict whose keys are the name of the ONOS node and
194 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700195 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700196 # }
197 # NOTE: There is an assumtion that all nodes are active
198 # based on the above for loops
199 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700200 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700201 if 'TestON' in key:
Devin Lim142b5342017-07-20 15:22:39 -0700202 node = str( main.Cluster.active( controller[ 0 ] ) )
Jon Halla440e872016-03-31 15:15:50 -0700203 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700204 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700205 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700206 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700207 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700208 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700209 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
210 if all( tmp ):
211 consistent = main.TRUE
212 else:
213 consistent = main.FALSE
214 main.log.error( "ONOS nodes have different values for counters:\n" +
215 testCounters )
216 return ( onosCounters, consistent )
217 except Exception:
218 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700219 main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700220
    def counterCheck( self, counterName, counterValue ):
        """
        Checks that TestON counters are consistent across all nodes and that
        specified counter is in ONOS with the given value

        counterName:  name of the distributed counter to look up
        counterValue: expected value of that counter on every node

        Returns main.TRUE only when the counters are consistent AND every
        node reports the expected value; exits the test on unexpected
        exceptions.
        """
        try:
            correctResults = main.TRUE
            # Get onos counters results and consistentCheck
            onosCounters, consistent = self.consistentCheck()
            # Check for correct values
            for i in range( len( main.Cluster.active() ) ):
                current = onosCounters[ i ]
                onosValue = None
                try:
                    onosValue = current.get( counterName )
                except AttributeError:
                    # current was not a dict ( e.g. the parse-failure
                    # placeholder from consistentCheck )
                    node = str( main.Cluster.active( i ) )
                    main.log.exception( node + " counters result " +
                                        "is not as expected" )
                    correctResults = main.FALSE
                if onosValue == counterValue:
                    main.log.info( counterName + " counter value is correct" )
                else:
                    main.log.error( counterName +
                                    " counter value is incorrect," +
                                    " expected value: " + str( counterValue ) +
                                    " current value: " + str( onosValue ) )
                    correctResults = main.FALSE
            return consistent and correctResults
        except Exception:
            main.log.exception( "" )
            main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700253
254 def consistentLeaderboards( self, nodes ):
255 TOPIC = 'org.onosproject.election'
256 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700257 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700258 for n in range( 5 ): # Retry in case election is still happening
259 leaderList = []
260 # Get all leaderboards
261 for cli in nodes:
262 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
263 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700264 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700265 leaderList is not None
266 main.log.debug( leaderList )
267 main.log.warn( result )
268 if result:
269 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700270 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700271 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
272 return ( result, leaderList )
273
    def nodesCheck( self, nodes ):
        """
        Check that every given ONOS node sees the whole active cluster as
        READY.

        nodes: iterable of ONOS node handles whose ``nodes`` command is run
               in parallel

        Returns True iff every queried node reports a READY entry for
        exactly the set of active cluster IPs.
        """
        nodesOutput = []
        results = True
        threads = []
        # Run the "nodes" command on every node in parallel
        for node in nodes:
            t = main.Thread( target=node.nodes,
                             name="nodes-" + str( node ),
                             args=[] )
            threads.append( t )
            t.start()

        for t in threads:
            t.join()
            nodesOutput.append( t.result )
        # Expected membership: sorted IPs of all active nodes
        ips = sorted( main.Cluster.getIps( activeOnly=True ) )
        for i in nodesOutput:
            try:
                current = json.loads( i )
                activeIps = []
                currentResult = False
                for node in current:
                    if node[ 'state' ] == 'READY':
                        activeIps.append( node[ 'ip' ] )
                activeIps.sort()
                # A node passes only if its READY set matches exactly
                if ips == activeIps:
                    currentResult = True
            except ( ValueError, TypeError ):
                main.log.error( "Error parsing nodes output" )
                main.log.warn( repr( i ) )
                currentResult = False
            results = results and currentResult
        return results
Jon Hallca319892017-06-15 15:25:22 -0700306
    def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
        """
        DEPRECATED: ONOSSetup.py now creates these graphs.  Kept only so
        existing callers do not break; all arguments are ignored.
        """
        # DEPRECATED: ONOSSetup.py now creates these graphs.

        main.log.debug( "HA.generateGraph() is deprecated; ONOSSetup now creates these graphs." )
Jon Hallca319892017-06-15 15:25:22 -0700311
    def initialSetUp( self, serviceClean=False ):
        """
        Rest of the initial test setup, run after the cluster is installed:
        optionally start packet capture on Mininet, optionally revert any
        onos service-file customizations, verify all nodes are READY, then
        activate the apps and apply the ONOS configuration listed in the
        test params.

        serviceClean: when True, git-checkout the packaged onos.conf and
                      onos.service files back to their stock versions

        Exits the test if the node READY check fails.
        """
        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        # Retry: nodes may still be coming up right after install
        nodeResults = utilities.retry( self.nodesCheck,
                                       False,
                                       args=[ main.Cluster.active() ],
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump each node's non-ACTIVE OSGi components for debugging,
            # then abort the test
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            # Verify every requested app reached the ACTIVE state
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This should be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700398
Jon Hallca319892017-06-15 15:25:22 -0700399 def commonChecks( self ):
400 # TODO: make this assertable or assert in here?
401 self.topicsCheck()
402 self.partitionsCheck()
403 self.pendingMapCheck()
404 self.appCheck()
405
    def topicsCheck( self, extraTopics=[] ):
        """
        Check for work partition topics in leaders output

        extraTopics: additional topic names that must also appear in the
                     leaders output.  NOTE: mutable default, but it is only
                     read here, never mutated, so it is safe.

        Returns True when any expected topic was MISSING from the leaders
        output ( i.e. True means failure ), False otherwise.
        """
        leaders = main.Cluster.next().leaders()
        missing = False
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                output = json.dumps( parsedLeaders,
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
                main.log.debug( "Leaders: " + output )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                topics += extraTopics
                main.log.debug( topics )
                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        if missing:
            # NOTE Can we refactor this into the Cluster class?
            #      Maybe an option to print the output of a command from each node?
            # On failure, dump every node's raw leaders output for debugging
            for ctrl in main.Cluster.active():
                response = ctrl.CLI.leaders( jsonFormat=False )
                main.log.debug( str( ctrl.name ) + " leaders output: \n" +
                                str( response ) )
        return missing
445
446 def partitionsCheck( self ):
447 # TODO: return something assertable
448 partitions = main.Cluster.next().partitions()
449 try:
450 if partitions:
451 parsedPartitions = json.loads( partitions )
452 output = json.dumps( parsedPartitions,
453 sort_keys=True,
454 indent=4,
455 separators=( ',', ': ' ) )
456 main.log.debug( "Partitions: " + output )
457 # TODO check for a leader in all paritions
458 # TODO check for consistency among nodes
459 else:
460 main.log.error( "partitions() returned None" )
461 except ( ValueError, TypeError ):
462 main.log.exception( "Error parsing partitions" )
463 main.log.error( repr( partitions ) )
464
465 def pendingMapCheck( self ):
466 pendingMap = main.Cluster.next().pendingMap()
467 try:
468 if pendingMap:
469 parsedPending = json.loads( pendingMap )
470 output = json.dumps( parsedPending,
471 sort_keys=True,
472 indent=4,
473 separators=( ',', ': ' ) )
474 main.log.debug( "Pending map: " + output )
475 # TODO check something here?
476 else:
477 main.log.error( "pendingMap() returned None" )
478 except ( ValueError, TypeError ):
479 main.log.exception( "Error parsing pending map" )
480 main.log.error( repr( pendingMap ) )
481
482 def appCheck( self ):
483 """
484 Check App IDs on all nodes
485 """
486 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
487 appResults = main.Cluster.command( "appToIDCheck" )
488 appCheck = all( i == main.TRUE for i in appResults )
489 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700490 ctrl = main.Cluster.active( 0 )
Jon Hallca319892017-06-15 15:25:22 -0700491 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
492 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
493 return appCheck
494
Jon Halle0f0b342017-04-18 11:43:47 -0700495 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
496 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700497 completedValues = main.Cluster.command( "workQueueTotalCompleted",
498 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700499 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700500 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700501 completedResult = all( completedResults )
502 if not completedResult:
503 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
504 workQueueName, completed, completedValues ) )
505
506 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700507 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
508 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700509 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700510 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700511 inProgressResult = all( inProgressResults )
512 if not inProgressResult:
513 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
514 workQueueName, inProgress, inProgressValues ) )
515
516 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700517 pendingValues = main.Cluster.command( "workQueueTotalPending",
518 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700519 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700520 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700521 pendingResult = all( pendingResults )
522 if not pendingResult:
523 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
524 workQueueName, pending, pendingValues ) )
525 return completedResult and inProgressResult and pendingResult
526
    def assignDevices( self, main ):
        """
        Assign devices to controllers

        main: the TestON main object ( passed explicitly because test cases
              call this helper with their own main )

        Assigns all 28 obelisk switches to every cluster controller via
        ovs-vsctl, then verifies from each switch's controller list that it
        is connected to every running node.
        """
        import re
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
                               "and check that an ONOS node becomes the " + \
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = main.Cluster.getIps()
        swList = []
        # The obelisk topology has switches s1 .. s28
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            # Every running node must appear in the switch's controller list
            for ctrl in main.Cluster.runningNodes:
                if re.search( "tcp:" + ctrl.ipAddress, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + repr( ctrl ) + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )
Jon Hallca319892017-06-15 15:25:22 -0700567
Devin Lim58046fa2017-07-05 16:55:00 -0700568 def assignIntents( self, main ):
569 """
570 Assign intents
571 """
572 import time
573 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700574 assert main, "main not defined"
575 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700576 try:
577 main.HAlabels
578 except ( NameError, AttributeError ):
579 main.log.error( "main.HAlabels not defined, setting to []" )
580 main.HAlabels = []
581 try:
582 main.HAdata
583 except ( NameError, AttributeError ):
584 main.log.error( "data not defined, setting to []" )
585 main.HAdata = []
586 main.case( "Adding host Intents" )
587 main.caseExplanation = "Discover hosts by using pingall then " +\
588 "assign predetermined host-to-host intents." +\
589 " After installation, check that the intent" +\
590 " is distributed to all nodes and the state" +\
591 " is INSTALLED"
592
593 # install onos-app-fwd
594 main.step( "Install reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700595 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -0700596 installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700597 utilities.assert_equals( expect=main.TRUE, actual=installResults,
598 onpass="Install fwd successful",
599 onfail="Install fwd failed" )
600
601 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700602 appCheck = self.appCheck()
603 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700604 onpass="App Ids seem to be correct",
605 onfail="Something is wrong with app Ids" )
606
607 main.step( "Discovering Hosts( Via pingall for now )" )
608 # FIXME: Once we have a host discovery mechanism, use that instead
609 # REACTIVE FWD test
610 pingResult = main.FALSE
611 passMsg = "Reactive Pingall test passed"
612 time1 = time.time()
613 pingResult = main.Mininet1.pingall()
614 time2 = time.time()
615 if not pingResult:
616 main.log.warn( "First pingall failed. Trying again..." )
617 pingResult = main.Mininet1.pingall()
618 passMsg += " on the second try"
619 utilities.assert_equals(
620 expect=main.TRUE,
621 actual=pingResult,
622 onpass=passMsg,
623 onfail="Reactive Pingall failed, " +
624 "one or more ping pairs failed" )
625 main.log.info( "Time for pingall: %2f seconds" %
626 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700627 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700628 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700629 # timeout for fwd flows
630 time.sleep( 11 )
631 # uninstall onos-app-fwd
632 main.step( "Uninstall reactive forwarding app" )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700633 uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700634 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
635 onpass="Uninstall fwd successful",
636 onfail="Uninstall fwd failed" )
637
638 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700639 appCheck2 = self.appCheck()
640 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700641 onpass="App Ids seem to be correct",
642 onfail="Something is wrong with app Ids" )
643
644 main.step( "Add host intents via cli" )
645 intentIds = []
646 # TODO: move the host numbers to params
647 # Maybe look at all the paths we ping?
648 intentAddResult = True
649 hostResult = main.TRUE
650 for i in range( 8, 18 ):
651 main.log.info( "Adding host intent between h" + str( i ) +
652 " and h" + str( i + 10 ) )
653 host1 = "00:00:00:00:00:" + \
654 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
655 host2 = "00:00:00:00:00:" + \
656 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
657 # NOTE: getHost can return None
Jon Hallca319892017-06-15 15:25:22 -0700658 host1Dict = onosCli.CLI.getHost( host1 )
659 host2Dict = onosCli.CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700660 host1Id = None
661 host2Id = None
662 if host1Dict and host2Dict:
663 host1Id = host1Dict.get( 'id', None )
664 host2Id = host2Dict.get( 'id', None )
665 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700666 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700667 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700668 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700669 if tmpId:
670 main.log.info( "Added intent with id: " + tmpId )
671 intentIds.append( tmpId )
672 else:
673 main.log.error( "addHostIntent returned: " +
674 repr( tmpId ) )
675 else:
676 main.log.error( "Error, getHost() failed for h" + str( i ) +
677 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700678 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700679 try:
Jon Hallca319892017-06-15 15:25:22 -0700680 output = json.dumps( json.loads( hosts ),
681 sort_keys=True,
682 indent=4,
683 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700684 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700685 output = repr( hosts )
686 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700687 hostResult = main.FALSE
688 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
689 onpass="Found a host id for each host",
690 onfail="Error looking up host ids" )
691
692 intentStart = time.time()
693 onosIds = onosCli.getAllIntentsId()
694 main.log.info( "Submitted intents: " + str( intentIds ) )
695 main.log.info( "Intents in ONOS: " + str( onosIds ) )
696 for intent in intentIds:
697 if intent in onosIds:
698 pass # intent submitted is in onos
699 else:
700 intentAddResult = False
701 if intentAddResult:
702 intentStop = time.time()
703 else:
704 intentStop = None
705 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700706 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700707 intentStates = []
708 installedCheck = True
709 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
710 count = 0
711 try:
712 for intent in json.loads( intents ):
713 state = intent.get( 'state', None )
714 if "INSTALLED" not in state:
715 installedCheck = False
716 intentId = intent.get( 'id', None )
717 intentStates.append( ( intentId, state ) )
718 except ( ValueError, TypeError ):
719 main.log.exception( "Error parsing intents" )
720 # add submitted intents not in the store
721 tmplist = [ i for i, s in intentStates ]
722 missingIntents = False
723 for i in intentIds:
724 if i not in tmplist:
725 intentStates.append( ( i, " - " ) )
726 missingIntents = True
727 intentStates.sort()
728 for i, s in intentStates:
729 count += 1
730 main.log.info( "%-6s%-15s%-15s" %
731 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700732 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700733
734 intentAddResult = bool( intentAddResult and not missingIntents and
735 installedCheck )
736 if not intentAddResult:
737 main.log.error( "Error in pushing host intents to ONOS" )
738
739 main.step( "Intent Anti-Entropy dispersion" )
740 for j in range( 100 ):
741 correct = True
742 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700743 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700744 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700745 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700746 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700747 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700748 str( sorted( onosIds ) ) )
749 if sorted( ids ) != sorted( intentIds ):
750 main.log.warn( "Set of intent IDs doesn't match" )
751 correct = False
752 break
753 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700754 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700755 for intent in intents:
756 if intent[ 'state' ] != "INSTALLED":
757 main.log.warn( "Intent " + intent[ 'id' ] +
758 " is " + intent[ 'state' ] )
759 correct = False
760 break
761 if correct:
762 break
763 else:
764 time.sleep( 1 )
765 if not intentStop:
766 intentStop = time.time()
767 global gossipTime
768 gossipTime = intentStop - intentStart
769 main.log.info( "It took about " + str( gossipTime ) +
770 " seconds for all intents to appear in each node" )
771 append = False
772 title = "Gossip Intents"
773 count = 1
774 while append is False:
775 curTitle = title + str( count )
776 if curTitle not in main.HAlabels:
777 main.HAlabels.append( curTitle )
778 main.HAdata.append( str( gossipTime ) )
779 append = True
780 else:
781 count += 1
782 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700783 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700784 utilities.assert_greater_equals(
785 expect=maxGossipTime, actual=gossipTime,
786 onpass="ECM anti-entropy for intents worked within " +
787 "expected time",
788 onfail="Intent ECM anti-entropy took too long. " +
789 "Expected time:{}, Actual time:{}".format( maxGossipTime,
790 gossipTime ) )
791 if gossipTime <= maxGossipTime:
792 intentAddResult = True
793
Jon Hallca319892017-06-15 15:25:22 -0700794 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700795 if not intentAddResult or "key" in pendingMap:
796 import time
797 installedCheck = True
798 main.log.info( "Sleeping 60 seconds to see if intents are found" )
799 time.sleep( 60 )
800 onosIds = onosCli.getAllIntentsId()
801 main.log.info( "Submitted intents: " + str( intentIds ) )
802 main.log.info( "Intents in ONOS: " + str( onosIds ) )
803 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700804 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700805 intentStates = []
806 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
807 count = 0
808 try:
809 for intent in json.loads( intents ):
810 # Iter through intents of a node
811 state = intent.get( 'state', None )
812 if "INSTALLED" not in state:
813 installedCheck = False
814 intentId = intent.get( 'id', None )
815 intentStates.append( ( intentId, state ) )
816 except ( ValueError, TypeError ):
817 main.log.exception( "Error parsing intents" )
818 # add submitted intents not in the store
819 tmplist = [ i for i, s in intentStates ]
820 for i in intentIds:
821 if i not in tmplist:
822 intentStates.append( ( i, " - " ) )
823 intentStates.sort()
824 for i, s in intentStates:
825 count += 1
826 main.log.info( "%-6s%-15s%-15s" %
827 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700828 self.topicsCheck( [ "org.onosproject.election" ] )
829 self.partitionsCheck()
830 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700831
Jon Hallca319892017-06-15 15:25:22 -0700832 def pingAcrossHostIntent( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -0700833 """
834 Ping across added host intents
835 """
836 import json
837 import time
Devin Lim58046fa2017-07-05 16:55:00 -0700838 assert main, "main not defined"
839 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700840 main.case( "Verify connectivity by sending traffic across Intents" )
841 main.caseExplanation = "Ping across added host intents to check " +\
842 "functionality and check the state of " +\
843 "the intent"
844
Jon Hallca319892017-06-15 15:25:22 -0700845 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700846 main.step( "Check Intent state" )
847 installedCheck = False
848 loopCount = 0
849 while not installedCheck and loopCount < 40:
850 installedCheck = True
851 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700852 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700853 intentStates = []
854 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
855 count = 0
856 # Iter through intents of a node
857 try:
858 for intent in json.loads( intents ):
859 state = intent.get( 'state', None )
860 if "INSTALLED" not in state:
861 installedCheck = False
Jon Hall8bafdc02017-09-05 11:36:26 -0700862 main.log.debug( "Failed intent: " + str( intent ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700863 intentId = intent.get( 'id', None )
864 intentStates.append( ( intentId, state ) )
865 except ( ValueError, TypeError ):
866 main.log.exception( "Error parsing intents." )
867 # Print states
868 intentStates.sort()
869 for i, s in intentStates:
870 count += 1
871 main.log.info( "%-6s%-15s%-15s" %
872 ( str( count ), str( i ), str( s ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700873 if not installedCheck:
874 time.sleep( 1 )
875 loopCount += 1
876 utilities.assert_equals( expect=True, actual=installedCheck,
877 onpass="Intents are all INSTALLED",
878 onfail="Intents are not all in " +
879 "INSTALLED state" )
880
881 main.step( "Ping across added host intents" )
882 PingResult = main.TRUE
883 for i in range( 8, 18 ):
884 ping = main.Mininet1.pingHost( src="h" + str( i ),
885 target="h" + str( i + 10 ) )
886 PingResult = PingResult and ping
887 if ping == main.FALSE:
888 main.log.warn( "Ping failed between h" + str( i ) +
889 " and h" + str( i + 10 ) )
890 elif ping == main.TRUE:
891 main.log.info( "Ping test passed!" )
892 # Don't set PingResult or you'd override failures
893 if PingResult == main.FALSE:
894 main.log.error(
895 "Intents have not been installed correctly, pings failed." )
896 # TODO: pretty print
Devin Lim58046fa2017-07-05 16:55:00 -0700897 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700898 tmpIntents = onosCli.CLI.intents()
Jon Hallca319892017-06-15 15:25:22 -0700899 output = json.dumps( json.loads( tmpIntents ),
900 sort_keys=True,
901 indent=4,
902 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700903 except ( ValueError, TypeError ):
Jon Hall4173b242017-09-12 17:04:38 -0700904 output = repr( tmpIntents )
Jon Hallca319892017-06-15 15:25:22 -0700905 main.log.debug( "ONOS1 intents: " + output )
Devin Lim58046fa2017-07-05 16:55:00 -0700906 utilities.assert_equals(
907 expect=main.TRUE,
908 actual=PingResult,
909 onpass="Intents have been installed correctly and pings work",
910 onfail="Intents have not been installed correctly, pings failed." )
911
912 main.step( "Check leadership of topics" )
Jon Hallca319892017-06-15 15:25:22 -0700913 topicsCheck = self.topicsCheck()
914 utilities.assert_equals( expect=False, actual=topicsCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700915 onpass="intent Partitions is in leaders",
Jon Hallca319892017-06-15 15:25:22 -0700916 onfail="Some topics were lost" )
917 self.partitionsCheck()
918 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700919
920 if not installedCheck:
921 main.log.info( "Waiting 60 seconds to see if the state of " +
922 "intents change" )
923 time.sleep( 60 )
924 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700925 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700926 intentStates = []
927 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
928 count = 0
929 # Iter through intents of a node
930 try:
931 for intent in json.loads( intents ):
932 state = intent.get( 'state', None )
933 if "INSTALLED" not in state:
934 installedCheck = False
935 intentId = intent.get( 'id', None )
936 intentStates.append( ( intentId, state ) )
937 except ( ValueError, TypeError ):
938 main.log.exception( "Error parsing intents." )
939 intentStates.sort()
940 for i, s in intentStates:
941 count += 1
942 main.log.info( "%-6s%-15s%-15s" %
943 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700944 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700945
Devin Lim58046fa2017-07-05 16:55:00 -0700946 # Print flowrules
Devin Lime9f0ccf2017-08-11 17:25:12 -0700947 main.log.debug( onosCli.CLI.flows() )
Devin Lim58046fa2017-07-05 16:55:00 -0700948 main.step( "Wait a minute then ping again" )
949 # the wait is above
950 PingResult = main.TRUE
951 for i in range( 8, 18 ):
952 ping = main.Mininet1.pingHost( src="h" + str( i ),
953 target="h" + str( i + 10 ) )
954 PingResult = PingResult and ping
955 if ping == main.FALSE:
956 main.log.warn( "Ping failed between h" + str( i ) +
957 " and h" + str( i + 10 ) )
958 elif ping == main.TRUE:
959 main.log.info( "Ping test passed!" )
960 # Don't set PingResult or you'd override failures
961 if PingResult == main.FALSE:
962 main.log.error(
963 "Intents have not been installed correctly, pings failed." )
964 # TODO: pretty print
Jon Hallca319892017-06-15 15:25:22 -0700965 main.log.warn( str( onosCli.name ) + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -0700966 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700967 tmpIntents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700968 main.log.warn( json.dumps( json.loads( tmpIntents ),
969 sort_keys=True,
970 indent=4,
971 separators=( ',', ': ' ) ) )
972 except ( ValueError, TypeError ):
973 main.log.warn( repr( tmpIntents ) )
974 utilities.assert_equals(
975 expect=main.TRUE,
976 actual=PingResult,
977 onpass="Intents have been installed correctly and pings work",
978 onfail="Intents have not been installed correctly, pings failed." )
979
Devin Lim142b5342017-07-20 15:22:39 -0700980 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700981 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700982 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700983 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700984 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700985 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -0700986 actual=rolesNotNull,
987 onpass="Each device has a master",
988 onfail="Some devices don't have a master assigned" )
989
Devin Lim142b5342017-07-20 15:22:39 -0700990 def checkTheRole( self ):
991 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -0700992 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -0700993 consistentMastership = True
994 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -0700995 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -0700996 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700997 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -0700998 main.log.error( "Error in getting " + node + " roles" )
999 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001000 repr( ONOSMastership[ i ] ) )
1001 rolesResults = False
1002 utilities.assert_equals(
1003 expect=True,
1004 actual=rolesResults,
1005 onpass="No error in reading roles output",
1006 onfail="Error in reading roles from ONOS" )
1007
1008 main.step( "Check for consistency in roles from each controller" )
1009 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
1010 main.log.info(
1011 "Switch roles are consistent across all ONOS nodes" )
1012 else:
1013 consistentMastership = False
1014 utilities.assert_equals(
1015 expect=True,
1016 actual=consistentMastership,
1017 onpass="Switch roles are consistent across all ONOS nodes",
1018 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -07001019 return ONOSMastership, rolesResults, consistentMastership
1020
1021 def checkingIntents( self ):
1022 main.step( "Get the intents from each controller" )
1023 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
1024 intentsResults = True
1025 for i in range( len( ONOSIntents ) ):
1026 node = str( main.Cluster.active( i ) )
1027 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
1028 main.log.error( "Error in getting " + node + " intents" )
1029 main.log.warn( node + " intents response: " +
1030 repr( ONOSIntents[ i ] ) )
1031 intentsResults = False
1032 utilities.assert_equals(
1033 expect=True,
1034 actual=intentsResults,
1035 onpass="No error in reading intents output",
1036 onfail="Error in reading intents from ONOS" )
1037 return ONOSIntents, intentsResults
1038
1039 def readingState( self, main ):
1040 """
1041 Reading state of ONOS
1042 """
1043 import json
1044 import time
1045 assert main, "main not defined"
1046 assert utilities.assert_equals, "utilities.assert_equals not defined"
1047 try:
1048 from tests.dependencies.topology import Topology
1049 except ImportError:
1050 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001051 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001052 try:
1053 main.topoRelated
1054 except ( NameError, AttributeError ):
1055 main.topoRelated = Topology()
1056 main.case( "Setting up and gathering data for current state" )
1057 # The general idea for this test case is to pull the state of
1058 # ( intents,flows, topology,... ) from each ONOS node
1059 # We can then compare them with each other and also with past states
1060
1061 global mastershipState
1062 mastershipState = '[]'
1063
1064 self.checkRoleNotNull()
1065
1066 main.step( "Get the Mastership of each switch from each controller" )
1067 mastershipCheck = main.FALSE
1068
1069 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001070
1071 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001072 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001073 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001074 try:
1075 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001076 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001077 json.dumps(
1078 json.loads( ONOSMastership[ i ] ),
1079 sort_keys=True,
1080 indent=4,
1081 separators=( ',', ': ' ) ) )
1082 except ( ValueError, TypeError ):
1083 main.log.warn( repr( ONOSMastership[ i ] ) )
1084 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001085 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001086 mastershipState = ONOSMastership[ 0 ]
1087
Devin Lim58046fa2017-07-05 16:55:00 -07001088 global intentState
1089 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001090 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001091 intentCheck = main.FALSE
1092 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001093
Devin Lim58046fa2017-07-05 16:55:00 -07001094 main.step( "Check for consistency in Intents from each controller" )
1095 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1096 main.log.info( "Intents are consistent across all ONOS " +
1097 "nodes" )
1098 else:
1099 consistentIntents = False
1100 main.log.error( "Intents not consistent" )
1101 utilities.assert_equals(
1102 expect=True,
1103 actual=consistentIntents,
1104 onpass="Intents are consistent across all ONOS nodes",
1105 onfail="ONOS nodes have different views of intents" )
1106
1107 if intentsResults:
1108 # Try to make it easy to figure out what is happening
1109 #
1110 # Intent ONOS1 ONOS2 ...
1111 # 0x01 INSTALLED INSTALLING
1112 # ... ... ...
1113 # ... ... ...
1114 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001115 for ctrl in main.Cluster.active():
1116 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001117 main.log.warn( title )
1118 # get all intent keys in the cluster
1119 keys = []
1120 try:
1121 # Get the set of all intent keys
1122 for nodeStr in ONOSIntents:
1123 node = json.loads( nodeStr )
1124 for intent in node:
1125 keys.append( intent.get( 'id' ) )
1126 keys = set( keys )
1127 # For each intent key, print the state on each node
1128 for key in keys:
1129 row = "%-13s" % key
1130 for nodeStr in ONOSIntents:
1131 node = json.loads( nodeStr )
1132 for intent in node:
1133 if intent.get( 'id', "Error" ) == key:
1134 row += "%-15s" % intent.get( 'state' )
1135 main.log.warn( row )
1136 # End of intent state table
1137 except ValueError as e:
1138 main.log.exception( e )
1139 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1140
1141 if intentsResults and not consistentIntents:
1142 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001143 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001144 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1145 sort_keys=True,
1146 indent=4,
1147 separators=( ',', ': ' ) ) )
1148 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001149 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001150 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001151 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001152 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1153 sort_keys=True,
1154 indent=4,
1155 separators=( ',', ': ' ) ) )
1156 else:
Jon Hallca319892017-06-15 15:25:22 -07001157 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001158 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001159 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001160 intentState = ONOSIntents[ 0 ]
1161
1162 main.step( "Get the flows from each controller" )
1163 global flowState
1164 flowState = []
Jon Hall4173b242017-09-12 17:04:38 -07001165 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001166 ONOSFlowsJson = []
1167 flowCheck = main.FALSE
1168 consistentFlows = True
1169 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001170 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001171 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001172 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001173 main.log.error( "Error in getting " + node + " flows" )
1174 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001175 repr( ONOSFlows[ i ] ) )
1176 flowsResults = False
1177 ONOSFlowsJson.append( None )
1178 else:
1179 try:
1180 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1181 except ( ValueError, TypeError ):
1182 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001183 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001184 " response as json." )
1185 main.log.error( repr( ONOSFlows[ i ] ) )
1186 ONOSFlowsJson.append( None )
1187 flowsResults = False
1188 utilities.assert_equals(
1189 expect=True,
1190 actual=flowsResults,
1191 onpass="No error in reading flows output",
1192 onfail="Error in reading flows from ONOS" )
1193
1194 main.step( "Check for consistency in Flows from each controller" )
1195 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1196 if all( tmp ):
1197 main.log.info( "Flow count is consistent across all ONOS nodes" )
1198 else:
1199 consistentFlows = False
1200 utilities.assert_equals(
1201 expect=True,
1202 actual=consistentFlows,
1203 onpass="The flow count is consistent across all ONOS nodes",
1204 onfail="ONOS nodes have different flow counts" )
1205
1206 if flowsResults and not consistentFlows:
1207 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001208 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001209 try:
1210 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001211 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001212 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1213 indent=4, separators=( ',', ': ' ) ) )
1214 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001215 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001216 repr( ONOSFlows[ i ] ) )
1217 elif flowsResults and consistentFlows:
1218 flowCheck = main.TRUE
1219 flowState = ONOSFlows[ 0 ]
1220
1221 main.step( "Get the OF Table entries" )
1222 global flows
1223 flows = []
1224 for i in range( 1, 29 ):
1225 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1226 if flowCheck == main.FALSE:
1227 for table in flows:
1228 main.log.warn( table )
1229 # TODO: Compare switch flow tables with ONOS flow tables
1230
1231 main.step( "Start continuous pings" )
1232 main.Mininet2.pingLong(
1233 src=main.params[ 'PING' ][ 'source1' ],
1234 target=main.params[ 'PING' ][ 'target1' ],
1235 pingTime=500 )
1236 main.Mininet2.pingLong(
1237 src=main.params[ 'PING' ][ 'source2' ],
1238 target=main.params[ 'PING' ][ 'target2' ],
1239 pingTime=500 )
1240 main.Mininet2.pingLong(
1241 src=main.params[ 'PING' ][ 'source3' ],
1242 target=main.params[ 'PING' ][ 'target3' ],
1243 pingTime=500 )
1244 main.Mininet2.pingLong(
1245 src=main.params[ 'PING' ][ 'source4' ],
1246 target=main.params[ 'PING' ][ 'target4' ],
1247 pingTime=500 )
1248 main.Mininet2.pingLong(
1249 src=main.params[ 'PING' ][ 'source5' ],
1250 target=main.params[ 'PING' ][ 'target5' ],
1251 pingTime=500 )
1252 main.Mininet2.pingLong(
1253 src=main.params[ 'PING' ][ 'source6' ],
1254 target=main.params[ 'PING' ][ 'target6' ],
1255 pingTime=500 )
1256 main.Mininet2.pingLong(
1257 src=main.params[ 'PING' ][ 'source7' ],
1258 target=main.params[ 'PING' ][ 'target7' ],
1259 pingTime=500 )
1260 main.Mininet2.pingLong(
1261 src=main.params[ 'PING' ][ 'source8' ],
1262 target=main.params[ 'PING' ][ 'target8' ],
1263 pingTime=500 )
1264 main.Mininet2.pingLong(
1265 src=main.params[ 'PING' ][ 'source9' ],
1266 target=main.params[ 'PING' ][ 'target9' ],
1267 pingTime=500 )
1268 main.Mininet2.pingLong(
1269 src=main.params[ 'PING' ][ 'source10' ],
1270 target=main.params[ 'PING' ][ 'target10' ],
1271 pingTime=500 )
1272
1273 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001274 devices = main.topoRelated.getAll( "devices" )
1275 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1276 ports = main.topoRelated.getAll( "ports" )
1277 links = main.topoRelated.getAll( "links" )
1278 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001279 # Compare json objects for hosts and dataplane clusters
1280
1281 # hosts
1282 main.step( "Host view is consistent across ONOS nodes" )
1283 consistentHostsResult = main.TRUE
1284 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001285 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001286 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1287 if hosts[ controller ] == hosts[ 0 ]:
1288 continue
1289 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001290 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001291 controllerStr +
1292 " is inconsistent with ONOS1" )
1293 main.log.warn( repr( hosts[ controller ] ) )
1294 consistentHostsResult = main.FALSE
1295
1296 else:
Jon Hallca319892017-06-15 15:25:22 -07001297 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001298 controllerStr )
1299 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001300 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001301 " hosts response: " +
1302 repr( hosts[ controller ] ) )
1303 utilities.assert_equals(
1304 expect=main.TRUE,
1305 actual=consistentHostsResult,
1306 onpass="Hosts view is consistent across all ONOS nodes",
1307 onfail="ONOS nodes have different views of hosts" )
1308
1309 main.step( "Each host has an IP address" )
1310 ipResult = main.TRUE
1311 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001312 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001313 if hosts[ controller ]:
1314 for host in hosts[ controller ]:
1315 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001316 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001317 controllerStr + ": " + str( host ) )
1318 ipResult = main.FALSE
1319 utilities.assert_equals(
1320 expect=main.TRUE,
1321 actual=ipResult,
1322 onpass="The ips of the hosts aren't empty",
1323 onfail="The ip of at least one host is missing" )
1324
1325 # Strongly connected clusters of devices
1326 main.step( "Cluster view is consistent across ONOS nodes" )
1327 consistentClustersResult = main.TRUE
1328 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001329 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001330 if "Error" not in clusters[ controller ]:
1331 if clusters[ controller ] == clusters[ 0 ]:
1332 continue
1333 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001334 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001335 " is inconsistent with ONOS1" )
1336 consistentClustersResult = main.FALSE
1337
1338 else:
1339 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001340 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001341 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001342 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001343 " clusters response: " +
1344 repr( clusters[ controller ] ) )
1345 utilities.assert_equals(
1346 expect=main.TRUE,
1347 actual=consistentClustersResult,
1348 onpass="Clusters view is consistent across all ONOS nodes",
1349 onfail="ONOS nodes have different views of clusters" )
1350 if not consistentClustersResult:
1351 main.log.debug( clusters )
1352
1353 # there should always only be one cluster
1354 main.step( "Cluster view correct across ONOS nodes" )
1355 try:
1356 numClusters = len( json.loads( clusters[ 0 ] ) )
1357 except ( ValueError, TypeError ):
1358 main.log.exception( "Error parsing clusters[0]: " +
1359 repr( clusters[ 0 ] ) )
1360 numClusters = "ERROR"
1361 utilities.assert_equals(
1362 expect=1,
1363 actual=numClusters,
1364 onpass="ONOS shows 1 SCC",
1365 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1366
1367 main.step( "Comparing ONOS topology to MN" )
1368 devicesResults = main.TRUE
1369 linksResults = main.TRUE
1370 hostsResults = main.TRUE
1371 mnSwitches = main.Mininet1.getSwitches()
1372 mnLinks = main.Mininet1.getLinks()
1373 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001374 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001375 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001376 currentDevicesResult = main.topoRelated.compareDevicePort(
1377 main.Mininet1, controller,
1378 mnSwitches, devices, ports )
1379 utilities.assert_equals( expect=main.TRUE,
1380 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001381 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001382 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001383 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001384 " Switches view is incorrect" )
1385
1386 currentLinksResult = main.topoRelated.compareBase( links, controller,
1387 main.Mininet1.compareLinks,
1388 [ mnSwitches, mnLinks ] )
1389 utilities.assert_equals( expect=main.TRUE,
1390 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001391 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001392 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001393 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001394 " links view is incorrect" )
1395
1396 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1397 currentHostsResult = main.Mininet1.compareHosts(
1398 mnHosts,
1399 hosts[ controller ] )
1400 else:
1401 currentHostsResult = main.FALSE
1402 utilities.assert_equals( expect=main.TRUE,
1403 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001404 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001405 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001406 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001407 " hosts don't match Mininet" )
1408
1409 devicesResults = devicesResults and currentDevicesResult
1410 linksResults = linksResults and currentLinksResult
1411 hostsResults = hostsResults and currentHostsResult
1412
1413 main.step( "Device information is correct" )
1414 utilities.assert_equals(
1415 expect=main.TRUE,
1416 actual=devicesResults,
1417 onpass="Device information is correct",
1418 onfail="Device information is incorrect" )
1419
1420 main.step( "Links are correct" )
1421 utilities.assert_equals(
1422 expect=main.TRUE,
1423 actual=linksResults,
1424 onpass="Link are correct",
1425 onfail="Links are incorrect" )
1426
1427 main.step( "Hosts are correct" )
1428 utilities.assert_equals(
1429 expect=main.TRUE,
1430 actual=hostsResults,
1431 onpass="Hosts are correct",
1432 onfail="Hosts are incorrect" )
1433
1434 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001435 """
1436 Check for basic functionality with distributed primitives
1437 """
Jon Halle0f0b342017-04-18 11:43:47 -07001438 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001439 try:
1440 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001441 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001442 assert main.pCounterName, "main.pCounterName not defined"
1443 assert main.onosSetName, "main.onosSetName not defined"
1444 # NOTE: assert fails if value is 0/None/Empty/False
1445 try:
1446 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001447 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001448 main.log.error( "main.pCounterValue not defined, setting to 0" )
1449 main.pCounterValue = 0
1450 try:
1451 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001452 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001453 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001454 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001455 # Variables for the distributed primitives tests. These are local only
1456 addValue = "a"
1457 addAllValue = "a b c d e f"
1458 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001459 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001460 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001461 workQueueName = "TestON-Queue"
1462 workQueueCompleted = 0
1463 workQueueInProgress = 0
1464 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001465
1466 description = "Check for basic functionality with distributed " +\
1467 "primitives"
1468 main.case( description )
1469 main.caseExplanation = "Test the methods of the distributed " +\
1470 "primitives (counters and sets) throught the cli"
1471 # DISTRIBUTED ATOMIC COUNTERS
1472 # Partitioned counters
1473 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001474 pCounters = main.Cluster.command( "counterTestAddAndGet",
1475 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001476 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001477 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001478 main.pCounterValue += 1
1479 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001480 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001481 pCounterResults = True
1482 for i in addedPValues:
1483 tmpResult = i in pCounters
1484 pCounterResults = pCounterResults and tmpResult
1485 if not tmpResult:
1486 main.log.error( str( i ) + " is not in partitioned "
1487 "counter incremented results" )
1488 utilities.assert_equals( expect=True,
1489 actual=pCounterResults,
1490 onpass="Default counter incremented",
1491 onfail="Error incrementing default" +
1492 " counter" )
1493
1494 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001495 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1496 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001497 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001498 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001499 addedPValues.append( main.pCounterValue )
1500 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001501 # Check that counter incremented numController times
1502 pCounterResults = True
1503 for i in addedPValues:
1504 tmpResult = i in pCounters
1505 pCounterResults = pCounterResults and tmpResult
1506 if not tmpResult:
1507 main.log.error( str( i ) + " is not in partitioned "
1508 "counter incremented results" )
1509 utilities.assert_equals( expect=True,
1510 actual=pCounterResults,
1511 onpass="Default counter incremented",
1512 onfail="Error incrementing default" +
1513 " counter" )
1514
1515 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001516 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001517 utilities.assert_equals( expect=main.TRUE,
1518 actual=incrementCheck,
1519 onpass="Added counters are correct",
1520 onfail="Added counters are incorrect" )
1521
1522 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001523 pCounters = main.Cluster.command( "counterTestAddAndGet",
1524 args=[ main.pCounterName ],
1525 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001526 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001527 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001528 main.pCounterValue += -8
1529 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001530 # Check that counter incremented numController times
1531 pCounterResults = True
1532 for i in addedPValues:
1533 tmpResult = i in pCounters
1534 pCounterResults = pCounterResults and tmpResult
1535 if not tmpResult:
1536 main.log.error( str( i ) + " is not in partitioned "
1537 "counter incremented results" )
1538 utilities.assert_equals( expect=True,
1539 actual=pCounterResults,
1540 onpass="Default counter incremented",
1541 onfail="Error incrementing default" +
1542 " counter" )
1543
1544 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001545 pCounters = main.Cluster.command( "counterTestAddAndGet",
1546 args=[ main.pCounterName ],
1547 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001548 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001549 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001550 main.pCounterValue += 5
1551 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001552
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001553 # Check that counter incremented numController times
1554 pCounterResults = True
1555 for i in addedPValues:
1556 tmpResult = i in pCounters
1557 pCounterResults = pCounterResults and tmpResult
1558 if not tmpResult:
1559 main.log.error( str( i ) + " is not in partitioned "
1560 "counter incremented results" )
1561 utilities.assert_equals( expect=True,
1562 actual=pCounterResults,
1563 onpass="Default counter incremented",
1564 onfail="Error incrementing default" +
1565 " counter" )
1566
1567 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001568 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1569 args=[ main.pCounterName ],
1570 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001571 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001572 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001573 addedPValues.append( main.pCounterValue )
1574 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001575 # Check that counter incremented numController times
1576 pCounterResults = True
1577 for i in addedPValues:
1578 tmpResult = i in pCounters
1579 pCounterResults = pCounterResults and tmpResult
1580 if not tmpResult:
1581 main.log.error( str( i ) + " is not in partitioned "
1582 "counter incremented results" )
1583 utilities.assert_equals( expect=True,
1584 actual=pCounterResults,
1585 onpass="Default counter incremented",
1586 onfail="Error incrementing default" +
1587 " counter" )
1588
1589 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001590 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001591 utilities.assert_equals( expect=main.TRUE,
1592 actual=incrementCheck,
1593 onpass="Added counters are correct",
1594 onfail="Added counters are incorrect" )
1595
1596 # DISTRIBUTED SETS
1597 main.step( "Distributed Set get" )
1598 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001599 getResponses = main.Cluster.command( "setTestGet",
1600 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001601 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001602 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001603 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001604 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001605 current = set( getResponses[ i ] )
1606 if len( current ) == len( getResponses[ i ] ):
1607 # no repeats
1608 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001609 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001610 " has incorrect view" +
1611 " of set " + main.onosSetName + ":\n" +
1612 str( getResponses[ i ] ) )
1613 main.log.debug( "Expected: " + str( main.onosSet ) )
1614 main.log.debug( "Actual: " + str( current ) )
1615 getResults = main.FALSE
1616 else:
1617 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001618 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001619 " has repeat elements in" +
1620 " set " + main.onosSetName + ":\n" +
1621 str( getResponses[ i ] ) )
1622 getResults = main.FALSE
1623 elif getResponses[ i ] == main.ERROR:
1624 getResults = main.FALSE
1625 utilities.assert_equals( expect=main.TRUE,
1626 actual=getResults,
1627 onpass="Set elements are correct",
1628 onfail="Set elements are incorrect" )
1629
1630 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001631 sizeResponses = main.Cluster.command( "setTestSize",
1632 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001633 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001634 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001635 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001636 if size != sizeResponses[ i ]:
1637 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001638 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001639 " expected a size of " + str( size ) +
1640 " for set " + main.onosSetName +
1641 " but got " + str( sizeResponses[ i ] ) )
1642 utilities.assert_equals( expect=main.TRUE,
1643 actual=sizeResults,
1644 onpass="Set sizes are correct",
1645 onfail="Set sizes are incorrect" )
1646
1647 main.step( "Distributed Set add()" )
1648 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001649 addResponses = main.Cluster.command( "setTestAdd",
1650 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001651 # main.TRUE = successfully changed the set
1652 # main.FALSE = action resulted in no change in set
1653 # main.ERROR - Some error in executing the function
1654 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001655 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001656 if addResponses[ i ] == main.TRUE:
1657 # All is well
1658 pass
1659 elif addResponses[ i ] == main.FALSE:
1660 # Already in set, probably fine
1661 pass
1662 elif addResponses[ i ] == main.ERROR:
1663 # Error in execution
1664 addResults = main.FALSE
1665 else:
1666 # unexpected result
1667 addResults = main.FALSE
1668 if addResults != main.TRUE:
1669 main.log.error( "Error executing set add" )
1670
1671 # Check if set is still correct
1672 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001673 getResponses = main.Cluster.command( "setTestGet",
1674 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001675 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001676 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001677 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001678 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001679 current = set( getResponses[ i ] )
1680 if len( current ) == len( getResponses[ i ] ):
1681 # no repeats
1682 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001683 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001684 " of set " + main.onosSetName + ":\n" +
1685 str( getResponses[ i ] ) )
1686 main.log.debug( "Expected: " + str( main.onosSet ) )
1687 main.log.debug( "Actual: " + str( current ) )
1688 getResults = main.FALSE
1689 else:
1690 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001691 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001692 " set " + main.onosSetName + ":\n" +
1693 str( getResponses[ i ] ) )
1694 getResults = main.FALSE
1695 elif getResponses[ i ] == main.ERROR:
1696 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001697 sizeResponses = main.Cluster.command( "setTestSize",
1698 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001699 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001700 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001701 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001702 if size != sizeResponses[ i ]:
1703 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001704 main.log.error( node + " expected a size of " +
1705 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001706 " but got " + str( sizeResponses[ i ] ) )
1707 addResults = addResults and getResults and sizeResults
1708 utilities.assert_equals( expect=main.TRUE,
1709 actual=addResults,
1710 onpass="Set add correct",
1711 onfail="Set add was incorrect" )
1712
1713 main.step( "Distributed Set addAll()" )
1714 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001715 addResponses = main.Cluster.command( "setTestAdd",
1716 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001717 # main.TRUE = successfully changed the set
1718 # main.FALSE = action resulted in no change in set
1719 # main.ERROR - Some error in executing the function
1720 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001721 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001722 if addResponses[ i ] == main.TRUE:
1723 # All is well
1724 pass
1725 elif addResponses[ i ] == main.FALSE:
1726 # Already in set, probably fine
1727 pass
1728 elif addResponses[ i ] == main.ERROR:
1729 # Error in execution
1730 addAllResults = main.FALSE
1731 else:
1732 # unexpected result
1733 addAllResults = main.FALSE
1734 if addAllResults != main.TRUE:
1735 main.log.error( "Error executing set addAll" )
1736
1737 # Check if set is still correct
1738 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001739 getResponses = main.Cluster.command( "setTestGet",
1740 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001741 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001742 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001743 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001744 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001745 current = set( getResponses[ i ] )
1746 if len( current ) == len( getResponses[ i ] ):
1747 # no repeats
1748 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001749 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001750 " of set " + main.onosSetName + ":\n" +
1751 str( getResponses[ i ] ) )
1752 main.log.debug( "Expected: " + str( main.onosSet ) )
1753 main.log.debug( "Actual: " + str( current ) )
1754 getResults = main.FALSE
1755 else:
1756 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001757 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001758 " set " + main.onosSetName + ":\n" +
1759 str( getResponses[ i ] ) )
1760 getResults = main.FALSE
1761 elif getResponses[ i ] == main.ERROR:
1762 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001763 sizeResponses = main.Cluster.command( "setTestSize",
1764 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001765 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001766 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001767 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001768 if size != sizeResponses[ i ]:
1769 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001770 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001771 " for set " + main.onosSetName +
1772 " but got " + str( sizeResponses[ i ] ) )
1773 addAllResults = addAllResults and getResults and sizeResults
1774 utilities.assert_equals( expect=main.TRUE,
1775 actual=addAllResults,
1776 onpass="Set addAll correct",
1777 onfail="Set addAll was incorrect" )
1778
1779 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001780 containsResponses = main.Cluster.command( "setTestGet",
1781 args=[ main.onosSetName ],
1782 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001783 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001784 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001785 if containsResponses[ i ] == main.ERROR:
1786 containsResults = main.FALSE
1787 else:
1788 containsResults = containsResults and\
1789 containsResponses[ i ][ 1 ]
1790 utilities.assert_equals( expect=main.TRUE,
1791 actual=containsResults,
1792 onpass="Set contains is functional",
1793 onfail="Set contains failed" )
1794
1795 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001796 containsAllResponses = main.Cluster.command( "setTestGet",
1797 args=[ main.onosSetName ],
1798 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001799 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001800 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001801 if containsResponses[ i ] == main.ERROR:
1802 containsResults = main.FALSE
1803 else:
1804 containsResults = containsResults and\
1805 containsResponses[ i ][ 1 ]
1806 utilities.assert_equals( expect=main.TRUE,
1807 actual=containsAllResults,
1808 onpass="Set containsAll is functional",
1809 onfail="Set containsAll failed" )
1810
1811 main.step( "Distributed Set remove()" )
1812 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001813 removeResponses = main.Cluster.command( "setTestRemove",
1814 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001815 # main.TRUE = successfully changed the set
1816 # main.FALSE = action resulted in no change in set
1817 # main.ERROR - Some error in executing the function
1818 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001819 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001820 if removeResponses[ i ] == main.TRUE:
1821 # All is well
1822 pass
1823 elif removeResponses[ i ] == main.FALSE:
1824 # not in set, probably fine
1825 pass
1826 elif removeResponses[ i ] == main.ERROR:
1827 # Error in execution
1828 removeResults = main.FALSE
1829 else:
1830 # unexpected result
1831 removeResults = main.FALSE
1832 if removeResults != main.TRUE:
1833 main.log.error( "Error executing set remove" )
1834
1835 # Check if set is still correct
1836 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001837 getResponses = main.Cluster.command( "setTestGet",
1838 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001839 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001840 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001841 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001842 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001843 current = set( getResponses[ i ] )
1844 if len( current ) == len( getResponses[ i ] ):
1845 # no repeats
1846 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001847 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001848 " of set " + main.onosSetName + ":\n" +
1849 str( getResponses[ i ] ) )
1850 main.log.debug( "Expected: " + str( main.onosSet ) )
1851 main.log.debug( "Actual: " + str( current ) )
1852 getResults = main.FALSE
1853 else:
1854 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001855 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001856 " set " + main.onosSetName + ":\n" +
1857 str( getResponses[ i ] ) )
1858 getResults = main.FALSE
1859 elif getResponses[ i ] == main.ERROR:
1860 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001861 sizeResponses = main.Cluster.command( "setTestSize",
1862 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001863 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001864 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001865 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001866 if size != sizeResponses[ i ]:
1867 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001868 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001869 " for set " + main.onosSetName +
1870 " but got " + str( sizeResponses[ i ] ) )
1871 removeResults = removeResults and getResults and sizeResults
1872 utilities.assert_equals( expect=main.TRUE,
1873 actual=removeResults,
1874 onpass="Set remove correct",
1875 onfail="Set remove was incorrect" )
1876
1877 main.step( "Distributed Set removeAll()" )
1878 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001879 removeAllResponses = main.Cluster.command( "setTestRemove",
1880 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001881 # main.TRUE = successfully changed the set
1882 # main.FALSE = action resulted in no change in set
1883 # main.ERROR - Some error in executing the function
1884 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001885 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001886 if removeAllResponses[ i ] == main.TRUE:
1887 # All is well
1888 pass
1889 elif removeAllResponses[ i ] == main.FALSE:
1890 # not in set, probably fine
1891 pass
1892 elif removeAllResponses[ i ] == main.ERROR:
1893 # Error in execution
1894 removeAllResults = main.FALSE
1895 else:
1896 # unexpected result
1897 removeAllResults = main.FALSE
1898 if removeAllResults != main.TRUE:
1899 main.log.error( "Error executing set removeAll" )
1900
1901 # Check if set is still correct
1902 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001903 getResponses = main.Cluster.command( "setTestGet",
1904 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001905 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001906 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001907 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001908 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001909 current = set( getResponses[ i ] )
1910 if len( current ) == len( getResponses[ i ] ):
1911 # no repeats
1912 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001913 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001914 " of set " + main.onosSetName + ":\n" +
1915 str( getResponses[ i ] ) )
1916 main.log.debug( "Expected: " + str( main.onosSet ) )
1917 main.log.debug( "Actual: " + str( current ) )
1918 getResults = main.FALSE
1919 else:
1920 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001921 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001922 " set " + main.onosSetName + ":\n" +
1923 str( getResponses[ i ] ) )
1924 getResults = main.FALSE
1925 elif getResponses[ i ] == main.ERROR:
1926 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001927 sizeResponses = main.Cluster.command( "setTestSize",
1928 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001929 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001930 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001931 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001932 if size != sizeResponses[ i ]:
1933 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001934 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001935 " for set " + main.onosSetName +
1936 " but got " + str( sizeResponses[ i ] ) )
1937 removeAllResults = removeAllResults and getResults and sizeResults
1938 utilities.assert_equals( expect=main.TRUE,
1939 actual=removeAllResults,
1940 onpass="Set removeAll correct",
1941 onfail="Set removeAll was incorrect" )
1942
1943 main.step( "Distributed Set addAll()" )
1944 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001945 addResponses = main.Cluster.command( "setTestAdd",
1946 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001947 # main.TRUE = successfully changed the set
1948 # main.FALSE = action resulted in no change in set
1949 # main.ERROR - Some error in executing the function
1950 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001951 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001952 if addResponses[ i ] == main.TRUE:
1953 # All is well
1954 pass
1955 elif addResponses[ i ] == main.FALSE:
1956 # Already in set, probably fine
1957 pass
1958 elif addResponses[ i ] == main.ERROR:
1959 # Error in execution
1960 addAllResults = main.FALSE
1961 else:
1962 # unexpected result
1963 addAllResults = main.FALSE
1964 if addAllResults != main.TRUE:
1965 main.log.error( "Error executing set addAll" )
1966
1967 # Check if set is still correct
1968 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001969 getResponses = main.Cluster.command( "setTestGet",
1970 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001971 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001972 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001973 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001974 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001975 current = set( getResponses[ i ] )
1976 if len( current ) == len( getResponses[ i ] ):
1977 # no repeats
1978 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001979 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001980 " of set " + main.onosSetName + ":\n" +
1981 str( getResponses[ i ] ) )
1982 main.log.debug( "Expected: " + str( main.onosSet ) )
1983 main.log.debug( "Actual: " + str( current ) )
1984 getResults = main.FALSE
1985 else:
1986 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001987 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001988 " set " + main.onosSetName + ":\n" +
1989 str( getResponses[ i ] ) )
1990 getResults = main.FALSE
1991 elif getResponses[ i ] == main.ERROR:
1992 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001993 sizeResponses = main.Cluster.command( "setTestSize",
1994 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001995 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001996 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001997 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001998 if size != sizeResponses[ i ]:
1999 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002000 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002001 " for set " + main.onosSetName +
2002 " but got " + str( sizeResponses[ i ] ) )
2003 addAllResults = addAllResults and getResults and sizeResults
2004 utilities.assert_equals( expect=main.TRUE,
2005 actual=addAllResults,
2006 onpass="Set addAll correct",
2007 onfail="Set addAll was incorrect" )
2008
2009 main.step( "Distributed Set clear()" )
2010 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07002011 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07002012 args=[ main.onosSetName, " " ], # Values doesn't matter
2013 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002014 # main.TRUE = successfully changed the set
2015 # main.FALSE = action resulted in no change in set
2016 # main.ERROR - Some error in executing the function
2017 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002018 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002019 if clearResponses[ i ] == main.TRUE:
2020 # All is well
2021 pass
2022 elif clearResponses[ i ] == main.FALSE:
2023 # Nothing set, probably fine
2024 pass
2025 elif clearResponses[ i ] == main.ERROR:
2026 # Error in execution
2027 clearResults = main.FALSE
2028 else:
2029 # unexpected result
2030 clearResults = main.FALSE
2031 if clearResults != main.TRUE:
2032 main.log.error( "Error executing set clear" )
2033
2034 # Check if set is still correct
2035 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002036 getResponses = main.Cluster.command( "setTestGet",
2037 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002038 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002039 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002040 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002041 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002042 current = set( getResponses[ i ] )
2043 if len( current ) == len( getResponses[ i ] ):
2044 # no repeats
2045 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002046 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002047 " of set " + main.onosSetName + ":\n" +
2048 str( getResponses[ i ] ) )
2049 main.log.debug( "Expected: " + str( main.onosSet ) )
2050 main.log.debug( "Actual: " + str( current ) )
2051 getResults = main.FALSE
2052 else:
2053 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002054 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002055 " set " + main.onosSetName + ":\n" +
2056 str( getResponses[ i ] ) )
2057 getResults = main.FALSE
2058 elif getResponses[ i ] == main.ERROR:
2059 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002060 sizeResponses = main.Cluster.command( "setTestSize",
2061 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002062 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002063 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002064 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002065 if size != sizeResponses[ i ]:
2066 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002067 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002068 " for set " + main.onosSetName +
2069 " but got " + str( sizeResponses[ i ] ) )
2070 clearResults = clearResults and getResults and sizeResults
2071 utilities.assert_equals( expect=main.TRUE,
2072 actual=clearResults,
2073 onpass="Set clear correct",
2074 onfail="Set clear was incorrect" )
2075
2076 main.step( "Distributed Set addAll()" )
2077 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002078 addResponses = main.Cluster.command( "setTestAdd",
2079 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002080 # main.TRUE = successfully changed the set
2081 # main.FALSE = action resulted in no change in set
2082 # main.ERROR - Some error in executing the function
2083 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002084 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002085 if addResponses[ i ] == main.TRUE:
2086 # All is well
2087 pass
2088 elif addResponses[ i ] == main.FALSE:
2089 # Already in set, probably fine
2090 pass
2091 elif addResponses[ i ] == main.ERROR:
2092 # Error in execution
2093 addAllResults = main.FALSE
2094 else:
2095 # unexpected result
2096 addAllResults = main.FALSE
2097 if addAllResults != main.TRUE:
2098 main.log.error( "Error executing set addAll" )
2099
2100 # Check if set is still correct
2101 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002102 getResponses = main.Cluster.command( "setTestGet",
2103 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002104 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002105 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002106 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002107 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002108 current = set( getResponses[ i ] )
2109 if len( current ) == len( getResponses[ i ] ):
2110 # no repeats
2111 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002112 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002113 " of set " + main.onosSetName + ":\n" +
2114 str( getResponses[ i ] ) )
2115 main.log.debug( "Expected: " + str( main.onosSet ) )
2116 main.log.debug( "Actual: " + str( current ) )
2117 getResults = main.FALSE
2118 else:
2119 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002120 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002121 " set " + main.onosSetName + ":\n" +
2122 str( getResponses[ i ] ) )
2123 getResults = main.FALSE
2124 elif getResponses[ i ] == main.ERROR:
2125 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002126 sizeResponses = main.Cluster.command( "setTestSize",
2127 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002128 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002129 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002130 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002131 if size != sizeResponses[ i ]:
2132 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002133 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002134 " for set " + main.onosSetName +
2135 " but got " + str( sizeResponses[ i ] ) )
2136 addAllResults = addAllResults and getResults and sizeResults
2137 utilities.assert_equals( expect=main.TRUE,
2138 actual=addAllResults,
2139 onpass="Set addAll correct",
2140 onfail="Set addAll was incorrect" )
2141
2142 main.step( "Distributed Set retain()" )
2143 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002144 retainResponses = main.Cluster.command( "setTestRemove",
2145 args=[ main.onosSetName, retainValue ],
2146 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002147 # main.TRUE = successfully changed the set
2148 # main.FALSE = action resulted in no change in set
2149 # main.ERROR - Some error in executing the function
2150 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002151 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002152 if retainResponses[ i ] == main.TRUE:
2153 # All is well
2154 pass
2155 elif retainResponses[ i ] == main.FALSE:
2156 # Already in set, probably fine
2157 pass
2158 elif retainResponses[ i ] == main.ERROR:
2159 # Error in execution
2160 retainResults = main.FALSE
2161 else:
2162 # unexpected result
2163 retainResults = main.FALSE
2164 if retainResults != main.TRUE:
2165 main.log.error( "Error executing set retain" )
2166
2167 # Check if set is still correct
2168 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002169 getResponses = main.Cluster.command( "setTestGet",
2170 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002171 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002172 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002173 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002174 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002175 current = set( getResponses[ i ] )
2176 if len( current ) == len( getResponses[ i ] ):
2177 # no repeats
2178 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002179 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002180 " of set " + main.onosSetName + ":\n" +
2181 str( getResponses[ i ] ) )
2182 main.log.debug( "Expected: " + str( main.onosSet ) )
2183 main.log.debug( "Actual: " + str( current ) )
2184 getResults = main.FALSE
2185 else:
2186 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002187 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002188 " set " + main.onosSetName + ":\n" +
2189 str( getResponses[ i ] ) )
2190 getResults = main.FALSE
2191 elif getResponses[ i ] == main.ERROR:
2192 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002193 sizeResponses = main.Cluster.command( "setTestSize",
2194 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002195 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002196 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002197 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002198 if size != sizeResponses[ i ]:
2199 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002200 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002201 str( size ) + " for set " + main.onosSetName +
2202 " but got " + str( sizeResponses[ i ] ) )
2203 retainResults = retainResults and getResults and sizeResults
2204 utilities.assert_equals( expect=main.TRUE,
2205 actual=retainResults,
2206 onpass="Set retain correct",
2207 onfail="Set retain was incorrect" )
2208
2209 # Transactional maps
2210 main.step( "Partitioned Transactional maps put" )
2211 tMapValue = "Testing"
2212 numKeys = 100
2213 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002214 ctrl = main.Cluster.next()
2215 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002216 if putResponses and len( putResponses ) == 100:
2217 for i in putResponses:
2218 if putResponses[ i ][ 'value' ] != tMapValue:
2219 putResult = False
2220 else:
2221 putResult = False
2222 if not putResult:
2223 main.log.debug( "Put response values: " + str( putResponses ) )
2224 utilities.assert_equals( expect=True,
2225 actual=putResult,
2226 onpass="Partitioned Transactional Map put successful",
2227 onfail="Partitioned Transactional Map put values are incorrect" )
2228
2229 main.step( "Partitioned Transactional maps get" )
2230 # FIXME: is this sleep needed?
2231 time.sleep( 5 )
2232
2233 getCheck = True
2234 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002235 getResponses = main.Cluster.command( "transactionalMapGet",
2236 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002237 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002238 for node in getResponses:
2239 if node != tMapValue:
2240 valueCheck = False
2241 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002242 main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002243 main.log.warn( getResponses )
2244 getCheck = getCheck and valueCheck
2245 utilities.assert_equals( expect=True,
2246 actual=getCheck,
2247 onpass="Partitioned Transactional Map get values were correct",
2248 onfail="Partitioned Transactional Map values incorrect" )
2249
2250 # DISTRIBUTED ATOMIC VALUE
2251 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002252 getValues = main.Cluster.command( "valueTestGet",
2253 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002254 main.log.debug( getValues )
2255 # Check the results
2256 atomicValueGetResult = True
2257 expected = valueValue if valueValue is not None else "null"
2258 main.log.debug( "Checking for value of " + expected )
2259 for i in getValues:
2260 if i != expected:
2261 atomicValueGetResult = False
2262 utilities.assert_equals( expect=True,
2263 actual=atomicValueGetResult,
2264 onpass="Atomic Value get successful",
2265 onfail="Error getting atomic Value " +
2266 str( valueValue ) + ", found: " +
2267 str( getValues ) )
2268
2269 main.step( "Atomic Value set()" )
2270 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002271 setValues = main.Cluster.command( "valueTestSet",
2272 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002273 main.log.debug( setValues )
2274 # Check the results
2275 atomicValueSetResults = True
2276 for i in setValues:
2277 if i != main.TRUE:
2278 atomicValueSetResults = False
2279 utilities.assert_equals( expect=True,
2280 actual=atomicValueSetResults,
2281 onpass="Atomic Value set successful",
2282 onfail="Error setting atomic Value" +
2283 str( setValues ) )
2284
2285 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002286 getValues = main.Cluster.command( "valueTestGet",
2287 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002288 main.log.debug( getValues )
2289 # Check the results
2290 atomicValueGetResult = True
2291 expected = valueValue if valueValue is not None else "null"
2292 main.log.debug( "Checking for value of " + expected )
2293 for i in getValues:
2294 if i != expected:
2295 atomicValueGetResult = False
2296 utilities.assert_equals( expect=True,
2297 actual=atomicValueGetResult,
2298 onpass="Atomic Value get successful",
2299 onfail="Error getting atomic Value " +
2300 str( valueValue ) + ", found: " +
2301 str( getValues ) )
2302
2303 main.step( "Atomic Value compareAndSet()" )
2304 oldValue = valueValue
2305 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002306 ctrl = main.Cluster.next()
2307 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002308 main.log.debug( CASValue )
2309 utilities.assert_equals( expect=main.TRUE,
2310 actual=CASValue,
2311 onpass="Atomic Value comapreAndSet successful",
2312 onfail="Error setting atomic Value:" +
2313 str( CASValue ) )
2314
2315 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002316 getValues = main.Cluster.command( "valueTestGet",
2317 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002318 main.log.debug( getValues )
2319 # Check the results
2320 atomicValueGetResult = True
2321 expected = valueValue if valueValue is not None else "null"
2322 main.log.debug( "Checking for value of " + expected )
2323 for i in getValues:
2324 if i != expected:
2325 atomicValueGetResult = False
2326 utilities.assert_equals( expect=True,
2327 actual=atomicValueGetResult,
2328 onpass="Atomic Value get successful",
2329 onfail="Error getting atomic Value " +
2330 str( valueValue ) + ", found: " +
2331 str( getValues ) )
2332
2333 main.step( "Atomic Value getAndSet()" )
2334 oldValue = valueValue
2335 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002336 ctrl = main.Cluster.next()
2337 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002338 main.log.debug( GASValue )
2339 expected = oldValue if oldValue is not None else "null"
2340 utilities.assert_equals( expect=expected,
2341 actual=GASValue,
2342 onpass="Atomic Value GAS successful",
2343 onfail="Error with GetAndSet atomic Value: expected " +
2344 str( expected ) + ", found: " +
2345 str( GASValue ) )
2346
2347 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002348 getValues = main.Cluster.command( "valueTestGet",
2349 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002350 main.log.debug( getValues )
2351 # Check the results
2352 atomicValueGetResult = True
2353 expected = valueValue if valueValue is not None else "null"
2354 main.log.debug( "Checking for value of " + expected )
2355 for i in getValues:
2356 if i != expected:
2357 atomicValueGetResult = False
2358 utilities.assert_equals( expect=True,
2359 actual=atomicValueGetResult,
2360 onpass="Atomic Value get successful",
2361 onfail="Error getting atomic Value: expected " +
2362 str( valueValue ) + ", found: " +
2363 str( getValues ) )
2364
2365 main.step( "Atomic Value destory()" )
2366 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002367 ctrl = main.Cluster.next()
2368 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002369 main.log.debug( destroyResult )
2370 # Check the results
2371 utilities.assert_equals( expect=main.TRUE,
2372 actual=destroyResult,
2373 onpass="Atomic Value destroy successful",
2374 onfail="Error destroying atomic Value" )
2375
2376 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002377 getValues = main.Cluster.command( "valueTestGet",
2378 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002379 main.log.debug( getValues )
2380 # Check the results
2381 atomicValueGetResult = True
2382 expected = valueValue if valueValue is not None else "null"
2383 main.log.debug( "Checking for value of " + expected )
2384 for i in getValues:
2385 if i != expected:
2386 atomicValueGetResult = False
2387 utilities.assert_equals( expect=True,
2388 actual=atomicValueGetResult,
2389 onpass="Atomic Value get successful",
2390 onfail="Error getting atomic Value " +
2391 str( valueValue ) + ", found: " +
2392 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002393
2394 # WORK QUEUES
2395 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002396 ctrl = main.Cluster.next()
2397 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002398 workQueuePending += 1
2399 main.log.debug( addResult )
2400 # Check the results
2401 utilities.assert_equals( expect=main.TRUE,
2402 actual=addResult,
2403 onpass="Work Queue add successful",
2404 onfail="Error adding to Work Queue" )
2405
2406 main.step( "Check the work queue stats" )
2407 statsResults = self.workQueueStatsCheck( workQueueName,
2408 workQueueCompleted,
2409 workQueueInProgress,
2410 workQueuePending )
2411 utilities.assert_equals( expect=True,
2412 actual=statsResults,
2413 onpass="Work Queue stats correct",
2414 onfail="Work Queue stats incorrect " )
2415
2416 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002417 ctrl = main.Cluster.next()
2418 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002419 workQueuePending += 2
2420 main.log.debug( addMultipleResult )
2421 # Check the results
2422 utilities.assert_equals( expect=main.TRUE,
2423 actual=addMultipleResult,
2424 onpass="Work Queue add multiple successful",
2425 onfail="Error adding multiple items to Work Queue" )
2426
2427 main.step( "Check the work queue stats" )
2428 statsResults = self.workQueueStatsCheck( workQueueName,
2429 workQueueCompleted,
2430 workQueueInProgress,
2431 workQueuePending )
2432 utilities.assert_equals( expect=True,
2433 actual=statsResults,
2434 onpass="Work Queue stats correct",
2435 onfail="Work Queue stats incorrect " )
2436
2437 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002438 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002439 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002440 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002441 workQueuePending -= number
2442 workQueueCompleted += number
2443 main.log.debug( take1Result )
2444 # Check the results
2445 utilities.assert_equals( expect=main.TRUE,
2446 actual=take1Result,
2447 onpass="Work Queue takeAndComplete 1 successful",
2448 onfail="Error taking 1 from Work Queue" )
2449
2450 main.step( "Check the work queue stats" )
2451 statsResults = self.workQueueStatsCheck( workQueueName,
2452 workQueueCompleted,
2453 workQueueInProgress,
2454 workQueuePending )
2455 utilities.assert_equals( expect=True,
2456 actual=statsResults,
2457 onpass="Work Queue stats correct",
2458 onfail="Work Queue stats incorrect " )
2459
2460 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002461 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002462 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002463 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002464 workQueuePending -= number
2465 workQueueCompleted += number
2466 main.log.debug( take2Result )
2467 # Check the results
2468 utilities.assert_equals( expect=main.TRUE,
2469 actual=take2Result,
2470 onpass="Work Queue takeAndComplete 2 successful",
2471 onfail="Error taking 2 from Work Queue" )
2472
2473 main.step( "Check the work queue stats" )
2474 statsResults = self.workQueueStatsCheck( workQueueName,
2475 workQueueCompleted,
2476 workQueueInProgress,
2477 workQueuePending )
2478 utilities.assert_equals( expect=True,
2479 actual=statsResults,
2480 onpass="Work Queue stats correct",
2481 onfail="Work Queue stats incorrect " )
2482
2483 main.step( "Work Queue destroy()" )
2484 valueValue = None
2485 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002486 ctrl = main.Cluster.next()
2487 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002488 workQueueCompleted = 0
2489 workQueueInProgress = 0
2490 workQueuePending = 0
2491 main.log.debug( destroyResult )
2492 # Check the results
2493 utilities.assert_equals( expect=main.TRUE,
2494 actual=destroyResult,
2495 onpass="Work Queue destroy successful",
2496 onfail="Error destroying Work Queue" )
2497
2498 main.step( "Check the work queue stats" )
2499 statsResults = self.workQueueStatsCheck( workQueueName,
2500 workQueueCompleted,
2501 workQueueInProgress,
2502 workQueuePending )
2503 utilities.assert_equals( expect=True,
2504 actual=statsResults,
2505 onpass="Work Queue stats correct",
2506 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002507 except Exception as e:
2508 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002509
2510 def cleanUp( self, main ):
2511 """
2512 Clean up
2513 """
2514 import os
2515 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002516 assert main, "main not defined"
2517 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002518
2519 # printing colors to terminal
2520 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2521 'blue': '\033[94m', 'green': '\033[92m',
2522 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
Jon Hall4173b242017-09-12 17:04:38 -07002523
Devin Lim58046fa2017-07-05 16:55:00 -07002524 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002525
2526 main.step( "Checking raft log size" )
2527 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2528 # get compacted periodically
2529 logCheck = main.Cluster.checkPartitionSize()
2530 utilities.assert_equals( expect=True, actual=logCheck,
2531 onpass="Raft log size is not too big",
2532 onfail="Raft logs grew too big" )
2533
Devin Lim58046fa2017-07-05 16:55:00 -07002534 main.step( "Killing tcpdumps" )
2535 main.Mininet2.stopTcpdump()
2536
2537 testname = main.TEST
2538 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2539 main.step( "Copying MN pcap and ONOS log files to test station" )
2540 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2541 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2542 # NOTE: MN Pcap file is being saved to logdir.
2543 # We scp this file as MN and TestON aren't necessarily the same vm
2544
2545 # FIXME: To be replaced with a Jenkin's post script
2546 # TODO: Load these from params
2547 # NOTE: must end in /
2548 logFolder = "/opt/onos/log/"
2549 logFiles = [ "karaf.log", "karaf.log.1" ]
2550 # NOTE: must end in /
2551 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002552 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002553 dstName = main.logdir + "/" + ctrl.name + "-" + f
2554 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002555 logFolder + f, dstName )
2556 # std*.log's
2557 # NOTE: must end in /
2558 logFolder = "/opt/onos/var/"
2559 logFiles = [ "stderr.log", "stdout.log" ]
2560 # NOTE: must end in /
2561 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002562 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002563 dstName = main.logdir + "/" + ctrl.name + "-" + f
2564 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002565 logFolder + f, dstName )
2566 else:
2567 main.log.debug( "skipping saving log files" )
2568
2569 main.step( "Stopping Mininet" )
2570 mnResult = main.Mininet1.stopNet()
2571 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2572 onpass="Mininet stopped",
2573 onfail="MN cleanup NOT successful" )
2574
2575 main.step( "Checking ONOS Logs for errors" )
Devin Lim142b5342017-07-20 15:22:39 -07002576 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002577 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2578 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002579
2580 try:
2581 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2582 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2583 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2584 timerLog.close()
2585 except NameError as e:
2586 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002587
Devin Lim58046fa2017-07-05 16:55:00 -07002588 def assignMastership( self, main ):
2589 """
2590 Assign mastership to controllers
2591 """
2592 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002593 assert main, "main not defined"
2594 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002595
2596 main.case( "Assigning Controller roles for switches" )
2597 main.caseExplanation = "Check that ONOS is connected to each " +\
2598 "device. Then manually assign" +\
2599 " mastership to specific ONOS nodes using" +\
2600 " 'device-role'"
2601 main.step( "Assign mastership of switches to specific controllers" )
2602 # Manually assign mastership to the controller we want
2603 roleCall = main.TRUE
2604
2605 ipList = []
2606 deviceList = []
Jon Hallca319892017-06-15 15:25:22 -07002607 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07002608 try:
2609 # Assign mastership to specific controllers. This assignment was
2610 # determined for a 7 node cluser, but will work with any sized
2611 # cluster
2612 for i in range( 1, 29 ): # switches 1 through 28
2613 # set up correct variables:
2614 if i == 1:
2615 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002616 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002617 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
2618 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002619 c = 1 % main.Cluster.numCtrls
2620 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002621 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
2622 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002623 c = 1 % main.Cluster.numCtrls
2624 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002625 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
2626 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002627 c = 3 % main.Cluster.numCtrls
2628 ip = main.Cluster.active( c ).ip_address # ONOS4
Devin Lim58046fa2017-07-05 16:55:00 -07002629 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
2630 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002631 c = 2 % main.Cluster.numCtrls
2632 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002633 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
2634 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002635 c = 2 % main.Cluster.numCtrls
2636 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002637 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
2638 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002639 c = 5 % main.Cluster.numCtrls
2640 ip = main.Cluster.active( c ).ip_address # ONOS6
Devin Lim58046fa2017-07-05 16:55:00 -07002641 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
2642 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002643 c = 4 % main.Cluster.numCtrls
2644 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002645 dpid = '3' + str( i ).zfill( 3 )
2646 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2647 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002648 c = 6 % main.Cluster.numCtrls
2649 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002650 dpid = '6' + str( i ).zfill( 3 )
2651 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2652 elif i == 28:
2653 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002654 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002655 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
2656 else:
2657 main.log.error( "You didn't write an else statement for " +
2658 "switch s" + str( i ) )
2659 roleCall = main.FALSE
2660 # Assign switch
2661 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2662 # TODO: make this controller dynamic
2663 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
2664 ipList.append( ip )
2665 deviceList.append( deviceId )
2666 except ( AttributeError, AssertionError ):
2667 main.log.exception( "Something is wrong with ONOS device view" )
2668 main.log.info( onosCli.devices() )
2669 utilities.assert_equals(
2670 expect=main.TRUE,
2671 actual=roleCall,
2672 onpass="Re-assigned switch mastership to designated controller",
2673 onfail="Something wrong with deviceRole calls" )
2674
2675 main.step( "Check mastership was correctly assigned" )
2676 roleCheck = main.TRUE
2677 # NOTE: This is due to the fact that device mastership change is not
2678 # atomic and is actually a multi step process
2679 time.sleep( 5 )
2680 for i in range( len( ipList ) ):
2681 ip = ipList[ i ]
2682 deviceId = deviceList[ i ]
2683 # Check assignment
2684 master = onosCli.getRole( deviceId ).get( 'master' )
2685 if ip in master:
2686 roleCheck = roleCheck and main.TRUE
2687 else:
2688 roleCheck = roleCheck and main.FALSE
2689 main.log.error( "Error, controller " + ip + " is not" +
2690 " master " + "of device " +
2691 str( deviceId ) + ". Master is " +
2692 repr( master ) + "." )
2693 utilities.assert_equals(
2694 expect=main.TRUE,
2695 actual=roleCheck,
2696 onpass="Switches were successfully reassigned to designated " +
2697 "controller",
2698 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002699
    def bringUpStoppedNode( self, main ):
        """
        Restart the ONOS nodes listed in main.kill, wait for them to come up,
        restart their CLIs, verify cluster health, and rerun leadership
        election on the restarted nodes.

        Records the restart duration in main.restartTime. Exits the test via
        main.cleanAndExit() if the node health check fails.

        Args:
            main: the TestON main object; main.kill must hold the controllers
                  that were previously stopped.
        """
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.kill, "main.kill not defined"
        main.case( "Restart minority of ONOS nodes" )

        main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
        startResults = main.TRUE
        restartTime = time.time()
        # NOTE: `and` short-circuits — once a start fails, the remaining
        #       nodes in main.kill are not started
        for ctrl in main.kill:
            startResults = startResults and\
                           ctrl.onosStart( ctrl.ipAddress )
        utilities.assert_equals( expect=main.TRUE, actual=startResults,
                                 onpass="ONOS nodes started successfully",
                                 onfail="ONOS nodes NOT successfully started" )

        main.step( "Checking if ONOS is up yet" )
        # poll every killed node until all report up, at most 10 rounds
        count = 0
        onosIsupResult = main.FALSE
        while onosIsupResult == main.FALSE and count < 10:
            onosIsupResult = main.TRUE
            for ctrl in main.kill:
                onosIsupResult = onosIsupResult and\
                                 ctrl.isup( ctrl.ipAddress )
            count = count + 1
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS restarted successfully",
                                 onfail="ONOS restart NOT successful" )

        main.step( "Restarting ONOS nodes" )
        cliResults = main.TRUE
        for ctrl in main.kill:
            cliResults = cliResults and\
                         ctrl.startOnosCli( ctrl.ipAddress )
            # mark the controller active again for Cluster.active() queries
            ctrl.active = True
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS node(s) restarted",
                                 onfail="ONOS node(s) did not restart" )

        # Grab the time of restart so we can check how long the gossip
        # protocol has had time to work
        main.restartTime = time.time() - restartTime
        main.log.debug( "Restart time: " + str( main.restartTime ) )
        # TODO: Make this configurable. Also, we are breaking the above timer
        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( self.nodesCheck,
                                       False,
                                       args=[ main.Cluster.active() ],
                                       sleep=15,
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # dump the non-ACTIVE karaf components for debugging, then abort
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        self.commonChecks()

        main.step( "Rerun for election on the node(s) that were killed" )
        runResults = main.TRUE
        for ctrl in main.kill:
            runResults = runResults and\
                         ctrl.electionTestRun()
        utilities.assert_equals( expect=main.TRUE, actual=runResults,
                                 onpass="ONOS nodes reran for election topic",
                                 onfail="Errror rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002777
Devin Lim142b5342017-07-20 15:22:39 -07002778 def tempCell( self, cellName, ipList ):
2779 main.step( "Create cell file" )
2780 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002781
Devin Lim142b5342017-07-20 15:22:39 -07002782 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2783 main.Mininet1.ip_address,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002784 cellAppString, ipList, main.ONOScli1.karafUser )
Devin Lim142b5342017-07-20 15:22:39 -07002785 main.step( "Applying cell variable to environment" )
2786 cellResult = main.ONOSbench.setCell( cellName )
2787 verifyResult = main.ONOSbench.verifyCell()
2788
Devin Lim142b5342017-07-20 15:22:39 -07002789 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002790 """
2791 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002792 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002793 1: scaling
2794 """
2795 """
2796 Check state after ONOS failure/scaling
2797 """
2798 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002799 assert main, "main not defined"
2800 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002801 main.case( "Running ONOS Constant State Tests" )
2802
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002803 OnosAfterWhich = [ "failure", "scaliing" ]
Devin Lim58046fa2017-07-05 16:55:00 -07002804
Devin Lim58046fa2017-07-05 16:55:00 -07002805 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002806 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002807
Devin Lim142b5342017-07-20 15:22:39 -07002808 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002809 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002810
2811 if rolesResults and not consistentMastership:
2812 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002813 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002814 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002815 json.dumps( json.loads( ONOSMastership[ i ] ),
2816 sort_keys=True,
2817 indent=4,
2818 separators=( ',', ': ' ) ) )
2819
2820 if compareSwitch:
2821 description2 = "Compare switch roles from before failure"
2822 main.step( description2 )
2823 try:
2824 currentJson = json.loads( ONOSMastership[ 0 ] )
2825 oldJson = json.loads( mastershipState )
2826 except ( ValueError, TypeError ):
2827 main.log.exception( "Something is wrong with parsing " +
2828 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002829 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2830 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002831 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002832 mastershipCheck = main.TRUE
2833 for i in range( 1, 29 ):
2834 switchDPID = str(
2835 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
2836 current = [ switch[ 'master' ] for switch in currentJson
2837 if switchDPID in switch[ 'id' ] ]
2838 old = [ switch[ 'master' ] for switch in oldJson
2839 if switchDPID in switch[ 'id' ] ]
2840 if current == old:
2841 mastershipCheck = mastershipCheck and main.TRUE
2842 else:
2843 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2844 mastershipCheck = main.FALSE
2845 utilities.assert_equals(
2846 expect=main.TRUE,
2847 actual=mastershipCheck,
2848 onpass="Mastership of Switches was not changed",
2849 onfail="Mastership of some switches changed" )
2850
2851 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002852 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002853 intentCheck = main.FALSE
2854 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002855
2856 main.step( "Check for consistency in Intents from each controller" )
2857 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2858 main.log.info( "Intents are consistent across all ONOS " +
2859 "nodes" )
2860 else:
2861 consistentIntents = False
2862
2863 # Try to make it easy to figure out what is happening
2864 #
2865 # Intent ONOS1 ONOS2 ...
2866 # 0x01 INSTALLED INSTALLING
2867 # ... ... ...
2868 # ... ... ...
2869 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002870 for ctrl in main.Cluster.active():
2871 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002872 main.log.warn( title )
2873 # get all intent keys in the cluster
2874 keys = []
2875 for nodeStr in ONOSIntents:
2876 node = json.loads( nodeStr )
2877 for intent in node:
2878 keys.append( intent.get( 'id' ) )
2879 keys = set( keys )
2880 for key in keys:
2881 row = "%-13s" % key
2882 for nodeStr in ONOSIntents:
2883 node = json.loads( nodeStr )
2884 for intent in node:
2885 if intent.get( 'id' ) == key:
2886 row += "%-15s" % intent.get( 'state' )
2887 main.log.warn( row )
2888 # End table view
2889
2890 utilities.assert_equals(
2891 expect=True,
2892 actual=consistentIntents,
2893 onpass="Intents are consistent across all ONOS nodes",
2894 onfail="ONOS nodes have different views of intents" )
2895 intentStates = []
2896 for node in ONOSIntents: # Iter through ONOS nodes
2897 nodeStates = []
2898 # Iter through intents of a node
2899 try:
2900 for intent in json.loads( node ):
2901 nodeStates.append( intent[ 'state' ] )
2902 except ( ValueError, TypeError ):
2903 main.log.exception( "Error in parsing intents" )
2904 main.log.error( repr( node ) )
2905 intentStates.append( nodeStates )
2906 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2907 main.log.info( dict( out ) )
2908
2909 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002910 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002911 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002912 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002913 main.log.warn( json.dumps(
2914 json.loads( ONOSIntents[ i ] ),
2915 sort_keys=True,
2916 indent=4,
2917 separators=( ',', ': ' ) ) )
2918 elif intentsResults and consistentIntents:
2919 intentCheck = main.TRUE
2920
2921 # NOTE: Store has no durability, so intents are lost across system
2922 # restarts
2923 if not isRestart:
2924 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2925 # NOTE: this requires case 5 to pass for intentState to be set.
2926 # maybe we should stop the test if that fails?
2927 sameIntents = main.FALSE
2928 try:
2929 intentState
2930 except NameError:
2931 main.log.warn( "No previous intent state was saved" )
2932 else:
2933 if intentState and intentState == ONOSIntents[ 0 ]:
2934 sameIntents = main.TRUE
2935 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2936 # TODO: possibly the states have changed? we may need to figure out
2937 # what the acceptable states are
2938 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2939 sameIntents = main.TRUE
2940 try:
2941 before = json.loads( intentState )
2942 after = json.loads( ONOSIntents[ 0 ] )
2943 for intent in before:
2944 if intent not in after:
2945 sameIntents = main.FALSE
2946 main.log.debug( "Intent is not currently in ONOS " +
2947 "(at least in the same form):" )
2948 main.log.debug( json.dumps( intent ) )
2949 except ( ValueError, TypeError ):
2950 main.log.exception( "Exception printing intents" )
2951 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2952 main.log.debug( repr( intentState ) )
2953 if sameIntents == main.FALSE:
2954 try:
2955 main.log.debug( "ONOS intents before: " )
2956 main.log.debug( json.dumps( json.loads( intentState ),
2957 sort_keys=True, indent=4,
2958 separators=( ',', ': ' ) ) )
2959 main.log.debug( "Current ONOS intents: " )
2960 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
2961 sort_keys=True, indent=4,
2962 separators=( ',', ': ' ) ) )
2963 except ( ValueError, TypeError ):
2964 main.log.exception( "Exception printing intents" )
2965 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2966 main.log.debug( repr( intentState ) )
2967 utilities.assert_equals(
2968 expect=main.TRUE,
2969 actual=sameIntents,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002970 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
Devin Lim58046fa2017-07-05 16:55:00 -07002971 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
2972 intentCheck = intentCheck and sameIntents
2973
2974 main.step( "Get the OF Table entries and compare to before " +
2975 "component " + OnosAfterWhich[ afterWhich ] )
2976 FlowTables = main.TRUE
2977 for i in range( 28 ):
2978 main.log.info( "Checking flow table on s" + str( i + 1 ) )
2979 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
2980 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
2981 FlowTables = FlowTables and curSwitch
2982 if curSwitch == main.FALSE:
2983 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
2984 utilities.assert_equals(
2985 expect=main.TRUE,
2986 actual=FlowTables,
2987 onpass="No changes were found in the flow tables",
2988 onfail="Changes were found in the flow tables" )
2989
Jon Hallca319892017-06-15 15:25:22 -07002990 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07002991 """
2992 main.step( "Check the continuous pings to ensure that no packets " +
2993 "were dropped during component failure" )
2994 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
2995 main.params[ 'TESTONIP' ] )
2996 LossInPings = main.FALSE
2997 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
2998 for i in range( 8, 18 ):
2999 main.log.info(
3000 "Checking for a loss in pings along flow from s" +
3001 str( i ) )
3002 LossInPings = main.Mininet2.checkForLoss(
3003 "/tmp/ping.h" +
3004 str( i ) ) or LossInPings
3005 if LossInPings == main.TRUE:
3006 main.log.info( "Loss in ping detected" )
3007 elif LossInPings == main.ERROR:
3008 main.log.info( "There are multiple mininet process running" )
3009 elif LossInPings == main.FALSE:
3010 main.log.info( "No Loss in the pings" )
3011 main.log.info( "No loss of dataplane connectivity" )
3012 utilities.assert_equals(
3013 expect=main.FALSE,
3014 actual=LossInPings,
3015 onpass="No Loss of connectivity",
3016 onfail="Loss of dataplane connectivity detected" )
3017 # NOTE: Since intents are not persisted with IntnentStore,
3018 # we expect loss in dataplane connectivity
3019 LossInPings = main.FALSE
3020 """
Devin Lim58046fa2017-07-05 16:55:00 -07003021 def compareTopo( self, main ):
3022 """
3023 Compare topo
3024 """
3025 import json
3026 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003027 assert main, "main not defined"
3028 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003029 try:
3030 from tests.dependencies.topology import Topology
3031 except ImportError:
3032 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003033 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003034 try:
3035 main.topoRelated
3036 except ( NameError, AttributeError ):
3037 main.topoRelated = Topology()
3038 main.case( "Compare ONOS Topology view to Mininet topology" )
3039 main.caseExplanation = "Compare topology objects between Mininet" +\
3040 " and ONOS"
3041 topoResult = main.FALSE
3042 topoFailMsg = "ONOS topology don't match Mininet"
3043 elapsed = 0
3044 count = 0
3045 main.step( "Comparing ONOS topology to MN topology" )
3046 startTime = time.time()
3047 # Give time for Gossip to work
3048 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3049 devicesResults = main.TRUE
3050 linksResults = main.TRUE
3051 hostsResults = main.TRUE
3052 hostAttachmentResults = True
3053 count += 1
3054 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003055 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003056 kwargs={ 'sleep': 5, 'attempts': 5,
3057 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003058 ipResult = main.TRUE
3059
Devin Lim142b5342017-07-20 15:22:39 -07003060 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003061 kwargs={ 'sleep': 5, 'attempts': 5,
3062 'randomTime': True },
3063 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003064
3065 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003066 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003067 if hosts[ controller ]:
3068 for host in hosts[ controller ]:
3069 if host is None or host.get( 'ipAddresses', [] ) == []:
3070 main.log.error(
3071 "Error with host ipAddresses on controller" +
3072 controllerStr + ": " + str( host ) )
3073 ipResult = main.FALSE
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003074 ports = main.topoRelated.getAll( "ports", True,
Jon Hallca319892017-06-15 15:25:22 -07003075 kwargs={ 'sleep': 5, 'attempts': 5,
3076 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003077 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003078 kwargs={ 'sleep': 5, 'attempts': 5,
3079 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003080 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003081 kwargs={ 'sleep': 5, 'attempts': 5,
3082 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003083
3084 elapsed = time.time() - startTime
3085 cliTime = time.time() - cliStart
3086 print "Elapsed time: " + str( elapsed )
3087 print "CLI time: " + str( cliTime )
3088
3089 if all( e is None for e in devices ) and\
3090 all( e is None for e in hosts ) and\
3091 all( e is None for e in ports ) and\
3092 all( e is None for e in links ) and\
3093 all( e is None for e in clusters ):
3094 topoFailMsg = "Could not get topology from ONOS"
3095 main.log.error( topoFailMsg )
3096 continue # Try again, No use trying to compare
3097
3098 mnSwitches = main.Mininet1.getSwitches()
3099 mnLinks = main.Mininet1.getLinks()
3100 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003101 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003102 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003103 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3104 controller,
3105 mnSwitches,
3106 devices,
3107 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003108 utilities.assert_equals( expect=main.TRUE,
3109 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003110 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003111 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003112 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003113 " Switches view is incorrect" )
3114
Devin Lim58046fa2017-07-05 16:55:00 -07003115 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003116 main.Mininet1.compareLinks,
3117 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003118 utilities.assert_equals( expect=main.TRUE,
3119 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003120 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003121 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003122 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003123 " links view is incorrect" )
3124 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3125 currentHostsResult = main.Mininet1.compareHosts(
3126 mnHosts,
3127 hosts[ controller ] )
3128 elif hosts[ controller ] == []:
3129 currentHostsResult = main.TRUE
3130 else:
3131 currentHostsResult = main.FALSE
3132 utilities.assert_equals( expect=main.TRUE,
3133 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003134 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003135 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003136 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003137 " hosts don't match Mininet" )
3138 # CHECKING HOST ATTACHMENT POINTS
3139 hostAttachment = True
3140 zeroHosts = False
3141 # FIXME: topo-HA/obelisk specific mappings:
3142 # key is mac and value is dpid
3143 mappings = {}
3144 for i in range( 1, 29 ): # hosts 1 through 28
3145 # set up correct variables:
3146 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
3147 if i == 1:
3148 deviceId = "1000".zfill( 16 )
3149 elif i == 2:
3150 deviceId = "2000".zfill( 16 )
3151 elif i == 3:
3152 deviceId = "3000".zfill( 16 )
3153 elif i == 4:
3154 deviceId = "3004".zfill( 16 )
3155 elif i == 5:
3156 deviceId = "5000".zfill( 16 )
3157 elif i == 6:
3158 deviceId = "6000".zfill( 16 )
3159 elif i == 7:
3160 deviceId = "6007".zfill( 16 )
3161 elif i >= 8 and i <= 17:
3162 dpid = '3' + str( i ).zfill( 3 )
3163 deviceId = dpid.zfill( 16 )
3164 elif i >= 18 and i <= 27:
3165 dpid = '6' + str( i ).zfill( 3 )
3166 deviceId = dpid.zfill( 16 )
3167 elif i == 28:
3168 deviceId = "2800".zfill( 16 )
3169 mappings[ macId ] = deviceId
3170 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3171 if hosts[ controller ] == []:
3172 main.log.warn( "There are no hosts discovered" )
3173 zeroHosts = True
3174 else:
3175 for host in hosts[ controller ]:
3176 mac = None
3177 location = None
3178 device = None
3179 port = None
3180 try:
3181 mac = host.get( 'mac' )
3182 assert mac, "mac field could not be found for this host object"
Devin Limefaf3062017-08-14 16:18:19 -07003183 print host
3184 if 'locations' in host:
3185 location = host.get( 'locations' )[ 0 ]
3186 elif 'location' in host:
3187 location = host.get( 'location' )
Devin Lim58046fa2017-07-05 16:55:00 -07003188 assert location, "location field could not be found for this host object"
3189
3190 # Trim the protocol identifier off deviceId
3191 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
3192 assert device, "elementId field could not be found for this host location object"
3193
3194 port = location.get( 'port' )
3195 assert port, "port field could not be found for this host location object"
3196
3197 # Now check if this matches where they should be
3198 if mac and device and port:
3199 if str( port ) != "1":
3200 main.log.error( "The attachment port is incorrect for " +
3201 "host " + str( mac ) +
3202 ". Expected: 1 Actual: " + str( port ) )
3203 hostAttachment = False
3204 if device != mappings[ str( mac ) ]:
3205 main.log.error( "The attachment device is incorrect for " +
3206 "host " + str( mac ) +
3207 ". Expected: " + mappings[ str( mac ) ] +
3208 " Actual: " + device )
3209 hostAttachment = False
3210 else:
3211 hostAttachment = False
Devin Limefaf3062017-08-14 16:18:19 -07003212 except ( AssertionError, TypeError ):
Devin Lim58046fa2017-07-05 16:55:00 -07003213 main.log.exception( "Json object not as expected" )
3214 main.log.error( repr( host ) )
3215 hostAttachment = False
3216 else:
3217 main.log.error( "No hosts json output or \"Error\"" +
3218 " in output. hosts = " +
3219 repr( hosts[ controller ] ) )
3220 if zeroHosts is False:
3221 # TODO: Find a way to know if there should be hosts in a
3222 # given point of the test
3223 hostAttachment = True
3224
3225 # END CHECKING HOST ATTACHMENT POINTS
3226 devicesResults = devicesResults and currentDevicesResult
3227 linksResults = linksResults and currentLinksResult
3228 hostsResults = hostsResults and currentHostsResult
3229 hostAttachmentResults = hostAttachmentResults and\
3230 hostAttachment
3231 topoResult = ( devicesResults and linksResults
3232 and hostsResults and ipResult and
3233 hostAttachmentResults )
3234 utilities.assert_equals( expect=True,
3235 actual=topoResult,
3236 onpass="ONOS topology matches Mininet",
3237 onfail=topoFailMsg )
3238 # End of While loop to pull ONOS state
3239
3240 # Compare json objects for hosts and dataplane clusters
3241
3242 # hosts
3243 main.step( "Hosts view is consistent across all ONOS nodes" )
3244 consistentHostsResult = main.TRUE
3245 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003246 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003247 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3248 if hosts[ controller ] == hosts[ 0 ]:
3249 continue
3250 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003251 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003252 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003253 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003254 consistentHostsResult = main.FALSE
3255
3256 else:
Jon Hallca319892017-06-15 15:25:22 -07003257 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003258 controllerStr )
3259 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003260 main.log.debug( controllerStr +
3261 " hosts response: " +
3262 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003263 utilities.assert_equals(
3264 expect=main.TRUE,
3265 actual=consistentHostsResult,
3266 onpass="Hosts view is consistent across all ONOS nodes",
3267 onfail="ONOS nodes have different views of hosts" )
3268
3269 main.step( "Hosts information is correct" )
3270 hostsResults = hostsResults and ipResult
3271 utilities.assert_equals(
3272 expect=main.TRUE,
3273 actual=hostsResults,
3274 onpass="Host information is correct",
3275 onfail="Host information is incorrect" )
3276
3277 main.step( "Host attachment points to the network" )
3278 utilities.assert_equals(
3279 expect=True,
3280 actual=hostAttachmentResults,
3281 onpass="Hosts are correctly attached to the network",
3282 onfail="ONOS did not correctly attach hosts to the network" )
3283
3284 # Strongly connected clusters of devices
3285 main.step( "Clusters view is consistent across all ONOS nodes" )
3286 consistentClustersResult = main.TRUE
3287 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003288 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003289 if "Error" not in clusters[ controller ]:
3290 if clusters[ controller ] == clusters[ 0 ]:
3291 continue
3292 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003293 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003294 controllerStr +
3295 " is inconsistent with ONOS1" )
3296 consistentClustersResult = main.FALSE
3297 else:
3298 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003299 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003300 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003301 main.log.debug( controllerStr +
3302 " clusters response: " +
3303 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003304 utilities.assert_equals(
3305 expect=main.TRUE,
3306 actual=consistentClustersResult,
3307 onpass="Clusters view is consistent across all ONOS nodes",
3308 onfail="ONOS nodes have different views of clusters" )
3309 if not consistentClustersResult:
3310 main.log.debug( clusters )
3311 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003312 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003313
3314 main.step( "There is only one SCC" )
3315 # there should always only be one cluster
3316 try:
3317 numClusters = len( json.loads( clusters[ 0 ] ) )
3318 except ( ValueError, TypeError ):
3319 main.log.exception( "Error parsing clusters[0]: " +
3320 repr( clusters[ 0 ] ) )
3321 numClusters = "ERROR"
3322 clusterResults = main.FALSE
3323 if numClusters == 1:
3324 clusterResults = main.TRUE
3325 utilities.assert_equals(
3326 expect=1,
3327 actual=numClusters,
3328 onpass="ONOS shows 1 SCC",
3329 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3330
3331 topoResult = ( devicesResults and linksResults
3332 and hostsResults and consistentHostsResult
3333 and consistentClustersResult and clusterResults
3334 and ipResult and hostAttachmentResults )
3335
3336 topoResult = topoResult and int( count <= 2 )
3337 note = "note it takes about " + str( int( cliTime ) ) + \
3338 " seconds for the test to make all the cli calls to fetch " +\
3339 "the topology from each ONOS instance"
3340 main.log.info(
3341 "Very crass estimate for topology discovery/convergence( " +
3342 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3343 str( count ) + " tries" )
3344
3345 main.step( "Device information is correct" )
3346 utilities.assert_equals(
3347 expect=main.TRUE,
3348 actual=devicesResults,
3349 onpass="Device information is correct",
3350 onfail="Device information is incorrect" )
3351
3352 main.step( "Links are correct" )
3353 utilities.assert_equals(
3354 expect=main.TRUE,
3355 actual=linksResults,
3356 onpass="Link are correct",
3357 onfail="Links are incorrect" )
3358
3359 main.step( "Hosts are correct" )
3360 utilities.assert_equals(
3361 expect=main.TRUE,
3362 actual=hostsResults,
3363 onpass="Hosts are correct",
3364 onfail="Hosts are incorrect" )
3365
3366 # FIXME: move this to an ONOS state case
3367 main.step( "Checking ONOS nodes" )
3368 nodeResults = utilities.retry( self.nodesCheck,
3369 False,
Jon Hallca319892017-06-15 15:25:22 -07003370 args=[ main.Cluster.active() ],
Devin Lim58046fa2017-07-05 16:55:00 -07003371 attempts=5 )
3372 utilities.assert_equals( expect=True, actual=nodeResults,
3373 onpass="Nodes check successful",
3374 onfail="Nodes check NOT successful" )
3375 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003376 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003377 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003378 ctrl.name,
3379 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003380
3381 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003382 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003383
Devin Lim58046fa2017-07-05 16:55:00 -07003384 def linkDown( self, main, fromS="s3", toS="s28" ):
3385 """
3386 Link fromS-toS down
3387 """
3388 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003389 assert main, "main not defined"
3390 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003391 # NOTE: You should probably run a topology check after this
3392
3393 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3394
3395 description = "Turn off a link to ensure that Link Discovery " +\
3396 "is working properly"
3397 main.case( description )
3398
3399 main.step( "Kill Link between " + fromS + " and " + toS )
3400 LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
3401 main.log.info( "Waiting " + str( linkSleep ) +
3402 " seconds for link down to be discovered" )
3403 time.sleep( linkSleep )
3404 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3405 onpass="Link down successful",
3406 onfail="Failed to bring link down" )
3407 # TODO do some sort of check here
3408
3409 def linkUp( self, main, fromS="s3", toS="s28" ):
3410 """
3411 Link fromS-toS up
3412 """
3413 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003414 assert main, "main not defined"
3415 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003416 # NOTE: You should probably run a topology check after this
3417
3418 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3419
3420 description = "Restore a link to ensure that Link Discovery is " + \
3421 "working properly"
3422 main.case( description )
3423
Jon Hall4173b242017-09-12 17:04:38 -07003424 main.step( "Bring link between " + fromS + " and " + toS + " back up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003425 LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
3426 main.log.info( "Waiting " + str( linkSleep ) +
3427 " seconds for link up to be discovered" )
3428 time.sleep( linkSleep )
3429 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3430 onpass="Link up successful",
3431 onfail="Failed to bring link up" )
3432
3433 def switchDown( self, main ):
3434 """
3435 Switch Down
3436 """
3437 # NOTE: You should probably run a topology check after this
3438 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003439 assert main, "main not defined"
3440 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003441
3442 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3443
3444 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003445 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003446 main.case( description )
3447 switch = main.params[ 'kill' ][ 'switch' ]
3448 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3449
3450 # TODO: Make this switch parameterizable
3451 main.step( "Kill " + switch )
3452 main.log.info( "Deleting " + switch )
3453 main.Mininet1.delSwitch( switch )
3454 main.log.info( "Waiting " + str( switchSleep ) +
3455 " seconds for switch down to be discovered" )
3456 time.sleep( switchSleep )
3457 device = onosCli.getDevice( dpid=switchDPID )
3458 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003459 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003460 result = main.FALSE
3461 if device and device[ 'available' ] is False:
3462 result = main.TRUE
3463 utilities.assert_equals( expect=main.TRUE, actual=result,
3464 onpass="Kill switch successful",
3465 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003466
Devin Lim58046fa2017-07-05 16:55:00 -07003467 def switchUp( self, main ):
3468 """
3469 Switch Up
3470 """
3471 # NOTE: You should probably run a topology check after this
3472 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003473 assert main, "main not defined"
3474 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003475
3476 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3477 switch = main.params[ 'kill' ][ 'switch' ]
3478 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3479 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003480 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003481 description = "Adding a switch to ensure it is discovered correctly"
3482 main.case( description )
3483
3484 main.step( "Add back " + switch )
3485 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3486 for peer in links:
3487 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003488 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003489 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3490 main.log.info( "Waiting " + str( switchSleep ) +
3491 " seconds for switch up to be discovered" )
3492 time.sleep( switchSleep )
3493 device = onosCli.getDevice( dpid=switchDPID )
3494 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003495 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003496 result = main.FALSE
3497 if device and device[ 'available' ]:
3498 result = main.TRUE
3499 utilities.assert_equals( expect=main.TRUE, actual=result,
3500 onpass="add switch successful",
3501 onfail="Failed to add switch?" )
3502
3503 def startElectionApp( self, main ):
3504 """
3505 start election app on all onos nodes
3506 """
Devin Lim58046fa2017-07-05 16:55:00 -07003507 assert main, "main not defined"
3508 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003509
3510 main.case( "Start Leadership Election app" )
3511 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003512 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -07003513 appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003514 utilities.assert_equals(
3515 expect=main.TRUE,
3516 actual=appResult,
3517 onpass="Election app installed",
3518 onfail="Something went wrong with installing Leadership election" )
3519
3520 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003521 onosCli.electionTestRun()
3522 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003523 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003524 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003525 utilities.assert_equals(
3526 expect=True,
3527 actual=sameResult,
3528 onpass="All nodes see the same leaderboards",
3529 onfail="Inconsistent leaderboards" )
3530
3531 if sameResult:
3532 leader = leaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003533 if onosCli.ipAddress in leader:
Devin Lim58046fa2017-07-05 16:55:00 -07003534 correctLeader = True
3535 else:
3536 correctLeader = False
3537 main.step( "First node was elected leader" )
3538 utilities.assert_equals(
3539 expect=True,
3540 actual=correctLeader,
3541 onpass="Correct leader was elected",
3542 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003543 main.Cluster.testLeader = leader
3544
    def isElectionFunctional( self, main ):
        """
        Check that Leadership Election is still functional
        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        "old" and "new" variable prefixes refer to data from before vs after
        the withdrawal, and later before the withdrawal vs after re-election.
        """
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each node's candidates before withdrawal
        newLeaders = []  # list of lists of each node's candidates after withdrawal
        oldLeader = ''  # the old leader from oldLeaders, None if nodes disagree
        newLeader = ''  # the new leader from newLeaders, None if nodes disagree
        oldLeaderCLI = None  # the CLI of the old leader, used for re-electing
        expectNoLeader = False  # True when there is only one node, so a withdrawal
        #                         leaves nobody to take over leadership
        if len( main.Cluster.runningNodes ) == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
        utilities.assert_equals(
            expect=True,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = main.Cluster.active()
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All boards agree; the first entry is the current leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.info( "Old leader: " + oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for ctrl in main.Cluster.active():
            if oldLeader == ctrl.ipAddress:
                oldLeaderCLI = ctrl
                break
        else:  # FOR/ELSE: only runs when the loop found no matching node
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        # ( index 0 is the leader, index 1 appears to be a metadata slot )
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        # NOTE(review): oldLeaders[ 0 ] will raise IndexError here if the earlier
        # leaderboard check failed and oldLeaders is empty — confirm desired behavior
        elif len( oldLeaders[ 0 ] ) >= 3:
            if newLeader == oldLeaders[ 0 ][ 2 ]:
                # correct leader was elected
                correctCandidateResult = main.TRUE
            else:
                correctCandidateResult = main.FALSE
                main.log.error( "Candidate {} was elected. {} should have had priority.".format(
                    newLeader, oldLeaders[ 0 ][ 2 ] ) )
        else:
            main.log.warn( "Could not determine who should be the correct leader" )
            main.log.debug( oldLeaders[ 0 ] )
            correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: parameterize this sleep
        positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )

        # Check that the re-elected node is last on the candidate list
        # NOTE(review): reRunLeaders[ 0 ] assumes a non-empty result from
        # consistentLeaderboards — confirm it can never return [] here
        if not reRunLeaders[ 0 ]:
            positionResult = main.FALSE
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old Leader ({}) not in the proper position: {} ".format( str( oldLeader ),
                                                                                     str( reRunLeaders[ 0 ] ) ) )
            positionResult = main.FALSE
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )
Jon Hallca319892017-06-15 15:25:22 -07003711
    def installDistributedPrimitiveApp( self, main ):
        """
        Install Distributed Primitives app

        Also initializes the shared state ( counter name/value and set
        name/contents ) that the distributed-primitives test cases check
        against later.
        """
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        # Variables for the distributed primitives tests; later cases compare
        # ONOS's counter/set contents against these locally tracked values
        main.pCounterName = "TestON-Partitions"
        main.pCounterValue = 0
        main.onosSet = set( [] )
        main.onosSetName = "TestON-set"

        description = "Install Primitives app"
        main.case( description )
        main.step( "Install Primitives app" )
        appName = "org.onosproject.distributedprimitives"
        # Activating on one node is enough; app activation propagates cluster-wide
        appResults = main.Cluster.next().CLI.activateApp( appName )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=appResults,
                                 onpass="Primitives app activated",
                                 onfail="Primitives app not activated" )
        # TODO check on all nodes instead of sleeping
        time.sleep( 5 )  # To allow all nodes to activate