blob: fa75618fe92205d044dfda61fc8d501cd9b5619a [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002Copyright 2015 Open Networking Foundation ( ONF )
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -070011 ( at your option ) any later version.
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -070012
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
Jon Halla440e872016-03-31 15:15:50 -070021import json
Jon Hall41d39f12016-04-11 22:54:35 -070022import time
Jon Halle1a3b752015-07-22 13:02:46 -070023
Jon Hallf37d44d2017-05-24 10:37:30 -070024
Jon Hall41d39f12016-04-11 22:54:35 -070025class HA():
Jon Hall57b50432015-10-22 10:20:10 -070026
    def __init__( self ):
        """
        Initialize the HA test helper class.
        """
        # Placeholder attribute; the class is otherwise stateless and acts as
        # a collection of helper routines shared by the HA test suites.
        self.default = ''
Jon Hall57b50432015-10-22 10:20:10 -070029
Devin Lim58046fa2017-07-05 16:55:00 -070030 def customizeOnosGenPartitions( self ):
Devin Lim58046fa2017-07-05 16:55:00 -070031 # copy gen-partions file to ONOS
32 # NOTE: this assumes TestON and ONOS are on the same machine
33 srcFile = main.testDir + "/HA/dependencies/onos-gen-partitions"
34 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
35 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
36 main.ONOSbench.ip_address,
37 srcFile,
38 dstDir,
39 pwd=main.ONOSbench.pwd,
40 direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070041
    def cleanUpGenPartition( self ):
        """
        Restore the stock onos-gen-partitions script by checking out the
        original copy from git in the ONOS source tree.

        Exits the test run on a pexpect timeout/EOF.
        """
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( " Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            # NOTE(review): pexpect is not imported in this file's visible
            # imports; presumably TestON provides it globally -- confirm.
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070055
Devin Lim58046fa2017-07-05 16:55:00 -070056 def startingMininet( self ):
57 main.step( "Starting Mininet" )
58 # scp topo file to mininet
59 # TODO: move to params?
60 topoName = "obelisk.py"
61 filePath = main.ONOSbench.home + "/tools/test/topos/"
62 main.ONOSbench.scp( main.Mininet1,
63 filePath + topoName,
64 main.Mininet1.home,
65 direction="to" )
66 mnResult = main.Mininet1.startNet()
67 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
68 onpass="Mininet Started",
69 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070070
Devin Lim58046fa2017-07-05 16:55:00 -070071 def scalingMetadata( self ):
72 import re
Devin Lim142b5342017-07-20 15:22:39 -070073 main.step( "Generate initial metadata file" )
Devin Lim58046fa2017-07-05 16:55:00 -070074 main.scaling = main.params[ 'scaling' ].split( "," )
75 main.log.debug( main.scaling )
76 scale = main.scaling.pop( 0 )
77 main.log.debug( scale )
78 if "e" in scale:
79 equal = True
80 else:
81 equal = False
82 main.log.debug( equal )
Devin Lim142b5342017-07-20 15:22:39 -070083 main.Cluster.setRunningNode( int( re.search( "\d+", scale ).group( 0 ) ) )
84 genResult = main.Server.generateFile( main.Cluster.numCtrls, equal=equal )
Devin Lim58046fa2017-07-05 16:55:00 -070085 utilities.assert_equals( expect=main.TRUE, actual=genResult,
86 onpass="New cluster metadata file generated",
87 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070088
Devin Lim58046fa2017-07-05 16:55:00 -070089 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070090 main.step( "Generate initial metadata file" )
91 if main.Cluster.numCtrls >= 5:
92 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070093 else:
94 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim142b5342017-07-20 15:22:39 -070095 genResult = main.Server.generateFile( main.Cluster.numCtrls )
Devin Lim58046fa2017-07-05 16:55:00 -070096 utilities.assert_equals( expect=main.TRUE, actual=genResult,
97 onpass="New cluster metadata file generated",
98 onfail="Failled to generate new metadata file" )
Jon Hallca319892017-06-15 15:25:22 -070099
Devin Lim142b5342017-07-20 15:22:39 -0700100 def setServerForCluster( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700101 import os
102 main.step( "Setup server for cluster metadata file" )
103 main.serverPort = main.params[ 'server' ][ 'port' ]
104 rootDir = os.path.dirname( main.testFile ) + "/dependencies"
105 main.log.debug( "Root dir: {}".format( rootDir ) )
106 status = main.Server.start( main.ONOSbench,
107 rootDir,
108 port=main.serverPort,
109 logDir=main.logdir + "/server.log" )
110 utilities.assert_equals( expect=main.TRUE, actual=status,
111 onpass="Server started",
112 onfail="Failled to start SimpleHTTPServer" )
113
Jon Hall4f360bc2017-09-07 10:19:52 -0700114 def copyBackupConfig( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700115 main.step( "Copying backup config files" )
116 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
117 cp = main.ONOSbench.scp( main.ONOSbench,
118 main.onosServicepath,
119 main.onosServicepath + ".backup",
120 direction="to" )
121
122 utilities.assert_equals( expect=main.TRUE,
123 actual=cp,
124 onpass="Copy backup config file succeeded",
125 onfail="Copy backup config file failed" )
Jon Hall4f360bc2017-09-07 10:19:52 -0700126
    def setMetadataUrl( self ):
        """
        Point ONOS at the remote cluster metadata file by injecting a
        JAVA_OPTS export into the onos-service script via sed.
        """
        # NOTE: You should probably backup the config before and reset the config after the test
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # Slashes are escaped so the URI survives being used as sed replacement text
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
        main.log.warn( javaArgs )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # Insert an export line right after the first "bash" match in the script;
        # doubled braces keep ${JAVA_OPTS:-...} literal through str.format
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
                                                                                         main.onosServicepath )
        main.log.warn( sed )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        # Consume the echoed command up to the metadata filename, then the prompt
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )
148
149 def cleanUpOnosService( self ):
150 # Cleanup custom onos-service file
151 main.ONOSbench.scp( main.ONOSbench,
152 main.onosServicepath + ".backup",
153 main.onosServicepath,
154 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700155
    def consistentCheck( self ):
        """
        Checks that TestON counters are consistent across all nodes.

        Returns the tuple ( onosCounters, consistent )
        - onosCounters is the parsed json output of the counters command on
          all nodes
        - consistent is main.TRUE if all "TestON" counters are consistent
          across all nodes or main.FALSE
        """
        try:
            # Get onos counters results
            onosCountersRaw = []
            threads = []
            # Query every active node in parallel, retrying failed reads
            for ctrl in main.Cluster.active():
                t = main.Thread( target=utilities.retry,
                                 name="counters-" + str( ctrl ),
                                 args=[ ctrl.counters, [ None ] ],
                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                          'randomTime': True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                onosCountersRaw.append( t.result )
            onosCounters = []
            for i in range( len( onosCountersRaw ) ):
                try:
                    onosCounters.append( json.loads( onosCountersRaw[ i ] ) )
                except ( ValueError, TypeError ):
                    # Unparseable response: log it and keep the list aligned
                    # with the node index by appending an empty placeholder
                    main.log.error( "Could not parse counters response from " +
                                    str( main.Cluster.active( i ) ) )
                    main.log.warn( repr( onosCountersRaw[ i ] ) )
                    onosCounters.append( [] )

            testCounters = {}
            # make a list of all the "TestON-*" counters in ONOS
            # looks like a dict whose keys are the name of the ONOS node and
            # values are a list of the counters. I.E.
            # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
            # }
            # NOTE: There is an assumption that all nodes are active
            #       based on the above for loops
            # NOTE(review): iteritems() and dict.values()[ 0 ] below are
            # Python 2 only -- this module presumably runs under py2; confirm.
            for controller in enumerate( onosCounters ):
                for key, value in controller[ 1 ].iteritems():
                    if 'TestON' in key:
                        node = str( main.Cluster.active( controller[ 0 ] ) )
                        try:
                            testCounters[ node ].append( { key: value } )
                        except KeyError:
                            # first counter seen for this node
                            testCounters[ node ] = [ { key: value } ]
            # compare the counters on each node: all nodes must match the first
            firstV = testCounters.values()[ 0 ]
            tmp = [ v == firstV for k, v in testCounters.iteritems() ]
            if all( tmp ):
                consistent = main.TRUE
            else:
                consistent = main.FALSE
                main.log.error( "ONOS nodes have different values for counters:\n" +
                                testCounters )
            return ( onosCounters, consistent )
        except Exception:
            main.log.exception( "" )
            main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700220
221 def counterCheck( self, counterName, counterValue ):
222 """
223 Checks that TestON counters are consistent across all nodes and that
224 specified counter is in ONOS with the given value
225 """
226 try:
227 correctResults = main.TRUE
228 # Get onos counters results and consistentCheck
229 onosCounters, consistent = self.consistentCheck()
230 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700231 for i in range( len( main.Cluster.active() ) ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700232 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700233 onosValue = None
234 try:
235 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700236 except AttributeError:
Devin Lim142b5342017-07-20 15:22:39 -0700237 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -0700238 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700239 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700240 correctResults = main.FALSE
241 if onosValue == counterValue:
242 main.log.info( counterName + " counter value is correct" )
243 else:
Jon Hall41d39f12016-04-11 22:54:35 -0700244 main.log.error( counterName +
245 " counter value is incorrect," +
246 " expected value: " + str( counterValue ) +
247 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700248 correctResults = main.FALSE
249 return consistent and correctResults
250 except Exception:
251 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700252 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700253
    def consistentLeaderboards( self, nodes ):
        """
        Check that all given nodes agree on the leader candidate list for the
        leadership election test topic.

        nodes - iterable of node handles to query

        Returns the tuple ( result, leaderList ) where result is True when
        every node returned an identical leaderboard.
        """
        TOPIC = 'org.onosproject.election'
        # FIXME: use threads
        # FIXME: should we retry outside the function?
        for n in range( 5 ):  # Retry in case election is still happening
            leaderList = []
            # Get all leaderboards
            for cli in nodes:
                leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
            # Compare leaderboards
            # NOTE(review): "leaderList is not None" is always True since
            # leaderList is a list built above; presumably the intent was to
            # check individual entries for None -- confirm before changing.
            result = all( i == leaderList[ 0 ] for i in leaderList ) and\
                     leaderList is not None
            main.log.debug( leaderList )
            main.log.warn( result )
            if result:
                return ( result, leaderList )
            time.sleep( 5 )  # TODO: paramerterize
        main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
        return ( result, leaderList )
273
    def generateGraph( self, testName, plotName="Plot-HA", index=2 ):
        """
        DEPRECATED: ONOSSetup.py now creates these graphs; this method is a
        no-op kept so older callers do not break.
        """
        main.log.debug( "HA.generateGraph() is deprecated; ONOSSetup now creates these graphs." )
Jon Hallca319892017-06-15 15:25:22 -0700278
    def initialSetUp( self, serviceClean=False ):
        """
        Rest of the initial setup: optionally start packet capture and revert
        onos-service changes, then verify the ONOS nodes are up, activate the
        apps listed in the params file, apply ONOS configuration settings,
        and check app IDs.

        serviceClean - when True, revert changes to the onos.conf and
                       onos.service init files via git checkout
        """
        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        # Retry since the nodes may still be coming up
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       attempts=9 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump each node's non-ACTIVE karaf components for debugging,
            # then abort the test run
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This should be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700364
Jon Hallca319892017-06-15 15:25:22 -0700365 def commonChecks( self ):
366 # TODO: make this assertable or assert in here?
367 self.topicsCheck()
368 self.partitionsCheck()
369 self.pendingMapCheck()
370 self.appCheck()
371
372 def topicsCheck( self, extraTopics=[] ):
373 """
374 Check for work partition topics in leaders output
375 """
376 leaders = main.Cluster.next().leaders()
377 missing = False
378 try:
379 if leaders:
380 parsedLeaders = json.loads( leaders )
381 output = json.dumps( parsedLeaders,
382 sort_keys=True,
383 indent=4,
384 separators=( ',', ': ' ) )
385 main.log.debug( "Leaders: " + output )
386 # check for all intent partitions
387 topics = []
388 for i in range( 14 ):
389 topics.append( "work-partition-" + str( i ) )
390 topics += extraTopics
391 main.log.debug( topics )
392 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
393 for topic in topics:
394 if topic not in ONOStopics:
395 main.log.error( "Error: " + topic +
396 " not in leaders" )
397 missing = True
398 else:
399 main.log.error( "leaders() returned None" )
400 except ( ValueError, TypeError ):
401 main.log.exception( "Error parsing leaders" )
402 main.log.error( repr( leaders ) )
403 if missing:
Jon Hall4173b242017-09-12 17:04:38 -0700404 # NOTE Can we refactor this into the Cluster class?
405 # Maybe an option to print the output of a command from each node?
Jon Hallca319892017-06-15 15:25:22 -0700406 for ctrl in main.Cluster.active():
407 response = ctrl.CLI.leaders( jsonFormat=False )
408 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
409 str( response ) )
410 return missing
411
412 def partitionsCheck( self ):
413 # TODO: return something assertable
414 partitions = main.Cluster.next().partitions()
415 try:
416 if partitions:
417 parsedPartitions = json.loads( partitions )
418 output = json.dumps( parsedPartitions,
419 sort_keys=True,
420 indent=4,
421 separators=( ',', ': ' ) )
422 main.log.debug( "Partitions: " + output )
423 # TODO check for a leader in all paritions
424 # TODO check for consistency among nodes
425 else:
426 main.log.error( "partitions() returned None" )
427 except ( ValueError, TypeError ):
428 main.log.exception( "Error parsing partitions" )
429 main.log.error( repr( partitions ) )
430
431 def pendingMapCheck( self ):
432 pendingMap = main.Cluster.next().pendingMap()
433 try:
434 if pendingMap:
435 parsedPending = json.loads( pendingMap )
436 output = json.dumps( parsedPending,
437 sort_keys=True,
438 indent=4,
439 separators=( ',', ': ' ) )
440 main.log.debug( "Pending map: " + output )
441 # TODO check something here?
442 else:
443 main.log.error( "pendingMap() returned None" )
444 except ( ValueError, TypeError ):
445 main.log.exception( "Error parsing pending map" )
446 main.log.error( repr( pendingMap ) )
447
448 def appCheck( self ):
449 """
450 Check App IDs on all nodes
451 """
452 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
453 appResults = main.Cluster.command( "appToIDCheck" )
454 appCheck = all( i == main.TRUE for i in appResults )
455 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700456 ctrl = main.Cluster.active( 0 )
Jon Hallca319892017-06-15 15:25:22 -0700457 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.apps() ) )
458 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.appIDs() ) )
459 return appCheck
460
Jon Halle0f0b342017-04-18 11:43:47 -0700461 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
462 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700463 completedValues = main.Cluster.command( "workQueueTotalCompleted",
464 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700465 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700466 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700467 completedResult = all( completedResults )
468 if not completedResult:
469 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
470 workQueueName, completed, completedValues ) )
471
472 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700473 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
474 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700475 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700476 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700477 inProgressResult = all( inProgressResults )
478 if not inProgressResult:
479 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
480 workQueueName, inProgress, inProgressValues ) )
481
482 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700483 pendingValues = main.Cluster.command( "workQueueTotalPending",
484 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700485 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700486 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700487 pendingResult = all( pendingResults )
488 if not pendingResult:
489 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
490 workQueueName, pending, pendingValues ) )
491 return completedResult and inProgressResult and pendingResult
492
    def assignDevices( self, main ):
        """
        Assign devices to controllers.

        main - the TestON main object ( NOTE: this parameter shadows the
               module-level main; both presumably refer to the same object )
        """
        import re
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
                               "and check that an ONOS node becomes the " + \
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = main.Cluster.getIps()
        swList = []
        # Switches s1..s28 ( 28 switches in the obelisk topology )
        for i in range( 1, 29 ):
            swList.append( "s" + str( i ) )
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for i in range( 1, 29 ):
            response = main.Mininet1.getSwController( "s" + str( i ) )
            try:
                main.log.info( str( response ) )
            except Exception:
                main.log.info( repr( response ) )
            # Every running controller must appear in each switch's
            # controller list
            for ctrl in main.Cluster.runningNodes:
                if re.search( "tcp:" + ctrl.ipAddress, response ):
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.error( "Error, node " + repr( ctrl ) + " is " +
                                    "not in the list of controllers s" +
                                    str( i ) + " is connecting to." )
                    mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )
Jon Hallca319892017-06-15 15:25:22 -0700533
Devin Lim58046fa2017-07-05 16:55:00 -0700534 def assignIntents( self, main ):
535 """
536 Assign intents
537 """
538 import time
539 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700540 assert main, "main not defined"
541 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700542 try:
543 main.HAlabels
544 except ( NameError, AttributeError ):
545 main.log.error( "main.HAlabels not defined, setting to []" )
546 main.HAlabels = []
547 try:
548 main.HAdata
549 except ( NameError, AttributeError ):
550 main.log.error( "data not defined, setting to []" )
551 main.HAdata = []
552 main.case( "Adding host Intents" )
553 main.caseExplanation = "Discover hosts by using pingall then " +\
554 "assign predetermined host-to-host intents." +\
555 " After installation, check that the intent" +\
556 " is distributed to all nodes and the state" +\
557 " is INSTALLED"
558
559 # install onos-app-fwd
560 main.step( "Install reactive forwarding app" )
Jon Hallca319892017-06-15 15:25:22 -0700561 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -0700562 installResults = onosCli.CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700563 utilities.assert_equals( expect=main.TRUE, actual=installResults,
564 onpass="Install fwd successful",
565 onfail="Install fwd failed" )
566
567 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700568 appCheck = self.appCheck()
569 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700570 onpass="App Ids seem to be correct",
571 onfail="Something is wrong with app Ids" )
572
573 main.step( "Discovering Hosts( Via pingall for now )" )
574 # FIXME: Once we have a host discovery mechanism, use that instead
575 # REACTIVE FWD test
576 pingResult = main.FALSE
577 passMsg = "Reactive Pingall test passed"
578 time1 = time.time()
579 pingResult = main.Mininet1.pingall()
580 time2 = time.time()
581 if not pingResult:
582 main.log.warn( "First pingall failed. Trying again..." )
583 pingResult = main.Mininet1.pingall()
584 passMsg += " on the second try"
585 utilities.assert_equals(
586 expect=main.TRUE,
587 actual=pingResult,
588 onpass=passMsg,
589 onfail="Reactive Pingall failed, " +
590 "one or more ping pairs failed" )
591 main.log.info( "Time for pingall: %2f seconds" %
592 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700593 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700594 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700595 # timeout for fwd flows
596 time.sleep( 11 )
597 # uninstall onos-app-fwd
598 main.step( "Uninstall reactive forwarding app" )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700599 uninstallResult = onosCli.CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700600 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
601 onpass="Uninstall fwd successful",
602 onfail="Uninstall fwd failed" )
603
604 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700605 appCheck2 = self.appCheck()
606 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700607 onpass="App Ids seem to be correct",
608 onfail="Something is wrong with app Ids" )
609
610 main.step( "Add host intents via cli" )
611 intentIds = []
612 # TODO: move the host numbers to params
613 # Maybe look at all the paths we ping?
614 intentAddResult = True
615 hostResult = main.TRUE
616 for i in range( 8, 18 ):
617 main.log.info( "Adding host intent between h" + str( i ) +
618 " and h" + str( i + 10 ) )
619 host1 = "00:00:00:00:00:" + \
620 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
621 host2 = "00:00:00:00:00:" + \
622 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
623 # NOTE: getHost can return None
Jon Hallca319892017-06-15 15:25:22 -0700624 host1Dict = onosCli.CLI.getHost( host1 )
625 host2Dict = onosCli.CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700626 host1Id = None
627 host2Id = None
628 if host1Dict and host2Dict:
629 host1Id = host1Dict.get( 'id', None )
630 host2Id = host2Dict.get( 'id', None )
631 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700632 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700633 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700634 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700635 if tmpId:
636 main.log.info( "Added intent with id: " + tmpId )
637 intentIds.append( tmpId )
638 else:
639 main.log.error( "addHostIntent returned: " +
640 repr( tmpId ) )
641 else:
642 main.log.error( "Error, getHost() failed for h" + str( i ) +
643 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700644 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700645 try:
Jon Hallca319892017-06-15 15:25:22 -0700646 output = json.dumps( json.loads( hosts ),
647 sort_keys=True,
648 indent=4,
649 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700650 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700651 output = repr( hosts )
652 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700653 hostResult = main.FALSE
654 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
655 onpass="Found a host id for each host",
656 onfail="Error looking up host ids" )
657
658 intentStart = time.time()
659 onosIds = onosCli.getAllIntentsId()
660 main.log.info( "Submitted intents: " + str( intentIds ) )
661 main.log.info( "Intents in ONOS: " + str( onosIds ) )
662 for intent in intentIds:
663 if intent in onosIds:
664 pass # intent submitted is in onos
665 else:
666 intentAddResult = False
667 if intentAddResult:
668 intentStop = time.time()
669 else:
670 intentStop = None
671 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700672 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700673 intentStates = []
674 installedCheck = True
675 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
676 count = 0
677 try:
678 for intent in json.loads( intents ):
679 state = intent.get( 'state', None )
680 if "INSTALLED" not in state:
681 installedCheck = False
682 intentId = intent.get( 'id', None )
683 intentStates.append( ( intentId, state ) )
684 except ( ValueError, TypeError ):
685 main.log.exception( "Error parsing intents" )
686 # add submitted intents not in the store
687 tmplist = [ i for i, s in intentStates ]
688 missingIntents = False
689 for i in intentIds:
690 if i not in tmplist:
691 intentStates.append( ( i, " - " ) )
692 missingIntents = True
693 intentStates.sort()
694 for i, s in intentStates:
695 count += 1
696 main.log.info( "%-6s%-15s%-15s" %
697 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700698 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700699
700 intentAddResult = bool( intentAddResult and not missingIntents and
701 installedCheck )
702 if not intentAddResult:
703 main.log.error( "Error in pushing host intents to ONOS" )
704
705 main.step( "Intent Anti-Entropy dispersion" )
706 for j in range( 100 ):
707 correct = True
708 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700709 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700710 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700711 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700712 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700713 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700714 str( sorted( onosIds ) ) )
715 if sorted( ids ) != sorted( intentIds ):
716 main.log.warn( "Set of intent IDs doesn't match" )
717 correct = False
718 break
719 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700720 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700721 for intent in intents:
722 if intent[ 'state' ] != "INSTALLED":
723 main.log.warn( "Intent " + intent[ 'id' ] +
724 " is " + intent[ 'state' ] )
725 correct = False
726 break
727 if correct:
728 break
729 else:
730 time.sleep( 1 )
731 if not intentStop:
732 intentStop = time.time()
733 global gossipTime
734 gossipTime = intentStop - intentStart
735 main.log.info( "It took about " + str( gossipTime ) +
736 " seconds for all intents to appear in each node" )
737 append = False
738 title = "Gossip Intents"
739 count = 1
740 while append is False:
741 curTitle = title + str( count )
742 if curTitle not in main.HAlabels:
743 main.HAlabels.append( curTitle )
744 main.HAdata.append( str( gossipTime ) )
745 append = True
746 else:
747 count += 1
748 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700749 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700750 utilities.assert_greater_equals(
751 expect=maxGossipTime, actual=gossipTime,
752 onpass="ECM anti-entropy for intents worked within " +
753 "expected time",
754 onfail="Intent ECM anti-entropy took too long. " +
755 "Expected time:{}, Actual time:{}".format( maxGossipTime,
756 gossipTime ) )
757 if gossipTime <= maxGossipTime:
758 intentAddResult = True
759
Jon Hallca319892017-06-15 15:25:22 -0700760 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700761 if not intentAddResult or "key" in pendingMap:
762 import time
763 installedCheck = True
764 main.log.info( "Sleeping 60 seconds to see if intents are found" )
765 time.sleep( 60 )
766 onosIds = onosCli.getAllIntentsId()
767 main.log.info( "Submitted intents: " + str( intentIds ) )
768 main.log.info( "Intents in ONOS: " + str( onosIds ) )
769 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700770 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700771 intentStates = []
772 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
773 count = 0
774 try:
775 for intent in json.loads( intents ):
776 # Iter through intents of a node
777 state = intent.get( 'state', None )
778 if "INSTALLED" not in state:
779 installedCheck = False
780 intentId = intent.get( 'id', None )
781 intentStates.append( ( intentId, state ) )
782 except ( ValueError, TypeError ):
783 main.log.exception( "Error parsing intents" )
784 # add submitted intents not in the store
785 tmplist = [ i for i, s in intentStates ]
786 for i in intentIds:
787 if i not in tmplist:
788 intentStates.append( ( i, " - " ) )
789 intentStates.sort()
790 for i, s in intentStates:
791 count += 1
792 main.log.info( "%-6s%-15s%-15s" %
793 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700794 self.topicsCheck( [ "org.onosproject.election" ] )
795 self.partitionsCheck()
796 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700797
    def pingAcrossHostIntent( self, main ):
        """
        Ping across added host intents.

        Verifies end-to-end connectivity for the host intents installed by an
        earlier case: waits (up to ~90s) for all intents to reach INSTALLED,
        pings h8-h17 against h18-h27, checks topic leadership/partitions, and
        if anything failed, waits 60 seconds and pings again.

        Args:
            main: the TestON test object (provides Cluster, Mininet1, log,
                  step/case bookkeeping and TRUE/FALSE result constants).
        """
        import json
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        main.case( "Verify connectivity by sending traffic across Intents" )
        main.caseExplanation = "Ping across added host intents to check " +\
                               "functionality and check the state of " +\
                               "the intent"

        onosCli = main.Cluster.next()
        main.step( "Check Intent state" )
        installedCheck = False
        loopCount = 0
        # Poll once per second (max 90 tries) until every intent on this node
        # reports an INSTALLED state.
        while not installedCheck and loopCount < 90:
            installedCheck = True
            # Print the intent states
            intents = onosCli.CLI.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                        main.log.debug( "Failed intent: " + str( intent ) )
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                # intents() returned something that wasn't valid JSON
                main.log.exception( "Error parsing intents." )
            # Print states
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            if not installedCheck:
                time.sleep( 1 )
                loopCount += 1
        utilities.assert_equals( expect=True, actual=installedCheck,
                                 onpass="Intents are all INSTALLED",
                                 onfail="Intents are not all in " +
                                        "INSTALLED state" )

        main.step( "Ping across added host intents" )
        # Host pairs follow the intent layout: h<i> <-> h<i+10> for i in 8..17
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            # Dump the current intents to help debug the ping failure
            try:
                tmpIntents = onosCli.CLI.intents()
                output = json.dumps( json.loads( tmpIntents ),
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
            except ( ValueError, TypeError ):
                output = repr( tmpIntents )
            main.log.debug( "ONOS1 intents: " + output )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )

        main.step( "Check leadership of topics" )
        # NOTE(review): expect=False suggests topicsCheck() returns a
        # "topics missing" flag -- confirm against the topicsCheck definition.
        topicsCheck = self.topicsCheck()
        utilities.assert_equals( expect=False, actual=topicsCheck,
                                 onpass="intent Partitions is in leaders",
                                 onfail="Some topics were lost" )
        self.partitionsCheck()
        self.pendingMapCheck()

        if not installedCheck:
            # Intents never all reached INSTALLED; give anti-entropy a minute
            # and re-inspect before pinging again below.
            main.log.info( "Waiting 60 seconds to see if the state of " +
                           "intents change" )
            time.sleep( 60 )
            # Print the intent states
            intents = onosCli.CLI.intents()
            intentStates = []
            main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
            count = 0
            # Iter through intents of a node
            try:
                for intent in json.loads( intents ):
                    state = intent.get( 'state', None )
                    if "INSTALLED" not in state:
                        installedCheck = False
                    intentId = intent.get( 'id', None )
                    intentStates.append( ( intentId, state ) )
            except ( ValueError, TypeError ):
                main.log.exception( "Error parsing intents." )
            intentStates.sort()
            for i, s in intentStates:
                count += 1
                main.log.info( "%-6s%-15s%-15s" %
                               ( str( count ), str( i ), str( s ) ) )
            self.commonChecks()

        # Print flowrules
        main.log.debug( onosCli.CLI.flows() )
        main.step( "Wait a minute then ping again" )
        # the wait is above
        PingResult = main.TRUE
        for i in range( 8, 18 ):
            ping = main.Mininet1.pingHost( src="h" + str( i ),
                                           target="h" + str( i + 10 ) )
            PingResult = PingResult and ping
            if ping == main.FALSE:
                main.log.warn( "Ping failed between h" + str( i ) +
                               " and h" + str( i + 10 ) )
            elif ping == main.TRUE:
                main.log.info( "Ping test passed!" )
                # Don't set PingResult or you'd override failures
        if PingResult == main.FALSE:
            main.log.error(
                "Intents have not been installed correctly, pings failed." )
            # TODO: pretty print
            main.log.warn( str( onosCli.name ) + " intents: " )
            try:
                tmpIntents = onosCli.CLI.intents()
                main.log.warn( json.dumps( json.loads( tmpIntents ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )
            except ( ValueError, TypeError ):
                main.log.warn( repr( tmpIntents ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=PingResult,
            onpass="Intents have been installed correctly and pings work",
            onfail="Intents have not been installed correctly, pings failed." )
945
Devin Lim142b5342017-07-20 15:22:39 -0700946 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700947 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700948 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700949 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700950 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700951 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -0700952 actual=rolesNotNull,
953 onpass="Each device has a master",
954 onfail="Some devices don't have a master assigned" )
955
Devin Lim142b5342017-07-20 15:22:39 -0700956 def checkTheRole( self ):
957 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -0700958 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -0700959 consistentMastership = True
960 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -0700961 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -0700962 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700963 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -0700964 main.log.error( "Error in getting " + node + " roles" )
965 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -0700966 repr( ONOSMastership[ i ] ) )
967 rolesResults = False
968 utilities.assert_equals(
969 expect=True,
970 actual=rolesResults,
971 onpass="No error in reading roles output",
972 onfail="Error in reading roles from ONOS" )
973
974 main.step( "Check for consistency in roles from each controller" )
975 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
976 main.log.info(
977 "Switch roles are consistent across all ONOS nodes" )
978 else:
979 consistentMastership = False
980 utilities.assert_equals(
981 expect=True,
982 actual=consistentMastership,
983 onpass="Switch roles are consistent across all ONOS nodes",
984 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -0700985 return ONOSMastership, rolesResults, consistentMastership
986
987 def checkingIntents( self ):
988 main.step( "Get the intents from each controller" )
989 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
990 intentsResults = True
991 for i in range( len( ONOSIntents ) ):
992 node = str( main.Cluster.active( i ) )
993 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
994 main.log.error( "Error in getting " + node + " intents" )
995 main.log.warn( node + " intents response: " +
996 repr( ONOSIntents[ i ] ) )
997 intentsResults = False
998 utilities.assert_equals(
999 expect=True,
1000 actual=intentsResults,
1001 onpass="No error in reading intents output",
1002 onfail="Error in reading intents from ONOS" )
1003 return ONOSIntents, intentsResults
1004
1005 def readingState( self, main ):
1006 """
1007 Reading state of ONOS
1008 """
1009 import json
1010 import time
1011 assert main, "main not defined"
1012 assert utilities.assert_equals, "utilities.assert_equals not defined"
1013 try:
1014 from tests.dependencies.topology import Topology
1015 except ImportError:
1016 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001017 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001018 try:
1019 main.topoRelated
1020 except ( NameError, AttributeError ):
1021 main.topoRelated = Topology()
1022 main.case( "Setting up and gathering data for current state" )
1023 # The general idea for this test case is to pull the state of
1024 # ( intents,flows, topology,... ) from each ONOS node
1025 # We can then compare them with each other and also with past states
1026
1027 global mastershipState
1028 mastershipState = '[]'
1029
1030 self.checkRoleNotNull()
1031
1032 main.step( "Get the Mastership of each switch from each controller" )
1033 mastershipCheck = main.FALSE
1034
1035 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001036
1037 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001038 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001039 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001040 try:
1041 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001042 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001043 json.dumps(
1044 json.loads( ONOSMastership[ i ] ),
1045 sort_keys=True,
1046 indent=4,
1047 separators=( ',', ': ' ) ) )
1048 except ( ValueError, TypeError ):
1049 main.log.warn( repr( ONOSMastership[ i ] ) )
1050 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001051 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001052 mastershipState = ONOSMastership[ 0 ]
1053
Devin Lim58046fa2017-07-05 16:55:00 -07001054 global intentState
1055 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001056 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001057 intentCheck = main.FALSE
1058 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001059
Devin Lim58046fa2017-07-05 16:55:00 -07001060 main.step( "Check for consistency in Intents from each controller" )
1061 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1062 main.log.info( "Intents are consistent across all ONOS " +
1063 "nodes" )
1064 else:
1065 consistentIntents = False
1066 main.log.error( "Intents not consistent" )
1067 utilities.assert_equals(
1068 expect=True,
1069 actual=consistentIntents,
1070 onpass="Intents are consistent across all ONOS nodes",
1071 onfail="ONOS nodes have different views of intents" )
1072
1073 if intentsResults:
1074 # Try to make it easy to figure out what is happening
1075 #
1076 # Intent ONOS1 ONOS2 ...
1077 # 0x01 INSTALLED INSTALLING
1078 # ... ... ...
1079 # ... ... ...
1080 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001081 for ctrl in main.Cluster.active():
1082 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001083 main.log.warn( title )
1084 # get all intent keys in the cluster
1085 keys = []
1086 try:
1087 # Get the set of all intent keys
1088 for nodeStr in ONOSIntents:
1089 node = json.loads( nodeStr )
1090 for intent in node:
1091 keys.append( intent.get( 'id' ) )
1092 keys = set( keys )
1093 # For each intent key, print the state on each node
1094 for key in keys:
1095 row = "%-13s" % key
1096 for nodeStr in ONOSIntents:
1097 node = json.loads( nodeStr )
1098 for intent in node:
1099 if intent.get( 'id', "Error" ) == key:
1100 row += "%-15s" % intent.get( 'state' )
1101 main.log.warn( row )
1102 # End of intent state table
1103 except ValueError as e:
1104 main.log.exception( e )
1105 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1106
1107 if intentsResults and not consistentIntents:
1108 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001109 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001110 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1111 sort_keys=True,
1112 indent=4,
1113 separators=( ',', ': ' ) ) )
1114 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001115 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001116 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001117 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001118 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1119 sort_keys=True,
1120 indent=4,
1121 separators=( ',', ': ' ) ) )
1122 else:
Jon Hallca319892017-06-15 15:25:22 -07001123 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001124 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001125 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001126 intentState = ONOSIntents[ 0 ]
1127
1128 main.step( "Get the flows from each controller" )
1129 global flowState
1130 flowState = []
Jon Hall4173b242017-09-12 17:04:38 -07001131 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001132 ONOSFlowsJson = []
1133 flowCheck = main.FALSE
1134 consistentFlows = True
1135 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001136 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001137 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001138 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001139 main.log.error( "Error in getting " + node + " flows" )
1140 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001141 repr( ONOSFlows[ i ] ) )
1142 flowsResults = False
1143 ONOSFlowsJson.append( None )
1144 else:
1145 try:
1146 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1147 except ( ValueError, TypeError ):
1148 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001149 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001150 " response as json." )
1151 main.log.error( repr( ONOSFlows[ i ] ) )
1152 ONOSFlowsJson.append( None )
1153 flowsResults = False
1154 utilities.assert_equals(
1155 expect=True,
1156 actual=flowsResults,
1157 onpass="No error in reading flows output",
1158 onfail="Error in reading flows from ONOS" )
1159
1160 main.step( "Check for consistency in Flows from each controller" )
1161 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1162 if all( tmp ):
1163 main.log.info( "Flow count is consistent across all ONOS nodes" )
1164 else:
1165 consistentFlows = False
1166 utilities.assert_equals(
1167 expect=True,
1168 actual=consistentFlows,
1169 onpass="The flow count is consistent across all ONOS nodes",
1170 onfail="ONOS nodes have different flow counts" )
1171
1172 if flowsResults and not consistentFlows:
1173 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001174 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001175 try:
1176 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001177 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001178 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1179 indent=4, separators=( ',', ': ' ) ) )
1180 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001181 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001182 repr( ONOSFlows[ i ] ) )
1183 elif flowsResults and consistentFlows:
1184 flowCheck = main.TRUE
1185 flowState = ONOSFlows[ 0 ]
1186
1187 main.step( "Get the OF Table entries" )
1188 global flows
1189 flows = []
1190 for i in range( 1, 29 ):
1191 flows.append( main.Mininet1.getFlowTable( "s" + str( i ), version="1.3", debug=False ) )
1192 if flowCheck == main.FALSE:
1193 for table in flows:
1194 main.log.warn( table )
1195 # TODO: Compare switch flow tables with ONOS flow tables
1196
1197 main.step( "Start continuous pings" )
1198 main.Mininet2.pingLong(
1199 src=main.params[ 'PING' ][ 'source1' ],
1200 target=main.params[ 'PING' ][ 'target1' ],
1201 pingTime=500 )
1202 main.Mininet2.pingLong(
1203 src=main.params[ 'PING' ][ 'source2' ],
1204 target=main.params[ 'PING' ][ 'target2' ],
1205 pingTime=500 )
1206 main.Mininet2.pingLong(
1207 src=main.params[ 'PING' ][ 'source3' ],
1208 target=main.params[ 'PING' ][ 'target3' ],
1209 pingTime=500 )
1210 main.Mininet2.pingLong(
1211 src=main.params[ 'PING' ][ 'source4' ],
1212 target=main.params[ 'PING' ][ 'target4' ],
1213 pingTime=500 )
1214 main.Mininet2.pingLong(
1215 src=main.params[ 'PING' ][ 'source5' ],
1216 target=main.params[ 'PING' ][ 'target5' ],
1217 pingTime=500 )
1218 main.Mininet2.pingLong(
1219 src=main.params[ 'PING' ][ 'source6' ],
1220 target=main.params[ 'PING' ][ 'target6' ],
1221 pingTime=500 )
1222 main.Mininet2.pingLong(
1223 src=main.params[ 'PING' ][ 'source7' ],
1224 target=main.params[ 'PING' ][ 'target7' ],
1225 pingTime=500 )
1226 main.Mininet2.pingLong(
1227 src=main.params[ 'PING' ][ 'source8' ],
1228 target=main.params[ 'PING' ][ 'target8' ],
1229 pingTime=500 )
1230 main.Mininet2.pingLong(
1231 src=main.params[ 'PING' ][ 'source9' ],
1232 target=main.params[ 'PING' ][ 'target9' ],
1233 pingTime=500 )
1234 main.Mininet2.pingLong(
1235 src=main.params[ 'PING' ][ 'source10' ],
1236 target=main.params[ 'PING' ][ 'target10' ],
1237 pingTime=500 )
1238
1239 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001240 devices = main.topoRelated.getAll( "devices" )
1241 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1242 ports = main.topoRelated.getAll( "ports" )
1243 links = main.topoRelated.getAll( "links" )
1244 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001245 # Compare json objects for hosts and dataplane clusters
1246
1247 # hosts
1248 main.step( "Host view is consistent across ONOS nodes" )
1249 consistentHostsResult = main.TRUE
1250 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001251 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001252 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1253 if hosts[ controller ] == hosts[ 0 ]:
1254 continue
1255 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001256 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001257 controllerStr +
1258 " is inconsistent with ONOS1" )
1259 main.log.warn( repr( hosts[ controller ] ) )
1260 consistentHostsResult = main.FALSE
1261
1262 else:
Jon Hallca319892017-06-15 15:25:22 -07001263 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001264 controllerStr )
1265 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001266 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001267 " hosts response: " +
1268 repr( hosts[ controller ] ) )
1269 utilities.assert_equals(
1270 expect=main.TRUE,
1271 actual=consistentHostsResult,
1272 onpass="Hosts view is consistent across all ONOS nodes",
1273 onfail="ONOS nodes have different views of hosts" )
1274
1275 main.step( "Each host has an IP address" )
1276 ipResult = main.TRUE
1277 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001278 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001279 if hosts[ controller ]:
1280 for host in hosts[ controller ]:
1281 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001282 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001283 controllerStr + ": " + str( host ) )
1284 ipResult = main.FALSE
1285 utilities.assert_equals(
1286 expect=main.TRUE,
1287 actual=ipResult,
1288 onpass="The ips of the hosts aren't empty",
1289 onfail="The ip of at least one host is missing" )
1290
1291 # Strongly connected clusters of devices
1292 main.step( "Cluster view is consistent across ONOS nodes" )
1293 consistentClustersResult = main.TRUE
1294 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001295 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001296 if "Error" not in clusters[ controller ]:
1297 if clusters[ controller ] == clusters[ 0 ]:
1298 continue
1299 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001300 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001301 " is inconsistent with ONOS1" )
1302 consistentClustersResult = main.FALSE
1303
1304 else:
1305 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001306 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001307 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001308 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001309 " clusters response: " +
1310 repr( clusters[ controller ] ) )
1311 utilities.assert_equals(
1312 expect=main.TRUE,
1313 actual=consistentClustersResult,
1314 onpass="Clusters view is consistent across all ONOS nodes",
1315 onfail="ONOS nodes have different views of clusters" )
1316 if not consistentClustersResult:
1317 main.log.debug( clusters )
1318
1319 # there should always only be one cluster
1320 main.step( "Cluster view correct across ONOS nodes" )
1321 try:
1322 numClusters = len( json.loads( clusters[ 0 ] ) )
1323 except ( ValueError, TypeError ):
1324 main.log.exception( "Error parsing clusters[0]: " +
1325 repr( clusters[ 0 ] ) )
1326 numClusters = "ERROR"
1327 utilities.assert_equals(
1328 expect=1,
1329 actual=numClusters,
1330 onpass="ONOS shows 1 SCC",
1331 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1332
1333 main.step( "Comparing ONOS topology to MN" )
1334 devicesResults = main.TRUE
1335 linksResults = main.TRUE
1336 hostsResults = main.TRUE
1337 mnSwitches = main.Mininet1.getSwitches()
1338 mnLinks = main.Mininet1.getLinks()
1339 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001340 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001341 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001342 currentDevicesResult = main.topoRelated.compareDevicePort(
1343 main.Mininet1, controller,
1344 mnSwitches, devices, ports )
1345 utilities.assert_equals( expect=main.TRUE,
1346 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001347 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001348 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001349 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001350 " Switches view is incorrect" )
1351
1352 currentLinksResult = main.topoRelated.compareBase( links, controller,
1353 main.Mininet1.compareLinks,
1354 [ mnSwitches, mnLinks ] )
1355 utilities.assert_equals( expect=main.TRUE,
1356 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001357 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001358 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001359 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001360 " links view is incorrect" )
1361
1362 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1363 currentHostsResult = main.Mininet1.compareHosts(
1364 mnHosts,
1365 hosts[ controller ] )
1366 else:
1367 currentHostsResult = main.FALSE
1368 utilities.assert_equals( expect=main.TRUE,
1369 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001370 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001371 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001372 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001373 " hosts don't match Mininet" )
1374
1375 devicesResults = devicesResults and currentDevicesResult
1376 linksResults = linksResults and currentLinksResult
1377 hostsResults = hostsResults and currentHostsResult
1378
1379 main.step( "Device information is correct" )
1380 utilities.assert_equals(
1381 expect=main.TRUE,
1382 actual=devicesResults,
1383 onpass="Device information is correct",
1384 onfail="Device information is incorrect" )
1385
1386 main.step( "Links are correct" )
1387 utilities.assert_equals(
1388 expect=main.TRUE,
1389 actual=linksResults,
1390 onpass="Link are correct",
1391 onfail="Links are incorrect" )
1392
1393 main.step( "Hosts are correct" )
1394 utilities.assert_equals(
1395 expect=main.TRUE,
1396 actual=hostsResults,
1397 onpass="Hosts are correct",
1398 onfail="Hosts are incorrect" )
1399
1400 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001401 """
1402 Check for basic functionality with distributed primitives
1403 """
Jon Halle0f0b342017-04-18 11:43:47 -07001404 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001405 try:
1406 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001407 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001408 assert main.pCounterName, "main.pCounterName not defined"
1409 assert main.onosSetName, "main.onosSetName not defined"
1410 # NOTE: assert fails if value is 0/None/Empty/False
1411 try:
1412 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001413 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001414 main.log.error( "main.pCounterValue not defined, setting to 0" )
1415 main.pCounterValue = 0
1416 try:
1417 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001418 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001419 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001420 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001421 # Variables for the distributed primitives tests. These are local only
1422 addValue = "a"
1423 addAllValue = "a b c d e f"
1424 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001425 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001426 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001427 workQueueName = "TestON-Queue"
1428 workQueueCompleted = 0
1429 workQueueInProgress = 0
1430 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001431
1432 description = "Check for basic functionality with distributed " +\
1433 "primitives"
1434 main.case( description )
1435 main.caseExplanation = "Test the methods of the distributed " +\
1436 "primitives (counters and sets) throught the cli"
1437 # DISTRIBUTED ATOMIC COUNTERS
1438 # Partitioned counters
1439 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001440 pCounters = main.Cluster.command( "counterTestAddAndGet",
1441 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001442 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001443 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001444 main.pCounterValue += 1
1445 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001446 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001447 pCounterResults = True
1448 for i in addedPValues:
1449 tmpResult = i in pCounters
1450 pCounterResults = pCounterResults and tmpResult
1451 if not tmpResult:
1452 main.log.error( str( i ) + " is not in partitioned "
1453 "counter incremented results" )
1454 utilities.assert_equals( expect=True,
1455 actual=pCounterResults,
1456 onpass="Default counter incremented",
1457 onfail="Error incrementing default" +
1458 " counter" )
1459
1460 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001461 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1462 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001463 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001464 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001465 addedPValues.append( main.pCounterValue )
1466 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001467 # Check that counter incremented numController times
1468 pCounterResults = True
1469 for i in addedPValues:
1470 tmpResult = i in pCounters
1471 pCounterResults = pCounterResults and tmpResult
1472 if not tmpResult:
1473 main.log.error( str( i ) + " is not in partitioned "
1474 "counter incremented results" )
1475 utilities.assert_equals( expect=True,
1476 actual=pCounterResults,
1477 onpass="Default counter incremented",
1478 onfail="Error incrementing default" +
1479 " counter" )
1480
1481 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001482 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001483 utilities.assert_equals( expect=main.TRUE,
1484 actual=incrementCheck,
1485 onpass="Added counters are correct",
1486 onfail="Added counters are incorrect" )
1487
1488 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001489 pCounters = main.Cluster.command( "counterTestAddAndGet",
1490 args=[ main.pCounterName ],
1491 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001492 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001493 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001494 main.pCounterValue += -8
1495 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001496 # Check that counter incremented numController times
1497 pCounterResults = True
1498 for i in addedPValues:
1499 tmpResult = i in pCounters
1500 pCounterResults = pCounterResults and tmpResult
1501 if not tmpResult:
1502 main.log.error( str( i ) + " is not in partitioned "
1503 "counter incremented results" )
1504 utilities.assert_equals( expect=True,
1505 actual=pCounterResults,
1506 onpass="Default counter incremented",
1507 onfail="Error incrementing default" +
1508 " counter" )
1509
1510 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001511 pCounters = main.Cluster.command( "counterTestAddAndGet",
1512 args=[ main.pCounterName ],
1513 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001514 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001515 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001516 main.pCounterValue += 5
1517 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001518
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001519 # Check that counter incremented numController times
1520 pCounterResults = True
1521 for i in addedPValues:
1522 tmpResult = i in pCounters
1523 pCounterResults = pCounterResults and tmpResult
1524 if not tmpResult:
1525 main.log.error( str( i ) + " is not in partitioned "
1526 "counter incremented results" )
1527 utilities.assert_equals( expect=True,
1528 actual=pCounterResults,
1529 onpass="Default counter incremented",
1530 onfail="Error incrementing default" +
1531 " counter" )
1532
1533 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001534 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1535 args=[ main.pCounterName ],
1536 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001537 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001538 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001539 addedPValues.append( main.pCounterValue )
1540 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001541 # Check that counter incremented numController times
1542 pCounterResults = True
1543 for i in addedPValues:
1544 tmpResult = i in pCounters
1545 pCounterResults = pCounterResults and tmpResult
1546 if not tmpResult:
1547 main.log.error( str( i ) + " is not in partitioned "
1548 "counter incremented results" )
1549 utilities.assert_equals( expect=True,
1550 actual=pCounterResults,
1551 onpass="Default counter incremented",
1552 onfail="Error incrementing default" +
1553 " counter" )
1554
1555 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001556 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001557 utilities.assert_equals( expect=main.TRUE,
1558 actual=incrementCheck,
1559 onpass="Added counters are correct",
1560 onfail="Added counters are incorrect" )
1561
1562 # DISTRIBUTED SETS
1563 main.step( "Distributed Set get" )
1564 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001565 getResponses = main.Cluster.command( "setTestGet",
1566 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001567 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001568 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001569 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001570 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001571 current = set( getResponses[ i ] )
1572 if len( current ) == len( getResponses[ i ] ):
1573 # no repeats
1574 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001575 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001576 " has incorrect view" +
1577 " of set " + main.onosSetName + ":\n" +
1578 str( getResponses[ i ] ) )
1579 main.log.debug( "Expected: " + str( main.onosSet ) )
1580 main.log.debug( "Actual: " + str( current ) )
1581 getResults = main.FALSE
1582 else:
1583 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001584 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001585 " has repeat elements in" +
1586 " set " + main.onosSetName + ":\n" +
1587 str( getResponses[ i ] ) )
1588 getResults = main.FALSE
1589 elif getResponses[ i ] == main.ERROR:
1590 getResults = main.FALSE
1591 utilities.assert_equals( expect=main.TRUE,
1592 actual=getResults,
1593 onpass="Set elements are correct",
1594 onfail="Set elements are incorrect" )
1595
1596 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001597 sizeResponses = main.Cluster.command( "setTestSize",
1598 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001599 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001600 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001601 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001602 if size != sizeResponses[ i ]:
1603 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001604 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001605 " expected a size of " + str( size ) +
1606 " for set " + main.onosSetName +
1607 " but got " + str( sizeResponses[ i ] ) )
1608 utilities.assert_equals( expect=main.TRUE,
1609 actual=sizeResults,
1610 onpass="Set sizes are correct",
1611 onfail="Set sizes are incorrect" )
1612
1613 main.step( "Distributed Set add()" )
1614 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001615 addResponses = main.Cluster.command( "setTestAdd",
1616 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001617 # main.TRUE = successfully changed the set
1618 # main.FALSE = action resulted in no change in set
1619 # main.ERROR - Some error in executing the function
1620 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001621 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001622 if addResponses[ i ] == main.TRUE:
1623 # All is well
1624 pass
1625 elif addResponses[ i ] == main.FALSE:
1626 # Already in set, probably fine
1627 pass
1628 elif addResponses[ i ] == main.ERROR:
1629 # Error in execution
1630 addResults = main.FALSE
1631 else:
1632 # unexpected result
1633 addResults = main.FALSE
1634 if addResults != main.TRUE:
1635 main.log.error( "Error executing set add" )
1636
1637 # Check if set is still correct
1638 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001639 getResponses = main.Cluster.command( "setTestGet",
1640 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001641 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001642 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001643 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001644 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001645 current = set( getResponses[ i ] )
1646 if len( current ) == len( getResponses[ i ] ):
1647 # no repeats
1648 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001649 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001650 " of set " + main.onosSetName + ":\n" +
1651 str( getResponses[ i ] ) )
1652 main.log.debug( "Expected: " + str( main.onosSet ) )
1653 main.log.debug( "Actual: " + str( current ) )
1654 getResults = main.FALSE
1655 else:
1656 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001657 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001658 " set " + main.onosSetName + ":\n" +
1659 str( getResponses[ i ] ) )
1660 getResults = main.FALSE
1661 elif getResponses[ i ] == main.ERROR:
1662 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001663 sizeResponses = main.Cluster.command( "setTestSize",
1664 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001665 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001666 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001667 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001668 if size != sizeResponses[ i ]:
1669 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001670 main.log.error( node + " expected a size of " +
1671 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001672 " but got " + str( sizeResponses[ i ] ) )
1673 addResults = addResults and getResults and sizeResults
1674 utilities.assert_equals( expect=main.TRUE,
1675 actual=addResults,
1676 onpass="Set add correct",
1677 onfail="Set add was incorrect" )
1678
1679 main.step( "Distributed Set addAll()" )
1680 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001681 addResponses = main.Cluster.command( "setTestAdd",
1682 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001683 # main.TRUE = successfully changed the set
1684 # main.FALSE = action resulted in no change in set
1685 # main.ERROR - Some error in executing the function
1686 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001687 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001688 if addResponses[ i ] == main.TRUE:
1689 # All is well
1690 pass
1691 elif addResponses[ i ] == main.FALSE:
1692 # Already in set, probably fine
1693 pass
1694 elif addResponses[ i ] == main.ERROR:
1695 # Error in execution
1696 addAllResults = main.FALSE
1697 else:
1698 # unexpected result
1699 addAllResults = main.FALSE
1700 if addAllResults != main.TRUE:
1701 main.log.error( "Error executing set addAll" )
1702
1703 # Check if set is still correct
1704 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001705 getResponses = main.Cluster.command( "setTestGet",
1706 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001707 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001708 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001709 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001710 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001711 current = set( getResponses[ i ] )
1712 if len( current ) == len( getResponses[ i ] ):
1713 # no repeats
1714 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001715 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001716 " of set " + main.onosSetName + ":\n" +
1717 str( getResponses[ i ] ) )
1718 main.log.debug( "Expected: " + str( main.onosSet ) )
1719 main.log.debug( "Actual: " + str( current ) )
1720 getResults = main.FALSE
1721 else:
1722 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001723 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001724 " set " + main.onosSetName + ":\n" +
1725 str( getResponses[ i ] ) )
1726 getResults = main.FALSE
1727 elif getResponses[ i ] == main.ERROR:
1728 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001729 sizeResponses = main.Cluster.command( "setTestSize",
1730 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001731 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001732 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001733 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001734 if size != sizeResponses[ i ]:
1735 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001736 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001737 " for set " + main.onosSetName +
1738 " but got " + str( sizeResponses[ i ] ) )
1739 addAllResults = addAllResults and getResults and sizeResults
1740 utilities.assert_equals( expect=main.TRUE,
1741 actual=addAllResults,
1742 onpass="Set addAll correct",
1743 onfail="Set addAll was incorrect" )
1744
1745 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001746 containsResponses = main.Cluster.command( "setTestGet",
1747 args=[ main.onosSetName ],
1748 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001749 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001750 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001751 if containsResponses[ i ] == main.ERROR:
1752 containsResults = main.FALSE
1753 else:
1754 containsResults = containsResults and\
1755 containsResponses[ i ][ 1 ]
1756 utilities.assert_equals( expect=main.TRUE,
1757 actual=containsResults,
1758 onpass="Set contains is functional",
1759 onfail="Set contains failed" )
1760
1761 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001762 containsAllResponses = main.Cluster.command( "setTestGet",
1763 args=[ main.onosSetName ],
1764 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001765 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001766 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001767 if containsResponses[ i ] == main.ERROR:
1768 containsResults = main.FALSE
1769 else:
1770 containsResults = containsResults and\
1771 containsResponses[ i ][ 1 ]
1772 utilities.assert_equals( expect=main.TRUE,
1773 actual=containsAllResults,
1774 onpass="Set containsAll is functional",
1775 onfail="Set containsAll failed" )
1776
1777 main.step( "Distributed Set remove()" )
1778 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001779 removeResponses = main.Cluster.command( "setTestRemove",
1780 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001781 # main.TRUE = successfully changed the set
1782 # main.FALSE = action resulted in no change in set
1783 # main.ERROR - Some error in executing the function
1784 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001785 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001786 if removeResponses[ i ] == main.TRUE:
1787 # All is well
1788 pass
1789 elif removeResponses[ i ] == main.FALSE:
1790 # not in set, probably fine
1791 pass
1792 elif removeResponses[ i ] == main.ERROR:
1793 # Error in execution
1794 removeResults = main.FALSE
1795 else:
1796 # unexpected result
1797 removeResults = main.FALSE
1798 if removeResults != main.TRUE:
1799 main.log.error( "Error executing set remove" )
1800
1801 # Check if set is still correct
1802 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001803 getResponses = main.Cluster.command( "setTestGet",
1804 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001805 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001806 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001807 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001808 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001809 current = set( getResponses[ i ] )
1810 if len( current ) == len( getResponses[ i ] ):
1811 # no repeats
1812 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001813 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001814 " of set " + main.onosSetName + ":\n" +
1815 str( getResponses[ i ] ) )
1816 main.log.debug( "Expected: " + str( main.onosSet ) )
1817 main.log.debug( "Actual: " + str( current ) )
1818 getResults = main.FALSE
1819 else:
1820 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001821 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001822 " set " + main.onosSetName + ":\n" +
1823 str( getResponses[ i ] ) )
1824 getResults = main.FALSE
1825 elif getResponses[ i ] == main.ERROR:
1826 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001827 sizeResponses = main.Cluster.command( "setTestSize",
1828 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001829 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001830 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001831 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001832 if size != sizeResponses[ i ]:
1833 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001834 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001835 " for set " + main.onosSetName +
1836 " but got " + str( sizeResponses[ i ] ) )
1837 removeResults = removeResults and getResults and sizeResults
1838 utilities.assert_equals( expect=main.TRUE,
1839 actual=removeResults,
1840 onpass="Set remove correct",
1841 onfail="Set remove was incorrect" )
1842
1843 main.step( "Distributed Set removeAll()" )
1844 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001845 removeAllResponses = main.Cluster.command( "setTestRemove",
1846 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001847 # main.TRUE = successfully changed the set
1848 # main.FALSE = action resulted in no change in set
1849 # main.ERROR - Some error in executing the function
1850 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001851 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001852 if removeAllResponses[ i ] == main.TRUE:
1853 # All is well
1854 pass
1855 elif removeAllResponses[ i ] == main.FALSE:
1856 # not in set, probably fine
1857 pass
1858 elif removeAllResponses[ i ] == main.ERROR:
1859 # Error in execution
1860 removeAllResults = main.FALSE
1861 else:
1862 # unexpected result
1863 removeAllResults = main.FALSE
1864 if removeAllResults != main.TRUE:
1865 main.log.error( "Error executing set removeAll" )
1866
1867 # Check if set is still correct
1868 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001869 getResponses = main.Cluster.command( "setTestGet",
1870 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001871 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001872 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001873 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001874 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001875 current = set( getResponses[ i ] )
1876 if len( current ) == len( getResponses[ i ] ):
1877 # no repeats
1878 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001879 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001880 " of set " + main.onosSetName + ":\n" +
1881 str( getResponses[ i ] ) )
1882 main.log.debug( "Expected: " + str( main.onosSet ) )
1883 main.log.debug( "Actual: " + str( current ) )
1884 getResults = main.FALSE
1885 else:
1886 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001887 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001888 " set " + main.onosSetName + ":\n" +
1889 str( getResponses[ i ] ) )
1890 getResults = main.FALSE
1891 elif getResponses[ i ] == main.ERROR:
1892 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001893 sizeResponses = main.Cluster.command( "setTestSize",
1894 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001895 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001896 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001897 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001898 if size != sizeResponses[ i ]:
1899 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001900 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001901 " for set " + main.onosSetName +
1902 " but got " + str( sizeResponses[ i ] ) )
1903 removeAllResults = removeAllResults and getResults and sizeResults
1904 utilities.assert_equals( expect=main.TRUE,
1905 actual=removeAllResults,
1906 onpass="Set removeAll correct",
1907 onfail="Set removeAll was incorrect" )
1908
1909 main.step( "Distributed Set addAll()" )
1910 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001911 addResponses = main.Cluster.command( "setTestAdd",
1912 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001913 # main.TRUE = successfully changed the set
1914 # main.FALSE = action resulted in no change in set
1915 # main.ERROR - Some error in executing the function
1916 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001917 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001918 if addResponses[ i ] == main.TRUE:
1919 # All is well
1920 pass
1921 elif addResponses[ i ] == main.FALSE:
1922 # Already in set, probably fine
1923 pass
1924 elif addResponses[ i ] == main.ERROR:
1925 # Error in execution
1926 addAllResults = main.FALSE
1927 else:
1928 # unexpected result
1929 addAllResults = main.FALSE
1930 if addAllResults != main.TRUE:
1931 main.log.error( "Error executing set addAll" )
1932
1933 # Check if set is still correct
1934 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001935 getResponses = main.Cluster.command( "setTestGet",
1936 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001937 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001938 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001939 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07001940 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001941 current = set( getResponses[ i ] )
1942 if len( current ) == len( getResponses[ i ] ):
1943 # no repeats
1944 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001945 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001946 " of set " + main.onosSetName + ":\n" +
1947 str( getResponses[ i ] ) )
1948 main.log.debug( "Expected: " + str( main.onosSet ) )
1949 main.log.debug( "Actual: " + str( current ) )
1950 getResults = main.FALSE
1951 else:
1952 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001953 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001954 " set " + main.onosSetName + ":\n" +
1955 str( getResponses[ i ] ) )
1956 getResults = main.FALSE
1957 elif getResponses[ i ] == main.ERROR:
1958 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001959 sizeResponses = main.Cluster.command( "setTestSize",
1960 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001961 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001962 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001963 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001964 if size != sizeResponses[ i ]:
1965 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001966 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001967 " for set " + main.onosSetName +
1968 " but got " + str( sizeResponses[ i ] ) )
1969 addAllResults = addAllResults and getResults and sizeResults
1970 utilities.assert_equals( expect=main.TRUE,
1971 actual=addAllResults,
1972 onpass="Set addAll correct",
1973 onfail="Set addAll was incorrect" )
1974
1975 main.step( "Distributed Set clear()" )
1976 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07001977 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07001978 args=[ main.onosSetName, " " ], # Values doesn't matter
1979 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001980 # main.TRUE = successfully changed the set
1981 # main.FALSE = action resulted in no change in set
1982 # main.ERROR - Some error in executing the function
1983 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001984 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001985 if clearResponses[ i ] == main.TRUE:
1986 # All is well
1987 pass
1988 elif clearResponses[ i ] == main.FALSE:
1989 # Nothing set, probably fine
1990 pass
1991 elif clearResponses[ i ] == main.ERROR:
1992 # Error in execution
1993 clearResults = main.FALSE
1994 else:
1995 # unexpected result
1996 clearResults = main.FALSE
1997 if clearResults != main.TRUE:
1998 main.log.error( "Error executing set clear" )
1999
2000 # Check if set is still correct
2001 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002002 getResponses = main.Cluster.command( "setTestGet",
2003 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002004 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002005 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002006 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002007 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002008 current = set( getResponses[ i ] )
2009 if len( current ) == len( getResponses[ i ] ):
2010 # no repeats
2011 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002012 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002013 " of set " + main.onosSetName + ":\n" +
2014 str( getResponses[ i ] ) )
2015 main.log.debug( "Expected: " + str( main.onosSet ) )
2016 main.log.debug( "Actual: " + str( current ) )
2017 getResults = main.FALSE
2018 else:
2019 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002020 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002021 " set " + main.onosSetName + ":\n" +
2022 str( getResponses[ i ] ) )
2023 getResults = main.FALSE
2024 elif getResponses[ i ] == main.ERROR:
2025 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002026 sizeResponses = main.Cluster.command( "setTestSize",
2027 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002028 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002029 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002030 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002031 if size != sizeResponses[ i ]:
2032 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002033 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002034 " for set " + main.onosSetName +
2035 " but got " + str( sizeResponses[ i ] ) )
2036 clearResults = clearResults and getResults and sizeResults
2037 utilities.assert_equals( expect=main.TRUE,
2038 actual=clearResults,
2039 onpass="Set clear correct",
2040 onfail="Set clear was incorrect" )
2041
2042 main.step( "Distributed Set addAll()" )
2043 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002044 addResponses = main.Cluster.command( "setTestAdd",
2045 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002046 # main.TRUE = successfully changed the set
2047 # main.FALSE = action resulted in no change in set
2048 # main.ERROR - Some error in executing the function
2049 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002050 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002051 if addResponses[ i ] == main.TRUE:
2052 # All is well
2053 pass
2054 elif addResponses[ i ] == main.FALSE:
2055 # Already in set, probably fine
2056 pass
2057 elif addResponses[ i ] == main.ERROR:
2058 # Error in execution
2059 addAllResults = main.FALSE
2060 else:
2061 # unexpected result
2062 addAllResults = main.FALSE
2063 if addAllResults != main.TRUE:
2064 main.log.error( "Error executing set addAll" )
2065
2066 # Check if set is still correct
2067 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002068 getResponses = main.Cluster.command( "setTestGet",
2069 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002070 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002071 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002072 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002073 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002074 current = set( getResponses[ i ] )
2075 if len( current ) == len( getResponses[ i ] ):
2076 # no repeats
2077 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002078 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002079 " of set " + main.onosSetName + ":\n" +
2080 str( getResponses[ i ] ) )
2081 main.log.debug( "Expected: " + str( main.onosSet ) )
2082 main.log.debug( "Actual: " + str( current ) )
2083 getResults = main.FALSE
2084 else:
2085 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002086 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002087 " set " + main.onosSetName + ":\n" +
2088 str( getResponses[ i ] ) )
2089 getResults = main.FALSE
2090 elif getResponses[ i ] == main.ERROR:
2091 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002092 sizeResponses = main.Cluster.command( "setTestSize",
2093 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002094 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002095 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002096 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002097 if size != sizeResponses[ i ]:
2098 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002099 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002100 " for set " + main.onosSetName +
2101 " but got " + str( sizeResponses[ i ] ) )
2102 addAllResults = addAllResults and getResults and sizeResults
2103 utilities.assert_equals( expect=main.TRUE,
2104 actual=addAllResults,
2105 onpass="Set addAll correct",
2106 onfail="Set addAll was incorrect" )
2107
2108 main.step( "Distributed Set retain()" )
2109 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002110 retainResponses = main.Cluster.command( "setTestRemove",
2111 args=[ main.onosSetName, retainValue ],
2112 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002113 # main.TRUE = successfully changed the set
2114 # main.FALSE = action resulted in no change in set
2115 # main.ERROR - Some error in executing the function
2116 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002117 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002118 if retainResponses[ i ] == main.TRUE:
2119 # All is well
2120 pass
2121 elif retainResponses[ i ] == main.FALSE:
2122 # Already in set, probably fine
2123 pass
2124 elif retainResponses[ i ] == main.ERROR:
2125 # Error in execution
2126 retainResults = main.FALSE
2127 else:
2128 # unexpected result
2129 retainResults = main.FALSE
2130 if retainResults != main.TRUE:
2131 main.log.error( "Error executing set retain" )
2132
2133 # Check if set is still correct
2134 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002135 getResponses = main.Cluster.command( "setTestGet",
2136 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002137 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002138 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002139 node = main.Cluster.active( i )
Jon Hallf37d44d2017-05-24 10:37:30 -07002140 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002141 current = set( getResponses[ i ] )
2142 if len( current ) == len( getResponses[ i ] ):
2143 # no repeats
2144 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002145 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002146 " of set " + main.onosSetName + ":\n" +
2147 str( getResponses[ i ] ) )
2148 main.log.debug( "Expected: " + str( main.onosSet ) )
2149 main.log.debug( "Actual: " + str( current ) )
2150 getResults = main.FALSE
2151 else:
2152 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002153 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002154 " set " + main.onosSetName + ":\n" +
2155 str( getResponses[ i ] ) )
2156 getResults = main.FALSE
2157 elif getResponses[ i ] == main.ERROR:
2158 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002159 sizeResponses = main.Cluster.command( "setTestSize",
2160 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002161 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002162 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002163 node = main.Cluster.active( i )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002164 if size != sizeResponses[ i ]:
2165 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002166 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002167 str( size ) + " for set " + main.onosSetName +
2168 " but got " + str( sizeResponses[ i ] ) )
2169 retainResults = retainResults and getResults and sizeResults
2170 utilities.assert_equals( expect=main.TRUE,
2171 actual=retainResults,
2172 onpass="Set retain correct",
2173 onfail="Set retain was incorrect" )
2174
2175 # Transactional maps
2176 main.step( "Partitioned Transactional maps put" )
2177 tMapValue = "Testing"
2178 numKeys = 100
2179 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002180 ctrl = main.Cluster.next()
2181 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002182 if putResponses and len( putResponses ) == 100:
2183 for i in putResponses:
2184 if putResponses[ i ][ 'value' ] != tMapValue:
2185 putResult = False
2186 else:
2187 putResult = False
2188 if not putResult:
2189 main.log.debug( "Put response values: " + str( putResponses ) )
2190 utilities.assert_equals( expect=True,
2191 actual=putResult,
2192 onpass="Partitioned Transactional Map put successful",
2193 onfail="Partitioned Transactional Map put values are incorrect" )
2194
2195 main.step( "Partitioned Transactional maps get" )
2196 # FIXME: is this sleep needed?
2197 time.sleep( 5 )
2198
2199 getCheck = True
2200 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002201 getResponses = main.Cluster.command( "transactionalMapGet",
2202 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002203 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002204 for node in getResponses:
2205 if node != tMapValue:
2206 valueCheck = False
2207 if not valueCheck:
Jon Hallf37d44d2017-05-24 10:37:30 -07002208 main.log.warn( "Values for key 'Key" + str(n) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002209 main.log.warn( getResponses )
2210 getCheck = getCheck and valueCheck
2211 utilities.assert_equals( expect=True,
2212 actual=getCheck,
2213 onpass="Partitioned Transactional Map get values were correct",
2214 onfail="Partitioned Transactional Map values incorrect" )
2215
2216 # DISTRIBUTED ATOMIC VALUE
2217 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002218 getValues = main.Cluster.command( "valueTestGet",
2219 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002220 main.log.debug( getValues )
2221 # Check the results
2222 atomicValueGetResult = True
2223 expected = valueValue if valueValue is not None else "null"
2224 main.log.debug( "Checking for value of " + expected )
2225 for i in getValues:
2226 if i != expected:
2227 atomicValueGetResult = False
2228 utilities.assert_equals( expect=True,
2229 actual=atomicValueGetResult,
2230 onpass="Atomic Value get successful",
2231 onfail="Error getting atomic Value " +
2232 str( valueValue ) + ", found: " +
2233 str( getValues ) )
2234
2235 main.step( "Atomic Value set()" )
2236 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002237 setValues = main.Cluster.command( "valueTestSet",
2238 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002239 main.log.debug( setValues )
2240 # Check the results
2241 atomicValueSetResults = True
2242 for i in setValues:
2243 if i != main.TRUE:
2244 atomicValueSetResults = False
2245 utilities.assert_equals( expect=True,
2246 actual=atomicValueSetResults,
2247 onpass="Atomic Value set successful",
2248 onfail="Error setting atomic Value" +
2249 str( setValues ) )
2250
2251 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002252 getValues = main.Cluster.command( "valueTestGet",
2253 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002254 main.log.debug( getValues )
2255 # Check the results
2256 atomicValueGetResult = True
2257 expected = valueValue if valueValue is not None else "null"
2258 main.log.debug( "Checking for value of " + expected )
2259 for i in getValues:
2260 if i != expected:
2261 atomicValueGetResult = False
2262 utilities.assert_equals( expect=True,
2263 actual=atomicValueGetResult,
2264 onpass="Atomic Value get successful",
2265 onfail="Error getting atomic Value " +
2266 str( valueValue ) + ", found: " +
2267 str( getValues ) )
2268
2269 main.step( "Atomic Value compareAndSet()" )
2270 oldValue = valueValue
2271 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002272 ctrl = main.Cluster.next()
2273 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002274 main.log.debug( CASValue )
2275 utilities.assert_equals( expect=main.TRUE,
2276 actual=CASValue,
2277 onpass="Atomic Value comapreAndSet successful",
2278 onfail="Error setting atomic Value:" +
2279 str( CASValue ) )
2280
2281 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002282 getValues = main.Cluster.command( "valueTestGet",
2283 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002284 main.log.debug( getValues )
2285 # Check the results
2286 atomicValueGetResult = True
2287 expected = valueValue if valueValue is not None else "null"
2288 main.log.debug( "Checking for value of " + expected )
2289 for i in getValues:
2290 if i != expected:
2291 atomicValueGetResult = False
2292 utilities.assert_equals( expect=True,
2293 actual=atomicValueGetResult,
2294 onpass="Atomic Value get successful",
2295 onfail="Error getting atomic Value " +
2296 str( valueValue ) + ", found: " +
2297 str( getValues ) )
2298
2299 main.step( "Atomic Value getAndSet()" )
2300 oldValue = valueValue
2301 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002302 ctrl = main.Cluster.next()
2303 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002304 main.log.debug( GASValue )
2305 expected = oldValue if oldValue is not None else "null"
2306 utilities.assert_equals( expect=expected,
2307 actual=GASValue,
2308 onpass="Atomic Value GAS successful",
2309 onfail="Error with GetAndSet atomic Value: expected " +
2310 str( expected ) + ", found: " +
2311 str( GASValue ) )
2312
2313 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002314 getValues = main.Cluster.command( "valueTestGet",
2315 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002316 main.log.debug( getValues )
2317 # Check the results
2318 atomicValueGetResult = True
2319 expected = valueValue if valueValue is not None else "null"
2320 main.log.debug( "Checking for value of " + expected )
2321 for i in getValues:
2322 if i != expected:
2323 atomicValueGetResult = False
2324 utilities.assert_equals( expect=True,
2325 actual=atomicValueGetResult,
2326 onpass="Atomic Value get successful",
2327 onfail="Error getting atomic Value: expected " +
2328 str( valueValue ) + ", found: " +
2329 str( getValues ) )
2330
2331 main.step( "Atomic Value destory()" )
2332 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002333 ctrl = main.Cluster.next()
2334 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002335 main.log.debug( destroyResult )
2336 # Check the results
2337 utilities.assert_equals( expect=main.TRUE,
2338 actual=destroyResult,
2339 onpass="Atomic Value destroy successful",
2340 onfail="Error destroying atomic Value" )
2341
2342 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002343 getValues = main.Cluster.command( "valueTestGet",
2344 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002345 main.log.debug( getValues )
2346 # Check the results
2347 atomicValueGetResult = True
2348 expected = valueValue if valueValue is not None else "null"
2349 main.log.debug( "Checking for value of " + expected )
2350 for i in getValues:
2351 if i != expected:
2352 atomicValueGetResult = False
2353 utilities.assert_equals( expect=True,
2354 actual=atomicValueGetResult,
2355 onpass="Atomic Value get successful",
2356 onfail="Error getting atomic Value " +
2357 str( valueValue ) + ", found: " +
2358 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002359
2360 # WORK QUEUES
2361 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002362 ctrl = main.Cluster.next()
2363 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002364 workQueuePending += 1
2365 main.log.debug( addResult )
2366 # Check the results
2367 utilities.assert_equals( expect=main.TRUE,
2368 actual=addResult,
2369 onpass="Work Queue add successful",
2370 onfail="Error adding to Work Queue" )
2371
2372 main.step( "Check the work queue stats" )
2373 statsResults = self.workQueueStatsCheck( workQueueName,
2374 workQueueCompleted,
2375 workQueueInProgress,
2376 workQueuePending )
2377 utilities.assert_equals( expect=True,
2378 actual=statsResults,
2379 onpass="Work Queue stats correct",
2380 onfail="Work Queue stats incorrect " )
2381
2382 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002383 ctrl = main.Cluster.next()
2384 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002385 workQueuePending += 2
2386 main.log.debug( addMultipleResult )
2387 # Check the results
2388 utilities.assert_equals( expect=main.TRUE,
2389 actual=addMultipleResult,
2390 onpass="Work Queue add multiple successful",
2391 onfail="Error adding multiple items to Work Queue" )
2392
2393 main.step( "Check the work queue stats" )
2394 statsResults = self.workQueueStatsCheck( workQueueName,
2395 workQueueCompleted,
2396 workQueueInProgress,
2397 workQueuePending )
2398 utilities.assert_equals( expect=True,
2399 actual=statsResults,
2400 onpass="Work Queue stats correct",
2401 onfail="Work Queue stats incorrect " )
2402
2403 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002404 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002405 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002406 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002407 workQueuePending -= number
2408 workQueueCompleted += number
2409 main.log.debug( take1Result )
2410 # Check the results
2411 utilities.assert_equals( expect=main.TRUE,
2412 actual=take1Result,
2413 onpass="Work Queue takeAndComplete 1 successful",
2414 onfail="Error taking 1 from Work Queue" )
2415
2416 main.step( "Check the work queue stats" )
2417 statsResults = self.workQueueStatsCheck( workQueueName,
2418 workQueueCompleted,
2419 workQueueInProgress,
2420 workQueuePending )
2421 utilities.assert_equals( expect=True,
2422 actual=statsResults,
2423 onpass="Work Queue stats correct",
2424 onfail="Work Queue stats incorrect " )
2425
2426 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002427 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002428 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002429 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002430 workQueuePending -= number
2431 workQueueCompleted += number
2432 main.log.debug( take2Result )
2433 # Check the results
2434 utilities.assert_equals( expect=main.TRUE,
2435 actual=take2Result,
2436 onpass="Work Queue takeAndComplete 2 successful",
2437 onfail="Error taking 2 from Work Queue" )
2438
2439 main.step( "Check the work queue stats" )
2440 statsResults = self.workQueueStatsCheck( workQueueName,
2441 workQueueCompleted,
2442 workQueueInProgress,
2443 workQueuePending )
2444 utilities.assert_equals( expect=True,
2445 actual=statsResults,
2446 onpass="Work Queue stats correct",
2447 onfail="Work Queue stats incorrect " )
2448
2449 main.step( "Work Queue destroy()" )
2450 valueValue = None
2451 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002452 ctrl = main.Cluster.next()
2453 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002454 workQueueCompleted = 0
2455 workQueueInProgress = 0
2456 workQueuePending = 0
2457 main.log.debug( destroyResult )
2458 # Check the results
2459 utilities.assert_equals( expect=main.TRUE,
2460 actual=destroyResult,
2461 onpass="Work Queue destroy successful",
2462 onfail="Error destroying Work Queue" )
2463
2464 main.step( "Check the work queue stats" )
2465 statsResults = self.workQueueStatsCheck( workQueueName,
2466 workQueueCompleted,
2467 workQueueInProgress,
2468 workQueuePending )
2469 utilities.assert_equals( expect=True,
2470 actual=statsResults,
2471 onpass="Work Queue stats correct",
2472 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002473 except Exception as e:
2474 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002475
2476 def cleanUp( self, main ):
2477 """
2478 Clean up
2479 """
2480 import os
2481 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002482 assert main, "main not defined"
2483 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002484
2485 # printing colors to terminal
2486 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2487 'blue': '\033[94m', 'green': '\033[92m',
2488 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
Jon Hall4173b242017-09-12 17:04:38 -07002489
Devin Lim58046fa2017-07-05 16:55:00 -07002490 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002491
2492 main.step( "Checking raft log size" )
2493 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2494 # get compacted periodically
2495 logCheck = main.Cluster.checkPartitionSize()
2496 utilities.assert_equals( expect=True, actual=logCheck,
2497 onpass="Raft log size is not too big",
2498 onfail="Raft logs grew too big" )
2499
Devin Lim58046fa2017-07-05 16:55:00 -07002500 main.step( "Killing tcpdumps" )
2501 main.Mininet2.stopTcpdump()
2502
2503 testname = main.TEST
2504 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2505 main.step( "Copying MN pcap and ONOS log files to test station" )
2506 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2507 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2508 # NOTE: MN Pcap file is being saved to logdir.
2509 # We scp this file as MN and TestON aren't necessarily the same vm
2510
2511 # FIXME: To be replaced with a Jenkin's post script
2512 # TODO: Load these from params
2513 # NOTE: must end in /
2514 logFolder = "/opt/onos/log/"
2515 logFiles = [ "karaf.log", "karaf.log.1" ]
2516 # NOTE: must end in /
2517 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002518 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002519 dstName = main.logdir + "/" + ctrl.name + "-" + f
2520 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002521 logFolder + f, dstName )
2522 # std*.log's
2523 # NOTE: must end in /
2524 logFolder = "/opt/onos/var/"
2525 logFiles = [ "stderr.log", "stdout.log" ]
2526 # NOTE: must end in /
2527 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002528 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002529 dstName = main.logdir + "/" + ctrl.name + "-" + f
2530 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002531 logFolder + f, dstName )
2532 else:
2533 main.log.debug( "skipping saving log files" )
2534
Jon Hall5d5876e2017-11-30 09:33:16 -08002535 main.step( "Checking ONOS Logs for errors" )
2536 for ctrl in main.Cluster.runningNodes:
2537 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2538 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
2539
Devin Lim58046fa2017-07-05 16:55:00 -07002540 main.step( "Stopping Mininet" )
2541 mnResult = main.Mininet1.stopNet()
2542 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2543 onpass="Mininet stopped",
2544 onfail="MN cleanup NOT successful" )
2545
Devin Lim58046fa2017-07-05 16:55:00 -07002546 try:
2547 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2548 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2549 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2550 timerLog.close()
2551 except NameError as e:
2552 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002553
Devin Lim58046fa2017-07-05 16:55:00 -07002554 def assignMastership( self, main ):
2555 """
2556 Assign mastership to controllers
2557 """
2558 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002559 assert main, "main not defined"
2560 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002561
2562 main.case( "Assigning Controller roles for switches" )
2563 main.caseExplanation = "Check that ONOS is connected to each " +\
2564 "device. Then manually assign" +\
2565 " mastership to specific ONOS nodes using" +\
2566 " 'device-role'"
2567 main.step( "Assign mastership of switches to specific controllers" )
2568 # Manually assign mastership to the controller we want
2569 roleCall = main.TRUE
2570
2571 ipList = []
2572 deviceList = []
Jon Hallca319892017-06-15 15:25:22 -07002573 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07002574 try:
2575 # Assign mastership to specific controllers. This assignment was
2576 # determined for a 7 node cluser, but will work with any sized
2577 # cluster
2578 for i in range( 1, 29 ): # switches 1 through 28
2579 # set up correct variables:
2580 if i == 1:
2581 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002582 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002583 deviceId = onosCli.getDevice( "1000" ).get( 'id' )
2584 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002585 c = 1 % main.Cluster.numCtrls
2586 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002587 deviceId = onosCli.getDevice( "2000" ).get( 'id' )
2588 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002589 c = 1 % main.Cluster.numCtrls
2590 ip = main.Cluster.active( c ).ip_address # ONOS2
Devin Lim58046fa2017-07-05 16:55:00 -07002591 deviceId = onosCli.getDevice( "3000" ).get( 'id' )
2592 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002593 c = 3 % main.Cluster.numCtrls
2594 ip = main.Cluster.active( c ).ip_address # ONOS4
Devin Lim58046fa2017-07-05 16:55:00 -07002595 deviceId = onosCli.getDevice( "3004" ).get( 'id' )
2596 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002597 c = 2 % main.Cluster.numCtrls
2598 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002599 deviceId = onosCli.getDevice( "5000" ).get( 'id' )
2600 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002601 c = 2 % main.Cluster.numCtrls
2602 ip = main.Cluster.active( c ).ip_address # ONOS3
Devin Lim58046fa2017-07-05 16:55:00 -07002603 deviceId = onosCli.getDevice( "6000" ).get( 'id' )
2604 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002605 c = 5 % main.Cluster.numCtrls
2606 ip = main.Cluster.active( c ).ip_address # ONOS6
Devin Lim58046fa2017-07-05 16:55:00 -07002607 deviceId = onosCli.getDevice( "6007" ).get( 'id' )
2608 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002609 c = 4 % main.Cluster.numCtrls
2610 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002611 dpid = '3' + str( i ).zfill( 3 )
2612 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2613 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002614 c = 6 % main.Cluster.numCtrls
2615 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002616 dpid = '6' + str( i ).zfill( 3 )
2617 deviceId = onosCli.getDevice( dpid ).get( 'id' )
2618 elif i == 28:
2619 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002620 ip = main.Cluster.active( c ).ip_address # ONOS1
Devin Lim58046fa2017-07-05 16:55:00 -07002621 deviceId = onosCli.getDevice( "2800" ).get( 'id' )
2622 else:
2623 main.log.error( "You didn't write an else statement for " +
2624 "switch s" + str( i ) )
2625 roleCall = main.FALSE
2626 # Assign switch
2627 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2628 # TODO: make this controller dynamic
2629 roleCall = roleCall and onosCli.deviceRole( deviceId, ip )
2630 ipList.append( ip )
2631 deviceList.append( deviceId )
2632 except ( AttributeError, AssertionError ):
2633 main.log.exception( "Something is wrong with ONOS device view" )
2634 main.log.info( onosCli.devices() )
2635 utilities.assert_equals(
2636 expect=main.TRUE,
2637 actual=roleCall,
2638 onpass="Re-assigned switch mastership to designated controller",
2639 onfail="Something wrong with deviceRole calls" )
2640
2641 main.step( "Check mastership was correctly assigned" )
2642 roleCheck = main.TRUE
2643 # NOTE: This is due to the fact that device mastership change is not
2644 # atomic and is actually a multi step process
2645 time.sleep( 5 )
2646 for i in range( len( ipList ) ):
2647 ip = ipList[ i ]
2648 deviceId = deviceList[ i ]
2649 # Check assignment
2650 master = onosCli.getRole( deviceId ).get( 'master' )
2651 if ip in master:
2652 roleCheck = roleCheck and main.TRUE
2653 else:
2654 roleCheck = roleCheck and main.FALSE
2655 main.log.error( "Error, controller " + ip + " is not" +
2656 " master " + "of device " +
2657 str( deviceId ) + ". Master is " +
2658 repr( master ) + "." )
2659 utilities.assert_equals(
2660 expect=main.TRUE,
2661 actual=roleCheck,
2662 onpass="Switches were successfully reassigned to designated " +
2663 "controller",
2664 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002665
    def bringUpStoppedNodes( self, main ):
        """
        Restart the ONOS nodes listed in main.kill and verify they rejoin.

        Starts the ONOS service on each killed node, polls until they report
        up, restarts their CLI sessions, re-checks the whole cluster, and
        reruns the leadership election on the restarted nodes. Exits the test
        via main.cleanAndExit() if the cluster-wide node check fails.
        Side effect: records the restart duration in main.restartTime.
        """
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.kill, "main.kill not defined"
        main.case( "Restart minority of ONOS nodes" )

        main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
        startResults = main.TRUE
        restartTime = time.time()
        # NOTE: `and` short-circuits, so once one start fails the remaining
        #       nodes in main.kill are not started on this pass
        for ctrl in main.kill:
            startResults = startResults and\
                           ctrl.onosStart( ctrl.ipAddress )
        utilities.assert_equals( expect=main.TRUE, actual=startResults,
                                 onpass="ONOS nodes started successfully",
                                 onfail="ONOS nodes NOT successfully started" )

        main.step( "Checking if ONOS is up yet" )
        # Poll each restarted node, up to 10 rounds, until all report up
        count = 0
        onosIsupResult = main.FALSE
        while onosIsupResult == main.FALSE and count < 10:
            onosIsupResult = main.TRUE
            for ctrl in main.kill:
                onosIsupResult = onosIsupResult and\
                                 ctrl.isup( ctrl.ipAddress )
            count = count + 1
        utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
                                 onpass="ONOS restarted successfully",
                                 onfail="ONOS restart NOT successful" )

        main.step( "Restarting ONOS CLI" )
        cliResults = main.TRUE
        for ctrl in main.kill:
            cliResults = cliResults and\
                         ctrl.startOnosCli( ctrl.ipAddress )
            # mark the node active again so Cluster.active() includes it
            ctrl.active = True
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS node(s) restarted",
                                 onfail="ONOS node(s) did not restart" )

        # Grab the time of restart so we can have some idea of average time
        main.restartTime = time.time() - restartTime
        main.log.debug( "Restart time: " + str( main.restartTime ) )
        # TODO: MAke this configurable. Also, we are breaking the above timer
        main.step( "Checking ONOS nodes" )
        # Retry the cluster-wide node check: up to 5 attempts, 15s apart
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       sleep=15,
                                       attempts=5 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump non-ACTIVE OSGi components for debugging, then abort the test
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        # Shared post-restart verification ( apps, leadership, topology, ... )
        self.commonChecks()

        main.step( "Rerun for election on the node(s) that were killed" )
        runResults = main.TRUE
        for ctrl in main.kill:
            runResults = runResults and\
                         ctrl.electionTestRun()
        utilities.assert_equals( expect=main.TRUE, actual=runResults,
                                 onpass="ONOS nodes reran for election topic",
                                 onfail="Error rerunning for election" )
2741
2742 def upgradeNodes( self, main ):
2743 """
2744 Reinstall some nodes with an upgraded version.
2745
2746 This will reinstall nodes in main.kill with an upgraded version.
2747 """
2748 import time
2749 assert main, "main not defined"
2750 assert utilities.assert_equals, "utilities.assert_equals not defined"
2751 assert main.kill, "main.kill not defined"
2752 nodeNames = [ node.name for node in main.kill ]
2753 main.step( "Upgrading" + str( nodeNames ) + " ONOS nodes" )
2754
2755 stopResults = main.TRUE
2756 uninstallResults = main.TRUE
2757 startResults = main.TRUE
2758 sshResults = main.TRUE
2759 isup = main.TRUE
2760 restartTime = time.time()
2761 for ctrl in main.kill:
2762 stopResults = stopResults and\
2763 ctrl.onosStop( ctrl.ipAddress )
2764 uninstallResults = uninstallResults and\
2765 ctrl.onosUninstall( ctrl.ipAddress )
2766 # Install the new version of onos
2767 startResults = startResults and\
2768 ctrl.onosInstall( options="-fv", node=ctrl.ipAddress )
2769 sshResults = sshResults and\
2770 ctrl.onosSecureSSH( node=ctrl.ipAddress )
2771 isup = isup and ctrl.isup( ctrl.ipAddress )
2772 utilities.assert_equals( expect=main.TRUE, actual=stopResults,
2773 onpass="ONOS nodes stopped successfully",
2774 onfail="ONOS nodes NOT successfully stopped" )
2775 utilities.assert_equals( expect=main.TRUE, actual=uninstallResults,
2776 onpass="ONOS nodes uninstalled successfully",
2777 onfail="ONOS nodes NOT successfully uninstalled" )
2778 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2779 onpass="ONOS nodes started successfully",
2780 onfail="ONOS nodes NOT successfully started" )
2781 utilities.assert_equals( expect=main.TRUE, actual=sshResults,
2782 onpass="Successfully secured onos ssh",
2783 onfail="Failed to secure onos ssh" )
2784 utilities.assert_equals( expect=main.TRUE, actual=isup,
2785 onpass="ONOS nodes fully started",
2786 onfail="ONOS nodes NOT fully started" )
2787
2788 main.step( "Restarting ONOS CLI" )
2789 cliResults = main.TRUE
2790 for ctrl in main.kill:
2791 cliResults = cliResults and\
2792 ctrl.startOnosCli( ctrl.ipAddress )
2793 ctrl.active = True
2794 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
2795 onpass="ONOS node(s) restarted",
2796 onfail="ONOS node(s) did not restart" )
2797
2798 # Grab the time of restart so we can have some idea of average time
2799 main.restartTime = time.time() - restartTime
2800 main.log.debug( "Restart time: " + str( main.restartTime ) )
2801 # TODO: Make this configurable.
2802 main.step( "Checking ONOS nodes" )
2803 nodeResults = utilities.retry( main.Cluster.nodesCheck,
2804 False,
2805 sleep=15,
2806 attempts=5 )
2807
2808 utilities.assert_equals( expect=True, actual=nodeResults,
2809 onpass="Nodes check successful",
2810 onfail="Nodes check NOT successful" )
2811
2812 if not nodeResults:
2813 for ctrl in main.Cluster.active():
2814 main.log.debug( "{} components not ACTIVE: \n{}".format(
2815 ctrl.name,
2816 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
2817 main.log.error( "Failed to start ONOS, stopping test" )
2818 main.cleanAndExit()
2819
2820 self.commonChecks()
2821
2822 main.step( "Rerun for election on the node(s) that were killed" )
2823 runResults = main.TRUE
2824 for ctrl in main.kill:
2825 runResults = runResults and\
2826 ctrl.electionTestRun()
2827 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2828 onpass="ONOS nodes reran for election topic",
2829 onfail="Error rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002830
Devin Lim142b5342017-07-20 15:22:39 -07002831 def tempCell( self, cellName, ipList ):
2832 main.step( "Create cell file" )
2833 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002834
Devin Lim142b5342017-07-20 15:22:39 -07002835 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2836 main.Mininet1.ip_address,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002837 cellAppString, ipList, main.ONOScli1.karafUser )
Devin Lim142b5342017-07-20 15:22:39 -07002838 main.step( "Applying cell variable to environment" )
2839 cellResult = main.ONOSbench.setCell( cellName )
2840 verifyResult = main.ONOSbench.verifyCell()
2841
Devin Lim142b5342017-07-20 15:22:39 -07002842 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002843 """
2844 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002845 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002846 1: scaling
2847 """
2848 """
2849 Check state after ONOS failure/scaling
2850 """
2851 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002852 assert main, "main not defined"
2853 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002854 main.case( "Running ONOS Constant State Tests" )
2855
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002856 OnosAfterWhich = [ "failure", "scaliing" ]
Devin Lim58046fa2017-07-05 16:55:00 -07002857
Devin Lim58046fa2017-07-05 16:55:00 -07002858 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002859 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002860
Devin Lim142b5342017-07-20 15:22:39 -07002861 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002862 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002863
2864 if rolesResults and not consistentMastership:
2865 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002866 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002867 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002868 json.dumps( json.loads( ONOSMastership[ i ] ),
2869 sort_keys=True,
2870 indent=4,
2871 separators=( ',', ': ' ) ) )
2872
2873 if compareSwitch:
2874 description2 = "Compare switch roles from before failure"
2875 main.step( description2 )
2876 try:
2877 currentJson = json.loads( ONOSMastership[ 0 ] )
2878 oldJson = json.loads( mastershipState )
2879 except ( ValueError, TypeError ):
2880 main.log.exception( "Something is wrong with parsing " +
2881 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002882 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2883 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002884 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002885 mastershipCheck = main.TRUE
2886 for i in range( 1, 29 ):
2887 switchDPID = str(
2888 main.Mininet1.getSwitchDPID( switch="s" + str( i ) ) )
2889 current = [ switch[ 'master' ] for switch in currentJson
2890 if switchDPID in switch[ 'id' ] ]
2891 old = [ switch[ 'master' ] for switch in oldJson
2892 if switchDPID in switch[ 'id' ] ]
2893 if current == old:
2894 mastershipCheck = mastershipCheck and main.TRUE
2895 else:
2896 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2897 mastershipCheck = main.FALSE
2898 utilities.assert_equals(
2899 expect=main.TRUE,
2900 actual=mastershipCheck,
2901 onpass="Mastership of Switches was not changed",
2902 onfail="Mastership of some switches changed" )
2903
2904 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002905 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002906 intentCheck = main.FALSE
2907 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002908
2909 main.step( "Check for consistency in Intents from each controller" )
2910 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2911 main.log.info( "Intents are consistent across all ONOS " +
2912 "nodes" )
2913 else:
2914 consistentIntents = False
2915
2916 # Try to make it easy to figure out what is happening
2917 #
2918 # Intent ONOS1 ONOS2 ...
2919 # 0x01 INSTALLED INSTALLING
2920 # ... ... ...
2921 # ... ... ...
2922 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002923 for ctrl in main.Cluster.active():
2924 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002925 main.log.warn( title )
2926 # get all intent keys in the cluster
2927 keys = []
2928 for nodeStr in ONOSIntents:
2929 node = json.loads( nodeStr )
2930 for intent in node:
2931 keys.append( intent.get( 'id' ) )
2932 keys = set( keys )
2933 for key in keys:
2934 row = "%-13s" % key
2935 for nodeStr in ONOSIntents:
2936 node = json.loads( nodeStr )
2937 for intent in node:
2938 if intent.get( 'id' ) == key:
2939 row += "%-15s" % intent.get( 'state' )
2940 main.log.warn( row )
2941 # End table view
2942
2943 utilities.assert_equals(
2944 expect=True,
2945 actual=consistentIntents,
2946 onpass="Intents are consistent across all ONOS nodes",
2947 onfail="ONOS nodes have different views of intents" )
2948 intentStates = []
2949 for node in ONOSIntents: # Iter through ONOS nodes
2950 nodeStates = []
2951 # Iter through intents of a node
2952 try:
2953 for intent in json.loads( node ):
2954 nodeStates.append( intent[ 'state' ] )
2955 except ( ValueError, TypeError ):
2956 main.log.exception( "Error in parsing intents" )
2957 main.log.error( repr( node ) )
2958 intentStates.append( nodeStates )
2959 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2960 main.log.info( dict( out ) )
2961
2962 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002963 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002964 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002965 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002966 main.log.warn( json.dumps(
2967 json.loads( ONOSIntents[ i ] ),
2968 sort_keys=True,
2969 indent=4,
2970 separators=( ',', ': ' ) ) )
2971 elif intentsResults and consistentIntents:
2972 intentCheck = main.TRUE
2973
2974 # NOTE: Store has no durability, so intents are lost across system
2975 # restarts
2976 if not isRestart:
2977 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2978 # NOTE: this requires case 5 to pass for intentState to be set.
2979 # maybe we should stop the test if that fails?
2980 sameIntents = main.FALSE
2981 try:
2982 intentState
2983 except NameError:
2984 main.log.warn( "No previous intent state was saved" )
2985 else:
2986 if intentState and intentState == ONOSIntents[ 0 ]:
2987 sameIntents = main.TRUE
2988 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2989 # TODO: possibly the states have changed? we may need to figure out
2990 # what the acceptable states are
2991 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2992 sameIntents = main.TRUE
2993 try:
2994 before = json.loads( intentState )
2995 after = json.loads( ONOSIntents[ 0 ] )
2996 for intent in before:
2997 if intent not in after:
2998 sameIntents = main.FALSE
2999 main.log.debug( "Intent is not currently in ONOS " +
3000 "(at least in the same form):" )
3001 main.log.debug( json.dumps( intent ) )
3002 except ( ValueError, TypeError ):
3003 main.log.exception( "Exception printing intents" )
3004 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3005 main.log.debug( repr( intentState ) )
3006 if sameIntents == main.FALSE:
3007 try:
3008 main.log.debug( "ONOS intents before: " )
3009 main.log.debug( json.dumps( json.loads( intentState ),
3010 sort_keys=True, indent=4,
3011 separators=( ',', ': ' ) ) )
3012 main.log.debug( "Current ONOS intents: " )
3013 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
3014 sort_keys=True, indent=4,
3015 separators=( ',', ': ' ) ) )
3016 except ( ValueError, TypeError ):
3017 main.log.exception( "Exception printing intents" )
3018 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3019 main.log.debug( repr( intentState ) )
3020 utilities.assert_equals(
3021 expect=main.TRUE,
3022 actual=sameIntents,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003023 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
Devin Lim58046fa2017-07-05 16:55:00 -07003024 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
3025 intentCheck = intentCheck and sameIntents
3026
3027 main.step( "Get the OF Table entries and compare to before " +
3028 "component " + OnosAfterWhich[ afterWhich ] )
3029 FlowTables = main.TRUE
3030 for i in range( 28 ):
3031 main.log.info( "Checking flow table on s" + str( i + 1 ) )
3032 tmpFlows = main.Mininet1.getFlowTable( "s" + str( i + 1 ), version="1.3", debug=False )
3033 curSwitch = main.Mininet1.flowTableComp( flows[ i ], tmpFlows )
3034 FlowTables = FlowTables and curSwitch
3035 if curSwitch == main.FALSE:
3036 main.log.warn( "Differences in flow table for switch: s{}".format( i + 1 ) )
3037 utilities.assert_equals(
3038 expect=main.TRUE,
3039 actual=FlowTables,
3040 onpass="No changes were found in the flow tables",
3041 onfail="Changes were found in the flow tables" )
3042
Jon Hallca319892017-06-15 15:25:22 -07003043 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07003044 """
3045 main.step( "Check the continuous pings to ensure that no packets " +
3046 "were dropped during component failure" )
3047 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
3048 main.params[ 'TESTONIP' ] )
3049 LossInPings = main.FALSE
3050 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3051 for i in range( 8, 18 ):
3052 main.log.info(
3053 "Checking for a loss in pings along flow from s" +
3054 str( i ) )
3055 LossInPings = main.Mininet2.checkForLoss(
3056 "/tmp/ping.h" +
3057 str( i ) ) or LossInPings
3058 if LossInPings == main.TRUE:
3059 main.log.info( "Loss in ping detected" )
3060 elif LossInPings == main.ERROR:
3061 main.log.info( "There are multiple mininet process running" )
3062 elif LossInPings == main.FALSE:
3063 main.log.info( "No Loss in the pings" )
3064 main.log.info( "No loss of dataplane connectivity" )
3065 utilities.assert_equals(
3066 expect=main.FALSE,
3067 actual=LossInPings,
3068 onpass="No Loss of connectivity",
3069 onfail="Loss of dataplane connectivity detected" )
3070 # NOTE: Since intents are not persisted with IntnentStore,
3071 # we expect loss in dataplane connectivity
3072 LossInPings = main.FALSE
3073 """
Devin Lim58046fa2017-07-05 16:55:00 -07003074 def compareTopo( self, main ):
3075 """
3076 Compare topo
3077 """
3078 import json
3079 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003080 assert main, "main not defined"
3081 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003082 try:
3083 from tests.dependencies.topology import Topology
3084 except ImportError:
3085 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003086 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003087 try:
3088 main.topoRelated
3089 except ( NameError, AttributeError ):
3090 main.topoRelated = Topology()
3091 main.case( "Compare ONOS Topology view to Mininet topology" )
3092 main.caseExplanation = "Compare topology objects between Mininet" +\
3093 " and ONOS"
3094 topoResult = main.FALSE
3095 topoFailMsg = "ONOS topology don't match Mininet"
3096 elapsed = 0
3097 count = 0
3098 main.step( "Comparing ONOS topology to MN topology" )
3099 startTime = time.time()
3100 # Give time for Gossip to work
3101 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3102 devicesResults = main.TRUE
3103 linksResults = main.TRUE
3104 hostsResults = main.TRUE
3105 hostAttachmentResults = True
3106 count += 1
3107 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003108 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003109 kwargs={ 'sleep': 5, 'attempts': 5,
3110 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003111 ipResult = main.TRUE
3112
Devin Lim142b5342017-07-20 15:22:39 -07003113 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003114 kwargs={ 'sleep': 5, 'attempts': 5,
3115 'randomTime': True },
3116 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003117
3118 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003119 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003120 if hosts[ controller ]:
3121 for host in hosts[ controller ]:
3122 if host is None or host.get( 'ipAddresses', [] ) == []:
3123 main.log.error(
3124 "Error with host ipAddresses on controller" +
3125 controllerStr + ": " + str( host ) )
3126 ipResult = main.FALSE
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003127 ports = main.topoRelated.getAll( "ports", True,
Jon Hallca319892017-06-15 15:25:22 -07003128 kwargs={ 'sleep': 5, 'attempts': 5,
3129 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003130 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003131 kwargs={ 'sleep': 5, 'attempts': 5,
3132 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003133 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003134 kwargs={ 'sleep': 5, 'attempts': 5,
3135 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003136
3137 elapsed = time.time() - startTime
3138 cliTime = time.time() - cliStart
Jon Hall5d5876e2017-11-30 09:33:16 -08003139 main.log.debug( "Elapsed time: " + str( elapsed ) )
3140 main.log.debug( "CLI time: " + str( cliTime ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003141
3142 if all( e is None for e in devices ) and\
3143 all( e is None for e in hosts ) and\
3144 all( e is None for e in ports ) and\
3145 all( e is None for e in links ) and\
3146 all( e is None for e in clusters ):
3147 topoFailMsg = "Could not get topology from ONOS"
3148 main.log.error( topoFailMsg )
3149 continue # Try again, No use trying to compare
3150
3151 mnSwitches = main.Mininet1.getSwitches()
3152 mnLinks = main.Mininet1.getLinks()
3153 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003154 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003155 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003156 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3157 controller,
3158 mnSwitches,
3159 devices,
3160 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003161 utilities.assert_equals( expect=main.TRUE,
3162 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003163 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003164 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003165 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003166 " Switches view is incorrect" )
3167
Devin Lim58046fa2017-07-05 16:55:00 -07003168 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003169 main.Mininet1.compareLinks,
3170 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003171 utilities.assert_equals( expect=main.TRUE,
3172 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003173 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003174 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003175 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003176 " links view is incorrect" )
3177 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3178 currentHostsResult = main.Mininet1.compareHosts(
3179 mnHosts,
3180 hosts[ controller ] )
3181 elif hosts[ controller ] == []:
3182 currentHostsResult = main.TRUE
3183 else:
3184 currentHostsResult = main.FALSE
3185 utilities.assert_equals( expect=main.TRUE,
3186 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003187 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003188 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003189 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003190 " hosts don't match Mininet" )
3191 # CHECKING HOST ATTACHMENT POINTS
3192 hostAttachment = True
3193 zeroHosts = False
3194 # FIXME: topo-HA/obelisk specific mappings:
3195 # key is mac and value is dpid
3196 mappings = {}
3197 for i in range( 1, 29 ): # hosts 1 through 28
3198 # set up correct variables:
3199 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
3200 if i == 1:
3201 deviceId = "1000".zfill( 16 )
3202 elif i == 2:
3203 deviceId = "2000".zfill( 16 )
3204 elif i == 3:
3205 deviceId = "3000".zfill( 16 )
3206 elif i == 4:
3207 deviceId = "3004".zfill( 16 )
3208 elif i == 5:
3209 deviceId = "5000".zfill( 16 )
3210 elif i == 6:
3211 deviceId = "6000".zfill( 16 )
3212 elif i == 7:
3213 deviceId = "6007".zfill( 16 )
3214 elif i >= 8 and i <= 17:
3215 dpid = '3' + str( i ).zfill( 3 )
3216 deviceId = dpid.zfill( 16 )
3217 elif i >= 18 and i <= 27:
3218 dpid = '6' + str( i ).zfill( 3 )
3219 deviceId = dpid.zfill( 16 )
3220 elif i == 28:
3221 deviceId = "2800".zfill( 16 )
3222 mappings[ macId ] = deviceId
3223 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3224 if hosts[ controller ] == []:
3225 main.log.warn( "There are no hosts discovered" )
3226 zeroHosts = True
3227 else:
3228 for host in hosts[ controller ]:
3229 mac = None
3230 location = None
3231 device = None
3232 port = None
3233 try:
3234 mac = host.get( 'mac' )
3235 assert mac, "mac field could not be found for this host object"
Devin Limefaf3062017-08-14 16:18:19 -07003236 print host
3237 if 'locations' in host:
3238 location = host.get( 'locations' )[ 0 ]
3239 elif 'location' in host:
3240 location = host.get( 'location' )
Devin Lim58046fa2017-07-05 16:55:00 -07003241 assert location, "location field could not be found for this host object"
3242
3243 # Trim the protocol identifier off deviceId
3244 device = str( location.get( 'elementId' ) ).split( ':' )[ 1 ]
3245 assert device, "elementId field could not be found for this host location object"
3246
3247 port = location.get( 'port' )
3248 assert port, "port field could not be found for this host location object"
3249
3250 # Now check if this matches where they should be
3251 if mac and device and port:
3252 if str( port ) != "1":
3253 main.log.error( "The attachment port is incorrect for " +
3254 "host " + str( mac ) +
3255 ". Expected: 1 Actual: " + str( port ) )
3256 hostAttachment = False
3257 if device != mappings[ str( mac ) ]:
3258 main.log.error( "The attachment device is incorrect for " +
3259 "host " + str( mac ) +
3260 ". Expected: " + mappings[ str( mac ) ] +
3261 " Actual: " + device )
3262 hostAttachment = False
3263 else:
3264 hostAttachment = False
Devin Limefaf3062017-08-14 16:18:19 -07003265 except ( AssertionError, TypeError ):
Devin Lim58046fa2017-07-05 16:55:00 -07003266 main.log.exception( "Json object not as expected" )
3267 main.log.error( repr( host ) )
3268 hostAttachment = False
3269 else:
3270 main.log.error( "No hosts json output or \"Error\"" +
3271 " in output. hosts = " +
3272 repr( hosts[ controller ] ) )
3273 if zeroHosts is False:
3274 # TODO: Find a way to know if there should be hosts in a
3275 # given point of the test
3276 hostAttachment = True
3277
3278 # END CHECKING HOST ATTACHMENT POINTS
3279 devicesResults = devicesResults and currentDevicesResult
3280 linksResults = linksResults and currentLinksResult
3281 hostsResults = hostsResults and currentHostsResult
3282 hostAttachmentResults = hostAttachmentResults and\
3283 hostAttachment
3284 topoResult = ( devicesResults and linksResults
3285 and hostsResults and ipResult and
3286 hostAttachmentResults )
3287 utilities.assert_equals( expect=True,
3288 actual=topoResult,
3289 onpass="ONOS topology matches Mininet",
3290 onfail=topoFailMsg )
3291 # End of While loop to pull ONOS state
3292
3293 # Compare json objects for hosts and dataplane clusters
3294
3295 # hosts
3296 main.step( "Hosts view is consistent across all ONOS nodes" )
3297 consistentHostsResult = main.TRUE
3298 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003299 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003300 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3301 if hosts[ controller ] == hosts[ 0 ]:
3302 continue
3303 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003304 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003305 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003306 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003307 consistentHostsResult = main.FALSE
3308
3309 else:
Jon Hallca319892017-06-15 15:25:22 -07003310 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003311 controllerStr )
3312 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003313 main.log.debug( controllerStr +
3314 " hosts response: " +
3315 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003316 utilities.assert_equals(
3317 expect=main.TRUE,
3318 actual=consistentHostsResult,
3319 onpass="Hosts view is consistent across all ONOS nodes",
3320 onfail="ONOS nodes have different views of hosts" )
3321
3322 main.step( "Hosts information is correct" )
3323 hostsResults = hostsResults and ipResult
3324 utilities.assert_equals(
3325 expect=main.TRUE,
3326 actual=hostsResults,
3327 onpass="Host information is correct",
3328 onfail="Host information is incorrect" )
3329
3330 main.step( "Host attachment points to the network" )
3331 utilities.assert_equals(
3332 expect=True,
3333 actual=hostAttachmentResults,
3334 onpass="Hosts are correctly attached to the network",
3335 onfail="ONOS did not correctly attach hosts to the network" )
3336
3337 # Strongly connected clusters of devices
3338 main.step( "Clusters view is consistent across all ONOS nodes" )
3339 consistentClustersResult = main.TRUE
3340 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003341 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003342 if "Error" not in clusters[ controller ]:
3343 if clusters[ controller ] == clusters[ 0 ]:
3344 continue
3345 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003346 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003347 controllerStr +
3348 " is inconsistent with ONOS1" )
3349 consistentClustersResult = main.FALSE
3350 else:
3351 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003352 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003353 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003354 main.log.debug( controllerStr +
3355 " clusters response: " +
3356 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003357 utilities.assert_equals(
3358 expect=main.TRUE,
3359 actual=consistentClustersResult,
3360 onpass="Clusters view is consistent across all ONOS nodes",
3361 onfail="ONOS nodes have different views of clusters" )
3362 if not consistentClustersResult:
3363 main.log.debug( clusters )
3364 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003365 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003366
3367 main.step( "There is only one SCC" )
3368 # there should always only be one cluster
3369 try:
3370 numClusters = len( json.loads( clusters[ 0 ] ) )
3371 except ( ValueError, TypeError ):
3372 main.log.exception( "Error parsing clusters[0]: " +
3373 repr( clusters[ 0 ] ) )
3374 numClusters = "ERROR"
3375 clusterResults = main.FALSE
3376 if numClusters == 1:
3377 clusterResults = main.TRUE
3378 utilities.assert_equals(
3379 expect=1,
3380 actual=numClusters,
3381 onpass="ONOS shows 1 SCC",
3382 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3383
3384 topoResult = ( devicesResults and linksResults
3385 and hostsResults and consistentHostsResult
3386 and consistentClustersResult and clusterResults
3387 and ipResult and hostAttachmentResults )
3388
3389 topoResult = topoResult and int( count <= 2 )
3390 note = "note it takes about " + str( int( cliTime ) ) + \
3391 " seconds for the test to make all the cli calls to fetch " +\
3392 "the topology from each ONOS instance"
3393 main.log.info(
3394 "Very crass estimate for topology discovery/convergence( " +
3395 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3396 str( count ) + " tries" )
3397
3398 main.step( "Device information is correct" )
3399 utilities.assert_equals(
3400 expect=main.TRUE,
3401 actual=devicesResults,
3402 onpass="Device information is correct",
3403 onfail="Device information is incorrect" )
3404
3405 main.step( "Links are correct" )
3406 utilities.assert_equals(
3407 expect=main.TRUE,
3408 actual=linksResults,
3409 onpass="Link are correct",
3410 onfail="Links are incorrect" )
3411
3412 main.step( "Hosts are correct" )
3413 utilities.assert_equals(
3414 expect=main.TRUE,
3415 actual=hostsResults,
3416 onpass="Hosts are correct",
3417 onfail="Hosts are incorrect" )
3418
3419 # FIXME: move this to an ONOS state case
3420 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08003421 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07003422 False,
Devin Lim58046fa2017-07-05 16:55:00 -07003423 attempts=5 )
3424 utilities.assert_equals( expect=True, actual=nodeResults,
3425 onpass="Nodes check successful",
3426 onfail="Nodes check NOT successful" )
3427 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003428 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003429 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003430 ctrl.name,
3431 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003432
3433 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003434 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003435
Devin Lim58046fa2017-07-05 16:55:00 -07003436 def linkDown( self, main, fromS="s3", toS="s28" ):
3437 """
3438 Link fromS-toS down
3439 """
3440 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003441 assert main, "main not defined"
3442 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003443 # NOTE: You should probably run a topology check after this
3444
3445 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3446
3447 description = "Turn off a link to ensure that Link Discovery " +\
3448 "is working properly"
3449 main.case( description )
3450
3451 main.step( "Kill Link between " + fromS + " and " + toS )
3452 LinkDown = main.Mininet1.link( END1=fromS, END2=toS, OPTION="down" )
3453 main.log.info( "Waiting " + str( linkSleep ) +
3454 " seconds for link down to be discovered" )
3455 time.sleep( linkSleep )
3456 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3457 onpass="Link down successful",
3458 onfail="Failed to bring link down" )
3459 # TODO do some sort of check here
3460
3461 def linkUp( self, main, fromS="s3", toS="s28" ):
3462 """
3463 Link fromS-toS up
3464 """
3465 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003466 assert main, "main not defined"
3467 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003468 # NOTE: You should probably run a topology check after this
3469
3470 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3471
3472 description = "Restore a link to ensure that Link Discovery is " + \
3473 "working properly"
3474 main.case( description )
3475
Jon Hall4173b242017-09-12 17:04:38 -07003476 main.step( "Bring link between " + fromS + " and " + toS + " back up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003477 LinkUp = main.Mininet1.link( END1=fromS, END2=toS, OPTION="up" )
3478 main.log.info( "Waiting " + str( linkSleep ) +
3479 " seconds for link up to be discovered" )
3480 time.sleep( linkSleep )
3481 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3482 onpass="Link up successful",
3483 onfail="Failed to bring link up" )
3484
3485 def switchDown( self, main ):
3486 """
3487 Switch Down
3488 """
3489 # NOTE: You should probably run a topology check after this
3490 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003491 assert main, "main not defined"
3492 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003493
3494 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3495
3496 description = "Killing a switch to ensure it is discovered correctly"
Jon Hallca319892017-06-15 15:25:22 -07003497 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003498 main.case( description )
3499 switch = main.params[ 'kill' ][ 'switch' ]
3500 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3501
3502 # TODO: Make this switch parameterizable
3503 main.step( "Kill " + switch )
3504 main.log.info( "Deleting " + switch )
3505 main.Mininet1.delSwitch( switch )
3506 main.log.info( "Waiting " + str( switchSleep ) +
3507 " seconds for switch down to be discovered" )
3508 time.sleep( switchSleep )
3509 device = onosCli.getDevice( dpid=switchDPID )
3510 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003511 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003512 result = main.FALSE
3513 if device and device[ 'available' ] is False:
3514 result = main.TRUE
3515 utilities.assert_equals( expect=main.TRUE, actual=result,
3516 onpass="Kill switch successful",
3517 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003518
Devin Lim58046fa2017-07-05 16:55:00 -07003519 def switchUp( self, main ):
3520 """
3521 Switch Up
3522 """
3523 # NOTE: You should probably run a topology check after this
3524 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003525 assert main, "main not defined"
3526 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003527
3528 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3529 switch = main.params[ 'kill' ][ 'switch' ]
3530 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3531 links = main.params[ 'kill' ][ 'links' ].split()
Jon Hallca319892017-06-15 15:25:22 -07003532 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -07003533 description = "Adding a switch to ensure it is discovered correctly"
3534 main.case( description )
3535
3536 main.step( "Add back " + switch )
3537 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3538 for peer in links:
3539 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003540 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003541 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3542 main.log.info( "Waiting " + str( switchSleep ) +
3543 " seconds for switch up to be discovered" )
3544 time.sleep( switchSleep )
3545 device = onosCli.getDevice( dpid=switchDPID )
3546 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003547 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003548 result = main.FALSE
3549 if device and device[ 'available' ]:
3550 result = main.TRUE
3551 utilities.assert_equals( expect=main.TRUE, actual=result,
3552 onpass="add switch successful",
3553 onfail="Failed to add switch?" )
3554
3555 def startElectionApp( self, main ):
3556 """
3557 start election app on all onos nodes
3558 """
Devin Lim58046fa2017-07-05 16:55:00 -07003559 assert main, "main not defined"
3560 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003561
3562 main.case( "Start Leadership Election app" )
3563 main.step( "Install leadership election app" )
Jon Hallca319892017-06-15 15:25:22 -07003564 onosCli = main.Cluster.next()
Devin Lime9f0ccf2017-08-11 17:25:12 -07003565 appResult = onosCli.CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003566 utilities.assert_equals(
3567 expect=main.TRUE,
3568 actual=appResult,
3569 onpass="Election app installed",
3570 onfail="Something went wrong with installing Leadership election" )
3571
3572 main.step( "Run for election on each node" )
Jon Hallca319892017-06-15 15:25:22 -07003573 onosCli.electionTestRun()
3574 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003575 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003576 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003577 utilities.assert_equals(
3578 expect=True,
3579 actual=sameResult,
3580 onpass="All nodes see the same leaderboards",
3581 onfail="Inconsistent leaderboards" )
3582
3583 if sameResult:
Jon Hall5d5876e2017-11-30 09:33:16 -08003584 # Check that the leader is one of the active nodes
3585 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003586 leader = leaders[ 0 ][ 0 ]
Jon Hall5d5876e2017-11-30 09:33:16 -08003587 if leader in ips:
3588 legitimate = True
Devin Lim58046fa2017-07-05 16:55:00 -07003589 else:
Jon Hall5d5876e2017-11-30 09:33:16 -08003590 legitimate = False
3591 main.log.debug( leaders )
3592 main.step( "Active node was elected leader?" )
Devin Lim58046fa2017-07-05 16:55:00 -07003593 utilities.assert_equals(
3594 expect=True,
Jon Hall5d5876e2017-11-30 09:33:16 -08003595 actual=legitimate,
Devin Lim58046fa2017-07-05 16:55:00 -07003596 onpass="Correct leader was elected",
3597 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003598 main.Cluster.testLeader = leader
3599
    def isElectionFunctional( self, main ):
        """
        Check that Leadership Election is still functional.

        15.1 Run election on each node
        15.2 Check that each node has the same leaders and candidates
        15.3 Find current leader and withdraw
        15.4 Check that a new node was elected leader
        15.5 Check that that new leader was the candidate of old leader
        15.6 Run for election on old leader
        15.7 Check that oldLeader is a candidate, and leader if only 1 node
        15.8 Make sure that the old leader was added to the candidate list

        "old" and "new" variable prefixes refer to data from before vs after
        the withdrawal, and later before the withdrawal vs after re-election.
        """
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        description = "Check that Leadership Election is still functional"
        main.case( description )
        # NOTE: Need to re-run after restarts since being a candidate is not persistent

        oldLeaders = []  # list of lists of each node's candidates before withdrawal
        newLeaders = []  # list of lists of each node's candidates after withdrawal
        oldLeader = ''  # the old leader from oldLeaders, None if nodes disagree
        newLeader = ''  # the new leader from newLeaders, None if nodes disagree
        oldLeaderCLI = None  # the CLI of the old leader used for re-electing
        expectNoLeader = False  # True when there is only one node, so withdrawal leaves no leader
        if len( main.Cluster.runningNodes ) == 1:
            expectNoLeader = True

        main.step( "Run for election on each node" )
        # returnBool=True collapses the per-node results into a single boolean
        electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
        utilities.assert_equals(
            expect=True,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            # Without the election app loaded, none of the later steps can work
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = main.Cluster.active()
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            # All boards agree; first entry of the first board is the leader
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.info( "Old leader: " + oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader (matched by controller IP address)
        for ctrl in main.Cluster.active():
            if oldLeader == ctrl.ipAddress:
                oldLeaderCLI = ctrl
                break
        else:  # FOR/ELSE statement: runs only if the loop never hit "break"
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            if newLeaders[ 0 ][ 0 ] == 'none':
                # 'none' is only acceptable in the single-node case
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the older leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that that new leader was the candidate of old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        # NOTE(review): when a leader IS expected (multi-node), this step
        # performs no check — correctCandidateResult stays main.TRUE without
        # comparing newLeader against the old candidate list; confirm whether
        # that verification was intended here
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Parameterize
        # NOTE(review): positionResult is computed but not asserted within this
        # block — verify the follow-up assertion exists elsewhere in the file
        positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3740
Devin Lim58046fa2017-07-05 16:55:00 -07003741 def installDistributedPrimitiveApp( self, main ):
Jon Hall5d5876e2017-11-30 09:33:16 -08003742 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003743 Install Distributed Primitives app
Jon Hall5d5876e2017-11-30 09:33:16 -08003744 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003745 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003746 assert main, "main not defined"
3747 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003748
3749 # Variables for the distributed primitives tests
3750 main.pCounterName = "TestON-Partitions"
3751 main.pCounterValue = 0
3752 main.onosSet = set( [] )
3753 main.onosSetName = "TestON-set"
3754
3755 description = "Install Primitives app"
3756 main.case( description )
3757 main.step( "Install Primitives app" )
3758 appName = "org.onosproject.distributedprimitives"
Devin Lime9f0ccf2017-08-11 17:25:12 -07003759 appResults = main.Cluster.next().CLI.activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003760 utilities.assert_equals( expect=main.TRUE,
3761 actual=appResults,
3762 onpass="Primitives app activated",
3763 onfail="Primitives app not activated" )
3764 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003765 time.sleep( 5 ) # To allow all nodes to activate