blob: c809ffe65119d4d77d6450785706aeda647d47e8 [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002Copyright 2015 Open Networking Foundation ( ONF )
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -070011 ( at your option ) any later version.
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -070012
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
Jon Halla440e872016-03-31 15:15:50 -070021import json
Jon Hall41d39f12016-04-11 22:54:35 -070022import time
Jon Halla478b852017-12-04 15:00:15 -080023import pexpect
24import re
Jon Halle1a3b752015-07-22 13:02:46 -070025
Jon Hallf37d44d2017-05-24 10:37:30 -070026
Jon Hall41d39f12016-04-11 22:54:35 -070027class HA():
Jon Hall57b50432015-10-22 10:20:10 -070028
Jon Halla440e872016-03-31 15:15:50 -070029 def __init__( self ):
30 self.default = ''
Jon Hallab611372018-02-21 15:26:05 -080031 main.topoMappings = {}
Jon Hall57b50432015-10-22 10:20:10 -070032
Jon Hall5a5c8432018-11-28 11:39:57 -080033 def removeKarafConsoleLogging( self ):
34 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
35 main.ONOSbench.handle.expect( main.ONOSbench.prompt )
36 main.ONOSbench.handle.sendline( "sed -i 's/-Dkaraf.log.console=INFO //g' tools/package/bin/onos-service" )
37 main.ONOSbench.handle.expect( main.ONOSbench.prompt )
38
Devin Lim58046fa2017-07-05 16:55:00 -070039 def customizeOnosGenPartitions( self ):
Devin Lim58046fa2017-07-05 16:55:00 -070040 # copy gen-partions file to ONOS
41 # NOTE: this assumes TestON and ONOS are on the same machine
Jon Hallab611372018-02-21 15:26:05 -080042 srcFile = main.testsRoot + "/HA/dependencies/onos-gen-partitions"
Devin Lim58046fa2017-07-05 16:55:00 -070043 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
44 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
45 main.ONOSbench.ip_address,
46 srcFile,
47 dstDir,
48 pwd=main.ONOSbench.pwd,
49 direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070050
Devin Lim58046fa2017-07-05 16:55:00 -070051 def cleanUpGenPartition( self ):
52 # clean up gen-partitions file
53 try:
54 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
55 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
56 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
57 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
Jon Hall0e240372018-05-02 11:21:57 -070058 main.log.info( "Cleaning custom gen partitions file, response was: \n" +
Devin Lim58046fa2017-07-05 16:55:00 -070059 str( main.ONOSbench.handle.before ) )
60 except ( pexpect.TIMEOUT, pexpect.EOF ):
61 main.log.exception( "ONOSbench: pexpect exception found:" +
62 main.ONOSbench.handle.before )
Devin Lim44075962017-08-11 10:56:37 -070063 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070064
Devin Lim58046fa2017-07-05 16:55:00 -070065 def startingMininet( self ):
66 main.step( "Starting Mininet" )
67 # scp topo file to mininet
68 # TODO: move to params?
69 topoName = "obelisk.py"
70 filePath = main.ONOSbench.home + "/tools/test/topos/"
71 main.ONOSbench.scp( main.Mininet1,
72 filePath + topoName,
73 main.Mininet1.home,
74 direction="to" )
75 mnResult = main.Mininet1.startNet()
76 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
77 onpass="Mininet Started",
78 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070079
Devin Lim58046fa2017-07-05 16:55:00 -070080 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070081 if main.Cluster.numCtrls >= 5:
82 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070083 else:
84 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim58046fa2017-07-05 16:55:00 -070085
Jon Hall4f360bc2017-09-07 10:19:52 -070086 def copyBackupConfig( self ):
Devin Lim58046fa2017-07-05 16:55:00 -070087 main.step( "Copying backup config files" )
88 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
89 cp = main.ONOSbench.scp( main.ONOSbench,
90 main.onosServicepath,
91 main.onosServicepath + ".backup",
92 direction="to" )
93
94 utilities.assert_equals( expect=main.TRUE,
95 actual=cp,
96 onpass="Copy backup config file succeeded",
97 onfail="Copy backup config file failed" )
Jon Hall4f360bc2017-09-07 10:19:52 -070098
99 def setMetadataUrl( self ):
100 # NOTE: You should probably backup the config before and reset the config after the test
Devin Lim58046fa2017-07-05 16:55:00 -0700101 # we need to modify the onos-service file to use remote metadata file
102 # url for cluster metadata file
103 iface = main.params[ 'server' ].get( 'interface' )
104 ip = main.ONOSbench.getIpAddr( iface=iface )
105 metaFile = "cluster.json"
106 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
Devin Lim58046fa2017-07-05 16:55:00 -0700107 main.log.warn( repr( javaArgs ) )
108 handle = main.ONOSbench.handle
Jon Hall4173b242017-09-12 17:04:38 -0700109 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
110 main.onosServicepath )
Devin Lim58046fa2017-07-05 16:55:00 -0700111 main.log.warn( repr( sed ) )
112 handle.sendline( sed )
113 handle.expect( metaFile )
114 output = handle.before
115 handle.expect( "\$" )
116 output += handle.before
117 main.log.debug( repr( output ) )
118
119 def cleanUpOnosService( self ):
120 # Cleanup custom onos-service file
121 main.ONOSbench.scp( main.ONOSbench,
122 main.onosServicepath + ".backup",
123 main.onosServicepath,
124 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700125
Jon Halla440e872016-03-31 15:15:50 -0700126 def consistentCheck( self ):
127 """
128 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700129
Jon Hallf37d44d2017-05-24 10:37:30 -0700130 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700131 - onosCounters is the parsed json output of the counters command on
132 all nodes
133 - consistent is main.TRUE if all "TestON" counters are consitent across
134 all nodes or main.FALSE
Jon Halla440e872016-03-31 15:15:50 -0700135 """
Jon Halle1a3b752015-07-22 13:02:46 -0700136 try:
Jon Halla440e872016-03-31 15:15:50 -0700137 # Get onos counters results
138 onosCountersRaw = []
139 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700140 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700141 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700142 name="counters-" + str( ctrl ),
143 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700144 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700145 'randomTime': True } )
146 threads.append( t )
147 t.start()
148 for t in threads:
149 t.join()
150 onosCountersRaw.append( t.result )
151 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700152 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700153 try:
Jon Hall3e6edb32018-08-21 16:20:30 -0700154 value = json.loads( onosCountersRaw[ i ] )
155 onosCounters.append( value )
Jon Halla440e872016-03-31 15:15:50 -0700156 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700157 main.log.error( "Could not parse counters response from " +
Devin Lim142b5342017-07-20 15:22:39 -0700158 str( main.Cluster.active( i ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700159 main.log.warn( repr( onosCountersRaw[ i ] ) )
Jon Hall0e240372018-05-02 11:21:57 -0700160 onosCounters.append( {} )
Jon Halla440e872016-03-31 15:15:50 -0700161
162 testCounters = {}
163 # make a list of all the "TestON-*" counters in ONOS
Jon Hall41d39f12016-04-11 22:54:35 -0700164 # lookes like a dict whose keys are the name of the ONOS node and
165 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700166 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700167 # }
168 # NOTE: There is an assumtion that all nodes are active
169 # based on the above for loops
170 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700171 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700172 if 'TestON' in key:
Jon Hall0e240372018-05-02 11:21:57 -0700173 node = main.Cluster.active( controller[ 0 ] )
Jon Halla440e872016-03-31 15:15:50 -0700174 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700175 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700176 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700177 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700178 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700179 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700180 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
181 if all( tmp ):
182 consistent = main.TRUE
183 else:
184 consistent = main.FALSE
Jon Hall0e240372018-05-02 11:21:57 -0700185 main.log.error( "ONOS nodes have different values for counters: %s",
Jon Halla440e872016-03-31 15:15:50 -0700186 testCounters )
187 return ( onosCounters, consistent )
188 except Exception:
189 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700190 main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700191
192 def counterCheck( self, counterName, counterValue ):
193 """
194 Checks that TestON counters are consistent across all nodes and that
195 specified counter is in ONOS with the given value
196 """
197 try:
198 correctResults = main.TRUE
199 # Get onos counters results and consistentCheck
200 onosCounters, consistent = self.consistentCheck()
201 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700202 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -0700203 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700204 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700205 onosValue = None
206 try:
207 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700208 except AttributeError:
Jon Hallca319892017-06-15 15:25:22 -0700209 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700210 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700211 correctResults = main.FALSE
212 if onosValue == counterValue:
Jon Hall0e240372018-05-02 11:21:57 -0700213 main.log.info( "{}: {} counter value is correct".format( node, counterName ) )
Jon Halla440e872016-03-31 15:15:50 -0700214 else:
Jon Hall0e240372018-05-02 11:21:57 -0700215 main.log.error( node + ": " + counterName +
Jon Hall41d39f12016-04-11 22:54:35 -0700216 " counter value is incorrect," +
217 " expected value: " + str( counterValue ) +
218 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700219 correctResults = main.FALSE
220 return consistent and correctResults
221 except Exception:
222 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700223 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700224
225 def consistentLeaderboards( self, nodes ):
226 TOPIC = 'org.onosproject.election'
227 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700228 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700229 for n in range( 5 ): # Retry in case election is still happening
230 leaderList = []
231 # Get all leaderboards
232 for cli in nodes:
233 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
234 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700235 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700236 leaderList is not None
Jon Hall41d39f12016-04-11 22:54:35 -0700237 if result:
238 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700239 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700240 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
241 return ( result, leaderList )
242
Devin Lim58046fa2017-07-05 16:55:00 -0700243 def initialSetUp( self, serviceClean=False ):
244 """
245 rest of initialSetup
246 """
Devin Lim58046fa2017-07-05 16:55:00 -0700247 if main.params[ 'tcpdump' ].lower() == "true":
248 main.step( "Start Packet Capture MN" )
249 main.Mininet2.startTcpdump(
250 str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
251 + "-MN.pcap",
252 intf=main.params[ 'MNtcpdump' ][ 'intf' ],
253 port=main.params[ 'MNtcpdump' ][ 'port' ] )
254
255 if serviceClean:
256 main.step( "Clean up ONOS service changes" )
Devin Lim142b5342017-07-20 15:22:39 -0700257 main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
258 main.ONOSbench.handle.expect( "\$" )
259 main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
260 main.ONOSbench.handle.expect( "\$" )
Devin Lim58046fa2017-07-05 16:55:00 -0700261
262 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -0800263 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700264 False,
Jon Hallcd1126d2018-09-11 11:32:48 -0700265 attempts=90 )
Devin Lim58046fa2017-07-05 16:55:00 -0700266
267 utilities.assert_equals( expect=True, actual=nodeResults,
268 onpass="Nodes check successful",
269 onfail="Nodes check NOT successful" )
270
271 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -0700272 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700273 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -0700274 ctrl.name,
Jon Hall6c9e2da2018-11-06 12:01:23 -0800275 ctrl.CLI.sendline( "onos:scr-list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700276 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -0700277 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700278
279 main.step( "Activate apps defined in the params file" )
280 # get data from the params
281 apps = main.params.get( 'apps' )
282 if apps:
283 apps = apps.split( ',' )
Jon Hallca319892017-06-15 15:25:22 -0700284 main.log.debug( "Apps: " + str( apps ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700285 activateResult = True
286 for app in apps:
Devin Lim142b5342017-07-20 15:22:39 -0700287 main.Cluster.active( 0 ).app( app, "Activate" )
Devin Lim58046fa2017-07-05 16:55:00 -0700288 # TODO: check this worked
289 time.sleep( 10 ) # wait for apps to activate
290 for app in apps:
Devin Lim142b5342017-07-20 15:22:39 -0700291 state = main.Cluster.active( 0 ).appStatus( app )
Devin Lim58046fa2017-07-05 16:55:00 -0700292 if state == "ACTIVE":
293 activateResult = activateResult and True
294 else:
295 main.log.error( "{} is in {} state".format( app, state ) )
296 activateResult = False
297 utilities.assert_equals( expect=True,
298 actual=activateResult,
299 onpass="Successfully activated apps",
300 onfail="Failed to activate apps" )
301 else:
302 main.log.warn( "No apps were specified to be loaded after startup" )
303
304 main.step( "Set ONOS configurations" )
Jon Hallca319892017-06-15 15:25:22 -0700305 # FIXME: This shoudl be part of the general startup sequence
Devin Lim58046fa2017-07-05 16:55:00 -0700306 config = main.params.get( 'ONOS_Configuration' )
307 if config:
308 main.log.debug( config )
309 checkResult = main.TRUE
310 for component in config:
311 for setting in config[ component ]:
312 value = config[ component ][ setting ]
Jon Hallca319892017-06-15 15:25:22 -0700313 check = main.Cluster.next().setCfg( component, setting, value )
Devin Lim58046fa2017-07-05 16:55:00 -0700314 main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
315 checkResult = check and checkResult
316 utilities.assert_equals( expect=main.TRUE,
317 actual=checkResult,
318 onpass="Successfully set config",
319 onfail="Failed to set config" )
320 else:
321 main.log.warn( "No configurations were specified to be changed after startup" )
322
Jon Hallca319892017-06-15 15:25:22 -0700323 main.step( "Check app ids" )
324 appCheck = self.appCheck()
325 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700326 onpass="App Ids seem to be correct",
327 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700328
Jon Hallca319892017-06-15 15:25:22 -0700329 def commonChecks( self ):
330 # TODO: make this assertable or assert in here?
331 self.topicsCheck()
332 self.partitionsCheck()
333 self.pendingMapCheck()
334 self.appCheck()
335
336 def topicsCheck( self, extraTopics=[] ):
337 """
338 Check for work partition topics in leaders output
339 """
340 leaders = main.Cluster.next().leaders()
341 missing = False
342 try:
343 if leaders:
344 parsedLeaders = json.loads( leaders )
345 output = json.dumps( parsedLeaders,
346 sort_keys=True,
347 indent=4,
348 separators=( ',', ': ' ) )
Jon Hallca319892017-06-15 15:25:22 -0700349 # check for all intent partitions
350 topics = []
351 for i in range( 14 ):
352 topics.append( "work-partition-" + str( i ) )
353 topics += extraTopics
Jon Hallca319892017-06-15 15:25:22 -0700354 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
355 for topic in topics:
356 if topic not in ONOStopics:
357 main.log.error( "Error: " + topic +
358 " not in leaders" )
359 missing = True
360 else:
361 main.log.error( "leaders() returned None" )
362 except ( ValueError, TypeError ):
363 main.log.exception( "Error parsing leaders" )
364 main.log.error( repr( leaders ) )
365 if missing:
Jon Hall4173b242017-09-12 17:04:38 -0700366 # NOTE Can we refactor this into the Cluster class?
367 # Maybe an option to print the output of a command from each node?
Jon Hallca319892017-06-15 15:25:22 -0700368 for ctrl in main.Cluster.active():
369 response = ctrl.CLI.leaders( jsonFormat=False )
370 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
371 str( response ) )
372 return missing
373
374 def partitionsCheck( self ):
375 # TODO: return something assertable
376 partitions = main.Cluster.next().partitions()
377 try:
378 if partitions:
379 parsedPartitions = json.loads( partitions )
380 output = json.dumps( parsedPartitions,
381 sort_keys=True,
382 indent=4,
383 separators=( ',', ': ' ) )
384 main.log.debug( "Partitions: " + output )
385 # TODO check for a leader in all paritions
386 # TODO check for consistency among nodes
387 else:
388 main.log.error( "partitions() returned None" )
389 except ( ValueError, TypeError ):
390 main.log.exception( "Error parsing partitions" )
391 main.log.error( repr( partitions ) )
392
393 def pendingMapCheck( self ):
394 pendingMap = main.Cluster.next().pendingMap()
395 try:
396 if pendingMap:
397 parsedPending = json.loads( pendingMap )
398 output = json.dumps( parsedPending,
399 sort_keys=True,
400 indent=4,
401 separators=( ',', ': ' ) )
402 main.log.debug( "Pending map: " + output )
403 # TODO check something here?
404 else:
405 main.log.error( "pendingMap() returned None" )
406 except ( ValueError, TypeError ):
407 main.log.exception( "Error parsing pending map" )
408 main.log.error( repr( pendingMap ) )
409
410 def appCheck( self ):
411 """
412 Check App IDs on all nodes
413 """
414 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
Jon Hallb9d381e2018-02-05 12:02:10 -0800415 for i in range( 15 ):
416 # TODO modify retry or add a new version that accepts looking for
417 # a value in a return list instead of needing to match the entire
418 # return value to retry
419 appResults = main.Cluster.command( "appToIDCheck" )
420 appCheck = all( i == main.TRUE for i in appResults )
421 if appCheck:
422 break
423 else:
424 time.sleep( 5 )
425
Jon Hallca319892017-06-15 15:25:22 -0700426 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700427 ctrl = main.Cluster.active( 0 )
Jon Hallb9d381e2018-02-05 12:02:10 -0800428 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.pprint( ctrl.apps() ) ) )
429 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.pprint( ctrl.appIDs() ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700430 return appCheck
431
Jon Halle0f0b342017-04-18 11:43:47 -0700432 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
433 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700434 completedValues = main.Cluster.command( "workQueueTotalCompleted",
435 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700436 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700437 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700438 completedResult = all( completedResults )
439 if not completedResult:
440 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
441 workQueueName, completed, completedValues ) )
442
443 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700444 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
445 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700446 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700447 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700448 inProgressResult = all( inProgressResults )
449 if not inProgressResult:
450 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
451 workQueueName, inProgress, inProgressValues ) )
452
453 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700454 pendingValues = main.Cluster.command( "workQueueTotalPending",
455 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700456 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700457 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700458 pendingResult = all( pendingResults )
459 if not pendingResult:
460 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
461 workQueueName, pending, pendingValues ) )
462 return completedResult and inProgressResult and pendingResult
463
Devin Lim58046fa2017-07-05 16:55:00 -0700464 def assignDevices( self, main ):
465 """
466 Assign devices to controllers
467 """
468 import re
Devin Lim58046fa2017-07-05 16:55:00 -0700469 assert main, "main not defined"
470 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700471
472 main.case( "Assigning devices to controllers" )
473 main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
474 "and check that an ONOS node becomes the " + \
475 "master of the device."
476 main.step( "Assign switches to controllers" )
477
Jon Hallca319892017-06-15 15:25:22 -0700478 ipList = main.Cluster.getIps()
Jon Hallab611372018-02-21 15:26:05 -0800479 swList = main.Mininet1.getSwitches().keys()
Devin Lim58046fa2017-07-05 16:55:00 -0700480 main.Mininet1.assignSwController( sw=swList, ip=ipList )
481
482 mastershipCheck = main.TRUE
Jon Hallab611372018-02-21 15:26:05 -0800483 for switch in swList:
484 response = main.Mininet1.getSwController( switch )
Devin Lim58046fa2017-07-05 16:55:00 -0700485 try:
486 main.log.info( str( response ) )
Jon Hallab611372018-02-21 15:26:05 -0800487 for ctrl in main.Cluster.runningNodes:
488 if re.search( "tcp:" + ctrl.ipAddress, response ):
489 mastershipCheck = mastershipCheck and main.TRUE
490 else:
491 main.log.error( "Error, node " + repr( ctrl ) + " is " +
492 "not in the list of controllers " +
493 switch + " is connecting to." )
494 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -0700495 except Exception:
Jon Hallab611372018-02-21 15:26:05 -0800496 main.log.warn( "Error parsing get-controller response" )
497 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -0700498 utilities.assert_equals(
499 expect=main.TRUE,
500 actual=mastershipCheck,
501 onpass="Switch mastership assigned correctly",
502 onfail="Switches not assigned correctly to controllers" )
Jon Hallca319892017-06-15 15:25:22 -0700503
Jon Hallab611372018-02-21 15:26:05 -0800504 # Mappings for attachmentPoints from host mac to deviceID
505 # TODO: make the key a dict with deviceIds and port #'s
506 # FIXME: topo-HA/obelisk specific mappings:
507 # key is mac and value is dpid
508 main.topoMappings = {}
509 for i in range( 1, 29 ): # hosts 1 through 28
510 # set up correct variables:
511 macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
512 if i == 1:
513 deviceId = "1000".zfill( 16 )
514 elif i == 2:
515 deviceId = "2000".zfill( 16 )
516 elif i == 3:
517 deviceId = "3000".zfill( 16 )
518 elif i == 4:
519 deviceId = "3004".zfill( 16 )
520 elif i == 5:
521 deviceId = "5000".zfill( 16 )
522 elif i == 6:
523 deviceId = "6000".zfill( 16 )
524 elif i == 7:
525 deviceId = "6007".zfill( 16 )
526 elif i >= 8 and i <= 17:
527 dpid = '3' + str( i ).zfill( 3 )
528 deviceId = dpid.zfill( 16 )
529 elif i >= 18 and i <= 27:
530 dpid = '6' + str( i ).zfill( 3 )
531 deviceId = dpid.zfill( 16 )
532 elif i == 28:
533 deviceId = "2800".zfill( 16 )
534 main.topoMappings[ macId ] = deviceId
535
Devin Lim58046fa2017-07-05 16:55:00 -0700536 def assignIntents( self, main ):
537 """
538 Assign intents
539 """
540 import time
541 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700542 assert main, "main not defined"
543 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700544 try:
545 main.HAlabels
546 except ( NameError, AttributeError ):
547 main.log.error( "main.HAlabels not defined, setting to []" )
548 main.HAlabels = []
549 try:
550 main.HAdata
551 except ( NameError, AttributeError ):
552 main.log.error( "data not defined, setting to []" )
553 main.HAdata = []
554 main.case( "Adding host Intents" )
555 main.caseExplanation = "Discover hosts by using pingall then " +\
556 "assign predetermined host-to-host intents." +\
557 " After installation, check that the intent" +\
558 " is distributed to all nodes and the state" +\
559 " is INSTALLED"
560
561 # install onos-app-fwd
562 main.step( "Install reactive forwarding app" )
Jon Hall0e240372018-05-02 11:21:57 -0700563 installResults = main.Cluster.next().CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700564 utilities.assert_equals( expect=main.TRUE, actual=installResults,
565 onpass="Install fwd successful",
566 onfail="Install fwd failed" )
567
568 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700569 appCheck = self.appCheck()
570 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700571 onpass="App Ids seem to be correct",
572 onfail="Something is wrong with app Ids" )
573
574 main.step( "Discovering Hosts( Via pingall for now )" )
575 # FIXME: Once we have a host discovery mechanism, use that instead
576 # REACTIVE FWD test
577 pingResult = main.FALSE
578 passMsg = "Reactive Pingall test passed"
579 time1 = time.time()
580 pingResult = main.Mininet1.pingall()
581 time2 = time.time()
582 if not pingResult:
583 main.log.warn( "First pingall failed. Trying again..." )
584 pingResult = main.Mininet1.pingall()
585 passMsg += " on the second try"
586 utilities.assert_equals(
587 expect=main.TRUE,
588 actual=pingResult,
589 onpass=passMsg,
590 onfail="Reactive Pingall failed, " +
591 "one or more ping pairs failed" )
592 main.log.info( "Time for pingall: %2f seconds" %
593 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700594 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700595 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700596 # timeout for fwd flows
597 time.sleep( 11 )
598 # uninstall onos-app-fwd
599 main.step( "Uninstall reactive forwarding app" )
Jon Hall0e240372018-05-02 11:21:57 -0700600 uninstallResult = main.Cluster.next().CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700601 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
602 onpass="Uninstall fwd successful",
603 onfail="Uninstall fwd failed" )
604
605 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700606 appCheck2 = self.appCheck()
607 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700608 onpass="App Ids seem to be correct",
609 onfail="Something is wrong with app Ids" )
610
611 main.step( "Add host intents via cli" )
612 intentIds = []
613 # TODO: move the host numbers to params
614 # Maybe look at all the paths we ping?
615 intentAddResult = True
616 hostResult = main.TRUE
617 for i in range( 8, 18 ):
618 main.log.info( "Adding host intent between h" + str( i ) +
619 " and h" + str( i + 10 ) )
620 host1 = "00:00:00:00:00:" + \
621 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
622 host2 = "00:00:00:00:00:" + \
623 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
624 # NOTE: getHost can return None
Jon Hall0e240372018-05-02 11:21:57 -0700625 host1Dict = main.Cluster.next().CLI.getHost( host1 )
626 host2Dict = main.Cluster.next().CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700627 host1Id = None
628 host2Id = None
629 if host1Dict and host2Dict:
630 host1Id = host1Dict.get( 'id', None )
631 host2Id = host2Dict.get( 'id', None )
632 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700633 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700634 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700635 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700636 if tmpId:
637 main.log.info( "Added intent with id: " + tmpId )
638 intentIds.append( tmpId )
639 else:
640 main.log.error( "addHostIntent returned: " +
641 repr( tmpId ) )
642 else:
643 main.log.error( "Error, getHost() failed for h" + str( i ) +
644 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700645 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700646 try:
Jon Hallca319892017-06-15 15:25:22 -0700647 output = json.dumps( json.loads( hosts ),
648 sort_keys=True,
649 indent=4,
650 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700651 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700652 output = repr( hosts )
653 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700654 hostResult = main.FALSE
655 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
656 onpass="Found a host id for each host",
657 onfail="Error looking up host ids" )
658
659 intentStart = time.time()
Jon Hall0e240372018-05-02 11:21:57 -0700660 onosIds = main.Cluster.next().getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700661 main.log.info( "Submitted intents: " + str( intentIds ) )
662 main.log.info( "Intents in ONOS: " + str( onosIds ) )
663 for intent in intentIds:
664 if intent in onosIds:
665 pass # intent submitted is in onos
666 else:
667 intentAddResult = False
668 if intentAddResult:
669 intentStop = time.time()
670 else:
671 intentStop = None
672 # Print the intent states
Jon Hall0e240372018-05-02 11:21:57 -0700673 intents = main.Cluster.next().CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700674 intentStates = []
675 installedCheck = True
676 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
677 count = 0
678 try:
679 for intent in json.loads( intents ):
680 state = intent.get( 'state', None )
681 if "INSTALLED" not in state:
682 installedCheck = False
683 intentId = intent.get( 'id', None )
684 intentStates.append( ( intentId, state ) )
685 except ( ValueError, TypeError ):
686 main.log.exception( "Error parsing intents" )
687 # add submitted intents not in the store
688 tmplist = [ i for i, s in intentStates ]
689 missingIntents = False
690 for i in intentIds:
691 if i not in tmplist:
692 intentStates.append( ( i, " - " ) )
693 missingIntents = True
694 intentStates.sort()
695 for i, s in intentStates:
696 count += 1
697 main.log.info( "%-6s%-15s%-15s" %
698 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700699 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700700
701 intentAddResult = bool( intentAddResult and not missingIntents and
702 installedCheck )
703 if not intentAddResult:
704 main.log.error( "Error in pushing host intents to ONOS" )
705
706 main.step( "Intent Anti-Entropy dispersion" )
707 for j in range( 100 ):
708 correct = True
709 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700710 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700711 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700712 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700713 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700714 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700715 str( sorted( onosIds ) ) )
716 if sorted( ids ) != sorted( intentIds ):
717 main.log.warn( "Set of intent IDs doesn't match" )
718 correct = False
719 break
720 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700721 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700722 for intent in intents:
723 if intent[ 'state' ] != "INSTALLED":
724 main.log.warn( "Intent " + intent[ 'id' ] +
725 " is " + intent[ 'state' ] )
726 correct = False
727 break
728 if correct:
729 break
730 else:
731 time.sleep( 1 )
732 if not intentStop:
733 intentStop = time.time()
734 global gossipTime
735 gossipTime = intentStop - intentStart
736 main.log.info( "It took about " + str( gossipTime ) +
737 " seconds for all intents to appear in each node" )
738 append = False
739 title = "Gossip Intents"
740 count = 1
741 while append is False:
742 curTitle = title + str( count )
743 if curTitle not in main.HAlabels:
744 main.HAlabels.append( curTitle )
745 main.HAdata.append( str( gossipTime ) )
746 append = True
747 else:
748 count += 1
749 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700750 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700751 utilities.assert_greater_equals(
752 expect=maxGossipTime, actual=gossipTime,
753 onpass="ECM anti-entropy for intents worked within " +
754 "expected time",
755 onfail="Intent ECM anti-entropy took too long. " +
756 "Expected time:{}, Actual time:{}".format( maxGossipTime,
757 gossipTime ) )
758 if gossipTime <= maxGossipTime:
759 intentAddResult = True
760
Jon Hallca319892017-06-15 15:25:22 -0700761 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700762 if not intentAddResult or "key" in pendingMap:
Devin Lim58046fa2017-07-05 16:55:00 -0700763 installedCheck = True
764 main.log.info( "Sleeping 60 seconds to see if intents are found" )
765 time.sleep( 60 )
Jon Hall0e240372018-05-02 11:21:57 -0700766 onosIds = main.Cluster.next().getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700767 main.log.info( "Submitted intents: " + str( intentIds ) )
768 main.log.info( "Intents in ONOS: " + str( onosIds ) )
769 # Print the intent states
Jon Hall0e240372018-05-02 11:21:57 -0700770 intents = main.Cluster.next().CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700771 intentStates = []
772 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
773 count = 0
774 try:
775 for intent in json.loads( intents ):
776 # Iter through intents of a node
777 state = intent.get( 'state', None )
778 if "INSTALLED" not in state:
779 installedCheck = False
780 intentId = intent.get( 'id', None )
781 intentStates.append( ( intentId, state ) )
782 except ( ValueError, TypeError ):
783 main.log.exception( "Error parsing intents" )
784 # add submitted intents not in the store
785 tmplist = [ i for i, s in intentStates ]
786 for i in intentIds:
787 if i not in tmplist:
788 intentStates.append( ( i, " - " ) )
789 intentStates.sort()
790 for i, s in intentStates:
791 count += 1
792 main.log.info( "%-6s%-15s%-15s" %
793 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700794 self.topicsCheck( [ "org.onosproject.election" ] )
795 self.partitionsCheck()
796 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700797
Jon Hallca319892017-06-15 15:25:22 -0700798 def pingAcrossHostIntent( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -0700799 """
800 Ping across added host intents
801 """
802 import json
803 import time
Devin Lim58046fa2017-07-05 16:55:00 -0700804 assert main, "main not defined"
805 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700806 main.case( "Verify connectivity by sending traffic across Intents" )
807 main.caseExplanation = "Ping across added host intents to check " +\
808 "functionality and check the state of " +\
809 "the intent"
810
Jon Hallca319892017-06-15 15:25:22 -0700811 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700812 main.step( "Check Intent state" )
813 installedCheck = False
814 loopCount = 0
Jon Hall5d5876e2017-11-30 09:33:16 -0800815 while not installedCheck and loopCount < 90:
Devin Lim58046fa2017-07-05 16:55:00 -0700816 installedCheck = True
817 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700818 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700819 intentStates = []
820 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
821 count = 0
822 # Iter through intents of a node
823 try:
824 for intent in json.loads( intents ):
825 state = intent.get( 'state', None )
826 if "INSTALLED" not in state:
827 installedCheck = False
Jon Hall8bafdc02017-09-05 11:36:26 -0700828 main.log.debug( "Failed intent: " + str( intent ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700829 intentId = intent.get( 'id', None )
830 intentStates.append( ( intentId, state ) )
831 except ( ValueError, TypeError ):
832 main.log.exception( "Error parsing intents." )
833 # Print states
834 intentStates.sort()
835 for i, s in intentStates:
836 count += 1
837 main.log.info( "%-6s%-15s%-15s" %
838 ( str( count ), str( i ), str( s ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700839 if not installedCheck:
840 time.sleep( 1 )
841 loopCount += 1
842 utilities.assert_equals( expect=True, actual=installedCheck,
843 onpass="Intents are all INSTALLED",
844 onfail="Intents are not all in " +
845 "INSTALLED state" )
846
847 main.step( "Ping across added host intents" )
848 PingResult = main.TRUE
849 for i in range( 8, 18 ):
850 ping = main.Mininet1.pingHost( src="h" + str( i ),
851 target="h" + str( i + 10 ) )
852 PingResult = PingResult and ping
853 if ping == main.FALSE:
854 main.log.warn( "Ping failed between h" + str( i ) +
855 " and h" + str( i + 10 ) )
856 elif ping == main.TRUE:
857 main.log.info( "Ping test passed!" )
858 # Don't set PingResult or you'd override failures
859 if PingResult == main.FALSE:
860 main.log.error(
861 "Intents have not been installed correctly, pings failed." )
862 # TODO: pretty print
Devin Lim58046fa2017-07-05 16:55:00 -0700863 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700864 tmpIntents = onosCli.CLI.intents()
Jon Hallca319892017-06-15 15:25:22 -0700865 output = json.dumps( json.loads( tmpIntents ),
866 sort_keys=True,
867 indent=4,
868 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700869 except ( ValueError, TypeError ):
Jon Hall4173b242017-09-12 17:04:38 -0700870 output = repr( tmpIntents )
Jon Hallca319892017-06-15 15:25:22 -0700871 main.log.debug( "ONOS1 intents: " + output )
Devin Lim58046fa2017-07-05 16:55:00 -0700872 utilities.assert_equals(
873 expect=main.TRUE,
874 actual=PingResult,
875 onpass="Intents have been installed correctly and pings work",
876 onfail="Intents have not been installed correctly, pings failed." )
877
878 main.step( "Check leadership of topics" )
Jon Hallca319892017-06-15 15:25:22 -0700879 topicsCheck = self.topicsCheck()
880 utilities.assert_equals( expect=False, actual=topicsCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700881 onpass="intent Partitions is in leaders",
Jon Hallca319892017-06-15 15:25:22 -0700882 onfail="Some topics were lost" )
883 self.partitionsCheck()
884 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700885
886 if not installedCheck:
887 main.log.info( "Waiting 60 seconds to see if the state of " +
888 "intents change" )
889 time.sleep( 60 )
890 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700891 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700892 intentStates = []
893 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
894 count = 0
895 # Iter through intents of a node
896 try:
897 for intent in json.loads( intents ):
898 state = intent.get( 'state', None )
899 if "INSTALLED" not in state:
900 installedCheck = False
901 intentId = intent.get( 'id', None )
902 intentStates.append( ( intentId, state ) )
903 except ( ValueError, TypeError ):
904 main.log.exception( "Error parsing intents." )
905 intentStates.sort()
906 for i, s in intentStates:
907 count += 1
908 main.log.info( "%-6s%-15s%-15s" %
909 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700910 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700911
Devin Lim58046fa2017-07-05 16:55:00 -0700912 main.step( "Wait a minute then ping again" )
913 # the wait is above
914 PingResult = main.TRUE
915 for i in range( 8, 18 ):
916 ping = main.Mininet1.pingHost( src="h" + str( i ),
917 target="h" + str( i + 10 ) )
918 PingResult = PingResult and ping
919 if ping == main.FALSE:
920 main.log.warn( "Ping failed between h" + str( i ) +
921 " and h" + str( i + 10 ) )
922 elif ping == main.TRUE:
923 main.log.info( "Ping test passed!" )
924 # Don't set PingResult or you'd override failures
925 if PingResult == main.FALSE:
926 main.log.error(
927 "Intents have not been installed correctly, pings failed." )
928 # TODO: pretty print
Jon Hallca319892017-06-15 15:25:22 -0700929 main.log.warn( str( onosCli.name ) + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -0700930 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700931 tmpIntents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700932 main.log.warn( json.dumps( json.loads( tmpIntents ),
933 sort_keys=True,
934 indent=4,
935 separators=( ',', ': ' ) ) )
936 except ( ValueError, TypeError ):
937 main.log.warn( repr( tmpIntents ) )
938 utilities.assert_equals(
939 expect=main.TRUE,
940 actual=PingResult,
941 onpass="Intents have been installed correctly and pings work",
942 onfail="Intents have not been installed correctly, pings failed." )
943
Devin Lim142b5342017-07-20 15:22:39 -0700944 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700945 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700946 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700947 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700948 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700949 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -0700950 actual=rolesNotNull,
951 onpass="Each device has a master",
952 onfail="Some devices don't have a master assigned" )
953
Devin Lim142b5342017-07-20 15:22:39 -0700954 def checkTheRole( self ):
955 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -0700956 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -0700957 consistentMastership = True
958 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -0700959 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -0700960 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700961 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -0700962 main.log.error( "Error in getting " + node + " roles" )
963 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -0700964 repr( ONOSMastership[ i ] ) )
965 rolesResults = False
966 utilities.assert_equals(
967 expect=True,
968 actual=rolesResults,
969 onpass="No error in reading roles output",
970 onfail="Error in reading roles from ONOS" )
971
972 main.step( "Check for consistency in roles from each controller" )
973 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
974 main.log.info(
975 "Switch roles are consistent across all ONOS nodes" )
976 else:
977 consistentMastership = False
978 utilities.assert_equals(
979 expect=True,
980 actual=consistentMastership,
981 onpass="Switch roles are consistent across all ONOS nodes",
982 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -0700983 return ONOSMastership, rolesResults, consistentMastership
984
985 def checkingIntents( self ):
986 main.step( "Get the intents from each controller" )
987 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
988 intentsResults = True
989 for i in range( len( ONOSIntents ) ):
990 node = str( main.Cluster.active( i ) )
991 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
992 main.log.error( "Error in getting " + node + " intents" )
993 main.log.warn( node + " intents response: " +
994 repr( ONOSIntents[ i ] ) )
995 intentsResults = False
996 utilities.assert_equals(
997 expect=True,
998 actual=intentsResults,
999 onpass="No error in reading intents output",
1000 onfail="Error in reading intents from ONOS" )
1001 return ONOSIntents, intentsResults
1002
1003 def readingState( self, main ):
1004 """
1005 Reading state of ONOS
1006 """
1007 import json
Devin Lim142b5342017-07-20 15:22:39 -07001008 assert main, "main not defined"
1009 assert utilities.assert_equals, "utilities.assert_equals not defined"
1010 try:
1011 from tests.dependencies.topology import Topology
1012 except ImportError:
1013 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001014 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001015 try:
1016 main.topoRelated
1017 except ( NameError, AttributeError ):
1018 main.topoRelated = Topology()
1019 main.case( "Setting up and gathering data for current state" )
1020 # The general idea for this test case is to pull the state of
1021 # ( intents,flows, topology,... ) from each ONOS node
1022 # We can then compare them with each other and also with past states
1023
1024 global mastershipState
1025 mastershipState = '[]'
1026
1027 self.checkRoleNotNull()
1028
1029 main.step( "Get the Mastership of each switch from each controller" )
1030 mastershipCheck = main.FALSE
1031
1032 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001033
1034 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001035 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001036 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001037 try:
1038 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001039 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001040 json.dumps(
1041 json.loads( ONOSMastership[ i ] ),
1042 sort_keys=True,
1043 indent=4,
1044 separators=( ',', ': ' ) ) )
1045 except ( ValueError, TypeError ):
1046 main.log.warn( repr( ONOSMastership[ i ] ) )
1047 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001048 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001049 mastershipState = ONOSMastership[ 0 ]
1050
Devin Lim58046fa2017-07-05 16:55:00 -07001051 global intentState
1052 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001053 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001054 intentCheck = main.FALSE
1055 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001056
Devin Lim58046fa2017-07-05 16:55:00 -07001057 main.step( "Check for consistency in Intents from each controller" )
1058 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1059 main.log.info( "Intents are consistent across all ONOS " +
1060 "nodes" )
1061 else:
1062 consistentIntents = False
1063 main.log.error( "Intents not consistent" )
1064 utilities.assert_equals(
1065 expect=True,
1066 actual=consistentIntents,
1067 onpass="Intents are consistent across all ONOS nodes",
1068 onfail="ONOS nodes have different views of intents" )
1069
1070 if intentsResults:
1071 # Try to make it easy to figure out what is happening
1072 #
1073 # Intent ONOS1 ONOS2 ...
1074 # 0x01 INSTALLED INSTALLING
1075 # ... ... ...
1076 # ... ... ...
1077 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001078 for ctrl in main.Cluster.active():
1079 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001080 main.log.warn( title )
1081 # get all intent keys in the cluster
1082 keys = []
1083 try:
1084 # Get the set of all intent keys
1085 for nodeStr in ONOSIntents:
1086 node = json.loads( nodeStr )
1087 for intent in node:
1088 keys.append( intent.get( 'id' ) )
1089 keys = set( keys )
1090 # For each intent key, print the state on each node
1091 for key in keys:
1092 row = "%-13s" % key
1093 for nodeStr in ONOSIntents:
1094 node = json.loads( nodeStr )
1095 for intent in node:
1096 if intent.get( 'id', "Error" ) == key:
1097 row += "%-15s" % intent.get( 'state' )
1098 main.log.warn( row )
1099 # End of intent state table
1100 except ValueError as e:
1101 main.log.exception( e )
1102 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1103
1104 if intentsResults and not consistentIntents:
1105 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001106 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001107 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1108 sort_keys=True,
1109 indent=4,
1110 separators=( ',', ': ' ) ) )
1111 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001112 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001113 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001114 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001115 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1116 sort_keys=True,
1117 indent=4,
1118 separators=( ',', ': ' ) ) )
1119 else:
Jon Hallca319892017-06-15 15:25:22 -07001120 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001121 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001122 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001123 intentState = ONOSIntents[ 0 ]
1124
1125 main.step( "Get the flows from each controller" )
1126 global flowState
1127 flowState = []
Jon Hall4173b242017-09-12 17:04:38 -07001128 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001129 ONOSFlowsJson = []
1130 flowCheck = main.FALSE
1131 consistentFlows = True
1132 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001133 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001134 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001135 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001136 main.log.error( "Error in getting " + node + " flows" )
1137 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001138 repr( ONOSFlows[ i ] ) )
1139 flowsResults = False
1140 ONOSFlowsJson.append( None )
1141 else:
1142 try:
1143 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1144 except ( ValueError, TypeError ):
1145 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001146 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001147 " response as json." )
1148 main.log.error( repr( ONOSFlows[ i ] ) )
1149 ONOSFlowsJson.append( None )
1150 flowsResults = False
1151 utilities.assert_equals(
1152 expect=True,
1153 actual=flowsResults,
1154 onpass="No error in reading flows output",
1155 onfail="Error in reading flows from ONOS" )
1156
1157 main.step( "Check for consistency in Flows from each controller" )
1158 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1159 if all( tmp ):
1160 main.log.info( "Flow count is consistent across all ONOS nodes" )
1161 else:
1162 consistentFlows = False
1163 utilities.assert_equals(
1164 expect=True,
1165 actual=consistentFlows,
1166 onpass="The flow count is consistent across all ONOS nodes",
1167 onfail="ONOS nodes have different flow counts" )
1168
1169 if flowsResults and not consistentFlows:
1170 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001171 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001172 try:
1173 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001174 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001175 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1176 indent=4, separators=( ',', ': ' ) ) )
1177 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001178 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001179 repr( ONOSFlows[ i ] ) )
1180 elif flowsResults and consistentFlows:
1181 flowCheck = main.TRUE
1182 flowState = ONOSFlows[ 0 ]
1183
1184 main.step( "Get the OF Table entries" )
1185 global flows
Jon Hallab611372018-02-21 15:26:05 -08001186 flows = {}
1187 for swName, swDetails in main.Mininet1.getSwitches().items():
1188 main.log.debug( repr( swName ) + repr( swDetails ) )
1189 flows[ swName ] = main.Mininet1.getFlowTable( swName, version="1.3", debug=False )
Devin Lim58046fa2017-07-05 16:55:00 -07001190 if flowCheck == main.FALSE:
1191 for table in flows:
1192 main.log.warn( table )
1193 # TODO: Compare switch flow tables with ONOS flow tables
1194
1195 main.step( "Start continuous pings" )
Jon Hallab611372018-02-21 15:26:05 -08001196 if main.params.get( 'PING', False ):
1197 # TODO: Make this more dynamic and less hardcoded, ie, # or ping pairs
1198 main.Mininet2.pingLong(
1199 src=main.params[ 'PING' ][ 'source1' ],
1200 target=main.params[ 'PING' ][ 'target1' ],
1201 pingTime=500 )
1202 main.Mininet2.pingLong(
1203 src=main.params[ 'PING' ][ 'source2' ],
1204 target=main.params[ 'PING' ][ 'target2' ],
1205 pingTime=500 )
1206 main.Mininet2.pingLong(
1207 src=main.params[ 'PING' ][ 'source3' ],
1208 target=main.params[ 'PING' ][ 'target3' ],
1209 pingTime=500 )
1210 main.Mininet2.pingLong(
1211 src=main.params[ 'PING' ][ 'source4' ],
1212 target=main.params[ 'PING' ][ 'target4' ],
1213 pingTime=500 )
1214 main.Mininet2.pingLong(
1215 src=main.params[ 'PING' ][ 'source5' ],
1216 target=main.params[ 'PING' ][ 'target5' ],
1217 pingTime=500 )
1218 main.Mininet2.pingLong(
1219 src=main.params[ 'PING' ][ 'source6' ],
1220 target=main.params[ 'PING' ][ 'target6' ],
1221 pingTime=500 )
1222 main.Mininet2.pingLong(
1223 src=main.params[ 'PING' ][ 'source7' ],
1224 target=main.params[ 'PING' ][ 'target7' ],
1225 pingTime=500 )
1226 main.Mininet2.pingLong(
1227 src=main.params[ 'PING' ][ 'source8' ],
1228 target=main.params[ 'PING' ][ 'target8' ],
1229 pingTime=500 )
1230 main.Mininet2.pingLong(
1231 src=main.params[ 'PING' ][ 'source9' ],
1232 target=main.params[ 'PING' ][ 'target9' ],
1233 pingTime=500 )
1234 main.Mininet2.pingLong(
1235 src=main.params[ 'PING' ][ 'source10' ],
1236 target=main.params[ 'PING' ][ 'target10' ],
1237 pingTime=500 )
Devin Lim58046fa2017-07-05 16:55:00 -07001238
1239 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001240 devices = main.topoRelated.getAll( "devices" )
1241 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1242 ports = main.topoRelated.getAll( "ports" )
1243 links = main.topoRelated.getAll( "links" )
1244 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001245 # Compare json objects for hosts and dataplane clusters
1246
1247 # hosts
1248 main.step( "Host view is consistent across ONOS nodes" )
1249 consistentHostsResult = main.TRUE
1250 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001251 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001252 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1253 if hosts[ controller ] == hosts[ 0 ]:
1254 continue
1255 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001256 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001257 controllerStr +
1258                                          " are inconsistent with ONOS1" )
1259 main.log.warn( repr( hosts[ controller ] ) )
1260 consistentHostsResult = main.FALSE
1261
1262 else:
Jon Hallca319892017-06-15 15:25:22 -07001263 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001264 controllerStr )
1265 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001266 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001267 " hosts response: " +
1268 repr( hosts[ controller ] ) )
1269 utilities.assert_equals(
1270 expect=main.TRUE,
1271 actual=consistentHostsResult,
1272 onpass="Hosts view is consistent across all ONOS nodes",
1273 onfail="ONOS nodes have different views of hosts" )
1274
1275 main.step( "Each host has an IP address" )
1276 ipResult = main.TRUE
1277 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001278 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001279 if hosts[ controller ]:
1280 for host in hosts[ controller ]:
1281 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001282 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001283 controllerStr + ": " + str( host ) )
1284 ipResult = main.FALSE
1285 utilities.assert_equals(
1286 expect=main.TRUE,
1287 actual=ipResult,
1288                                  onpass="The IPs of the hosts aren't empty",
1289                                  onfail="The IP of at least one host is missing" )
1290
1291 # Strongly connected clusters of devices
1292 main.step( "Cluster view is consistent across ONOS nodes" )
1293 consistentClustersResult = main.TRUE
1294 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001295 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001296 if "Error" not in clusters[ controller ]:
1297 if clusters[ controller ] == clusters[ 0 ]:
1298 continue
1299 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001300 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001301                                  " are inconsistent with ONOS1" )
1302 consistentClustersResult = main.FALSE
1303
1304 else:
1305 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001306 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001307 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001308 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001309 " clusters response: " +
1310 repr( clusters[ controller ] ) )
1311 utilities.assert_equals(
1312 expect=main.TRUE,
1313 actual=consistentClustersResult,
1314 onpass="Clusters view is consistent across all ONOS nodes",
1315 onfail="ONOS nodes have different views of clusters" )
1316 if not consistentClustersResult:
1317 main.log.debug( clusters )
1318
1319 # there should always only be one cluster
1320 main.step( "Cluster view correct across ONOS nodes" )
1321 try:
1322 numClusters = len( json.loads( clusters[ 0 ] ) )
1323 except ( ValueError, TypeError ):
1324 main.log.exception( "Error parsing clusters[0]: " +
1325 repr( clusters[ 0 ] ) )
1326 numClusters = "ERROR"
1327 utilities.assert_equals(
1328 expect=1,
1329 actual=numClusters,
1330 onpass="ONOS shows 1 SCC",
1331 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1332
1333 main.step( "Comparing ONOS topology to MN" )
1334 devicesResults = main.TRUE
1335 linksResults = main.TRUE
1336 hostsResults = main.TRUE
1337 mnSwitches = main.Mininet1.getSwitches()
1338 mnLinks = main.Mininet1.getLinks()
1339 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001340 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001341 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001342 currentDevicesResult = main.topoRelated.compareDevicePort(
1343 main.Mininet1, controller,
1344 mnSwitches, devices, ports )
1345 utilities.assert_equals( expect=main.TRUE,
1346 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001347 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001348 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001349 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001350 " Switches view is incorrect" )
1351
1352 currentLinksResult = main.topoRelated.compareBase( links, controller,
1353 main.Mininet1.compareLinks,
1354 [ mnSwitches, mnLinks ] )
1355 utilities.assert_equals( expect=main.TRUE,
1356 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001357 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001358 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001359 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001360 " links view is incorrect" )
1361
1362 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1363 currentHostsResult = main.Mininet1.compareHosts(
1364 mnHosts,
1365 hosts[ controller ] )
1366 else:
1367 currentHostsResult = main.FALSE
1368 utilities.assert_equals( expect=main.TRUE,
1369 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001370 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001371 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001372 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001373 " hosts don't match Mininet" )
1374
1375 devicesResults = devicesResults and currentDevicesResult
1376 linksResults = linksResults and currentLinksResult
1377 hostsResults = hostsResults and currentHostsResult
1378
1379 main.step( "Device information is correct" )
1380 utilities.assert_equals(
1381 expect=main.TRUE,
1382 actual=devicesResults,
1383 onpass="Device information is correct",
1384 onfail="Device information is incorrect" )
1385
1386 main.step( "Links are correct" )
1387 utilities.assert_equals(
1388 expect=main.TRUE,
1389 actual=linksResults,
1390                             onpass="Links are correct",
1391 onfail="Links are incorrect" )
1392
1393 main.step( "Hosts are correct" )
1394 utilities.assert_equals(
1395 expect=main.TRUE,
1396 actual=hostsResults,
1397 onpass="Hosts are correct",
1398 onfail="Hosts are incorrect" )
1399
1400 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001401 """
1402 Check for basic functionality with distributed primitives
1403 """
Jon Halle0f0b342017-04-18 11:43:47 -07001404 # TODO: Clean this up so it's not just a cut/paste from the test
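        # Sketch of one possible cleanup, not wired in: the repeated
        # "get the set, check membership and size" blocks below could be folded
        # into a local helper along these lines. setCheck is a hypothetical name
        # and the body only mirrors the checks already done inline:
        #   def setCheck( setName, expectedSet ):
        #       getResponses = main.Cluster.command( "setTestGet", args=[ setName ] )
        #       sizeResponses = main.Cluster.command( "setTestSize", args=[ setName ] )
        #       result = main.TRUE
        #       for i in range( len( main.Cluster.active() ) ):
        #           node = str( main.Cluster.active( i ) )
        #           if not isinstance( getResponses[ i ], list ) or \
        #                   set( getResponses[ i ] ) != expectedSet or \
        #                   sizeResponses[ i ] != len( expectedSet ):
        #               main.log.error( node + " has an incorrect view of " + setName )
        #               result = main.FALSE
        #       return result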
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001405 try:
1406 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001407 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001408 assert main.pCounterName, "main.pCounterName not defined"
1409 assert main.onosSetName, "main.onosSetName not defined"
1410 # NOTE: assert fails if value is 0/None/Empty/False
1411 try:
1412 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001413 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001414 main.log.error( "main.pCounterValue not defined, setting to 0" )
1415 main.pCounterValue = 0
1416 try:
1417 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001418 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001419 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001420 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001421 # Variables for the distributed primitives tests. These are local only
1422 addValue = "a"
1423 addAllValue = "a b c d e f"
1424 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001425 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001426 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001427 workQueueName = "TestON-Queue"
1428 workQueueCompleted = 0
1429 workQueueInProgress = 0
1430 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001431
1432 description = "Check for basic functionality with distributed " +\
1433 "primitives"
1434 main.case( description )
1435 main.caseExplanation = "Test the methods of the distributed " +\
1436                               "primitives (counters and sets) through the CLI"
1437 # DISTRIBUTED ATOMIC COUNTERS
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001438 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001439 pCounters = main.Cluster.command( "counterTestAddAndGet",
1440 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001441 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001442 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001443 main.pCounterValue += 1
1444 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001445 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001446 pCounterResults = True
1447 for i in addedPValues:
1448 tmpResult = i in pCounters
1449 pCounterResults = pCounterResults and tmpResult
1450 if not tmpResult:
1451 main.log.error( str( i ) + " is not in partitioned "
1452 "counter incremented results" )
1453 utilities.assert_equals( expect=True,
1454 actual=pCounterResults,
1455 onpass="Default counter incremented",
1456 onfail="Error incrementing default" +
1457 " counter" )
1458
1459 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001460 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1461 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001462 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001463 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001464 addedPValues.append( main.pCounterValue )
1465 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001466 # Check that counter incremented numController times
1467 pCounterResults = True
1468 for i in addedPValues:
1469 tmpResult = i in pCounters
1470 pCounterResults = pCounterResults and tmpResult
1471 if not tmpResult:
1472 main.log.error( str( i ) + " is not in partitioned "
1473 "counter incremented results" )
1474 utilities.assert_equals( expect=True,
1475 actual=pCounterResults,
1476 onpass="Default counter incremented",
1477 onfail="Error incrementing default" +
1478 " counter" )
1479
1480 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001481 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001482 utilities.assert_equals( expect=main.TRUE,
1483 actual=incrementCheck,
1484 onpass="Added counters are correct",
1485 onfail="Added counters are incorrect" )
1486
1487 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001488 pCounters = main.Cluster.command( "counterTestAddAndGet",
1489 args=[ main.pCounterName ],
1490 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001491 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001492 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001493 main.pCounterValue += -8
1494 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001495 # Check that counter incremented numController times
1496 pCounterResults = True
1497 for i in addedPValues:
1498 tmpResult = i in pCounters
1499 pCounterResults = pCounterResults and tmpResult
1500 if not tmpResult:
1501 main.log.error( str( i ) + " is not in partitioned "
1502 "counter incremented results" )
1503 utilities.assert_equals( expect=True,
1504 actual=pCounterResults,
1505 onpass="Default counter incremented",
1506 onfail="Error incrementing default" +
1507 " counter" )
1508
1509 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001510 pCounters = main.Cluster.command( "counterTestAddAndGet",
1511 args=[ main.pCounterName ],
1512 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001513 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001514 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001515 main.pCounterValue += 5
1516 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001517
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001518 # Check that counter incremented numController times
1519 pCounterResults = True
1520 for i in addedPValues:
1521 tmpResult = i in pCounters
1522 pCounterResults = pCounterResults and tmpResult
1523 if not tmpResult:
1524 main.log.error( str( i ) + " is not in partitioned "
1525 "counter incremented results" )
1526 utilities.assert_equals( expect=True,
1527 actual=pCounterResults,
1528 onpass="Default counter incremented",
1529 onfail="Error incrementing default" +
1530 " counter" )
1531
1532 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001533 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1534 args=[ main.pCounterName ],
1535 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001536 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001537 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001538 addedPValues.append( main.pCounterValue )
1539 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001540 # Check that counter incremented numController times
1541 pCounterResults = True
1542 for i in addedPValues:
1543 tmpResult = i in pCounters
1544 pCounterResults = pCounterResults and tmpResult
1545 if not tmpResult:
1546 main.log.error( str( i ) + " is not in partitioned "
1547 "counter incremented results" )
1548 utilities.assert_equals( expect=True,
1549 actual=pCounterResults,
1550 onpass="Default counter incremented",
1551 onfail="Error incrementing default" +
1552 " counter" )
1553
1554 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001555 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001556 utilities.assert_equals( expect=main.TRUE,
1557 actual=incrementCheck,
1558 onpass="Added counters are correct",
1559 onfail="Added counters are incorrect" )
1560
1561 # DISTRIBUTED SETS
1562 main.step( "Distributed Set get" )
1563 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001564 getResponses = main.Cluster.command( "setTestGet",
1565 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001566 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001567 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001568 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001569 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001570 current = set( getResponses[ i ] )
1571 if len( current ) == len( getResponses[ i ] ):
1572 # no repeats
1573 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001574 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001575 " has incorrect view" +
1576 " of set " + main.onosSetName + ":\n" +
1577 str( getResponses[ i ] ) )
1578 main.log.debug( "Expected: " + str( main.onosSet ) )
1579 main.log.debug( "Actual: " + str( current ) )
1580 getResults = main.FALSE
1581 else:
1582 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001583 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001584 " has repeat elements in" +
1585 " set " + main.onosSetName + ":\n" +
1586 str( getResponses[ i ] ) )
1587 getResults = main.FALSE
1588 elif getResponses[ i ] == main.ERROR:
1589 getResults = main.FALSE
1590 utilities.assert_equals( expect=main.TRUE,
1591 actual=getResults,
1592 onpass="Set elements are correct",
1593 onfail="Set elements are incorrect" )
1594
1595 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001596 sizeResponses = main.Cluster.command( "setTestSize",
1597 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001598 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001599 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001600 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001601 if size != sizeResponses[ i ]:
1602 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001603 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001604 " expected a size of " + str( size ) +
1605 " for set " + main.onosSetName +
1606 " but got " + str( sizeResponses[ i ] ) )
1607 utilities.assert_equals( expect=main.TRUE,
1608 actual=sizeResults,
1609 onpass="Set sizes are correct",
1610 onfail="Set sizes are incorrect" )
1611
1612 main.step( "Distributed Set add()" )
1613 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001614 addResponses = main.Cluster.command( "setTestAdd",
1615 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001616 # main.TRUE = successfully changed the set
1617 # main.FALSE = action resulted in no change in set
1618 # main.ERROR - Some error in executing the function
1619 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001620 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001621 if addResponses[ i ] == main.TRUE:
1622 # All is well
1623 pass
1624 elif addResponses[ i ] == main.FALSE:
1625 # Already in set, probably fine
1626 pass
1627 elif addResponses[ i ] == main.ERROR:
1628 # Error in execution
1629 addResults = main.FALSE
1630 else:
1631 # unexpected result
1632 addResults = main.FALSE
1633 if addResults != main.TRUE:
1634 main.log.error( "Error executing set add" )
1635
1636 # Check if set is still correct
1637 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001638 getResponses = main.Cluster.command( "setTestGet",
1639 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001640 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001641 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001642 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001643 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001644 current = set( getResponses[ i ] )
1645 if len( current ) == len( getResponses[ i ] ):
1646 # no repeats
1647 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001648 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001649 " of set " + main.onosSetName + ":\n" +
1650 str( getResponses[ i ] ) )
1651 main.log.debug( "Expected: " + str( main.onosSet ) )
1652 main.log.debug( "Actual: " + str( current ) )
1653 getResults = main.FALSE
1654 else:
1655 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001656 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001657 " set " + main.onosSetName + ":\n" +
1658 str( getResponses[ i ] ) )
1659 getResults = main.FALSE
1660 elif getResponses[ i ] == main.ERROR:
1661 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001662 sizeResponses = main.Cluster.command( "setTestSize",
1663 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001664 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001665 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001666 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001667 if size != sizeResponses[ i ]:
1668 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001669 main.log.error( node + " expected a size of " +
1670 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001671 " but got " + str( sizeResponses[ i ] ) )
1672 addResults = addResults and getResults and sizeResults
1673 utilities.assert_equals( expect=main.TRUE,
1674 actual=addResults,
1675 onpass="Set add correct",
1676 onfail="Set add was incorrect" )
1677
1678 main.step( "Distributed Set addAll()" )
1679 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001680 addResponses = main.Cluster.command( "setTestAdd",
1681 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001682 # main.TRUE = successfully changed the set
1683 # main.FALSE = action resulted in no change in set
1684 # main.ERROR - Some error in executing the function
1685 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001686 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001687 if addResponses[ i ] == main.TRUE:
1688 # All is well
1689 pass
1690 elif addResponses[ i ] == main.FALSE:
1691 # Already in set, probably fine
1692 pass
1693 elif addResponses[ i ] == main.ERROR:
1694 # Error in execution
1695 addAllResults = main.FALSE
1696 else:
1697 # unexpected result
1698 addAllResults = main.FALSE
1699 if addAllResults != main.TRUE:
1700 main.log.error( "Error executing set addAll" )
1701
1702 # Check if set is still correct
1703 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001704 getResponses = main.Cluster.command( "setTestGet",
1705 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001706 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001707 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001708 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001709 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001710 current = set( getResponses[ i ] )
1711 if len( current ) == len( getResponses[ i ] ):
1712 # no repeats
1713 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001714 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001715 " of set " + main.onosSetName + ":\n" +
1716 str( getResponses[ i ] ) )
1717 main.log.debug( "Expected: " + str( main.onosSet ) )
1718 main.log.debug( "Actual: " + str( current ) )
1719 getResults = main.FALSE
1720 else:
1721 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001722 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001723 " set " + main.onosSetName + ":\n" +
1724 str( getResponses[ i ] ) )
1725 getResults = main.FALSE
1726 elif getResponses[ i ] == main.ERROR:
1727 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001728 sizeResponses = main.Cluster.command( "setTestSize",
1729 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001730 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001731 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001732 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001733 if size != sizeResponses[ i ]:
1734 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001735 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001736 " for set " + main.onosSetName +
1737 " but got " + str( sizeResponses[ i ] ) )
1738 addAllResults = addAllResults and getResults and sizeResults
1739 utilities.assert_equals( expect=main.TRUE,
1740 actual=addAllResults,
1741 onpass="Set addAll correct",
1742 onfail="Set addAll was incorrect" )
1743
1744 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001745 containsResponses = main.Cluster.command( "setTestGet",
1746 args=[ main.onosSetName ],
1747 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001748 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001749 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001750 if containsResponses[ i ] == main.ERROR:
1751 containsResults = main.FALSE
1752 else:
1753 containsResults = containsResults and\
1754 containsResponses[ i ][ 1 ]
1755 utilities.assert_equals( expect=main.TRUE,
1756 actual=containsResults,
1757 onpass="Set contains is functional",
1758 onfail="Set contains failed" )
1759
1760 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001761 containsAllResponses = main.Cluster.command( "setTestGet",
1762 args=[ main.onosSetName ],
1763 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001764 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001765 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001766             if containsAllResponses[ i ] == main.ERROR:
1767                 containsAllResults = main.FALSE
1768             else:
1769                 containsAllResults = containsAllResults and\
1770                     containsAllResponses[ i ][ 1 ]
1771 utilities.assert_equals( expect=main.TRUE,
1772 actual=containsAllResults,
1773 onpass="Set containsAll is functional",
1774 onfail="Set containsAll failed" )
1775
1776 main.step( "Distributed Set remove()" )
1777 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001778 removeResponses = main.Cluster.command( "setTestRemove",
1779 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001780 # main.TRUE = successfully changed the set
1781 # main.FALSE = action resulted in no change in set
1782 # main.ERROR - Some error in executing the function
1783 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001784 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001785 if removeResponses[ i ] == main.TRUE:
1786 # All is well
1787 pass
1788 elif removeResponses[ i ] == main.FALSE:
1789 # not in set, probably fine
1790 pass
1791 elif removeResponses[ i ] == main.ERROR:
1792 # Error in execution
1793 removeResults = main.FALSE
1794 else:
1795 # unexpected result
1796 removeResults = main.FALSE
1797 if removeResults != main.TRUE:
1798 main.log.error( "Error executing set remove" )
1799
1800 # Check if set is still correct
1801 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001802 getResponses = main.Cluster.command( "setTestGet",
1803 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001804 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001805 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001806 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001807 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001808 current = set( getResponses[ i ] )
1809 if len( current ) == len( getResponses[ i ] ):
1810 # no repeats
1811 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001812 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001813 " of set " + main.onosSetName + ":\n" +
1814 str( getResponses[ i ] ) )
1815 main.log.debug( "Expected: " + str( main.onosSet ) )
1816 main.log.debug( "Actual: " + str( current ) )
1817 getResults = main.FALSE
1818 else:
1819 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001820 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001821 " set " + main.onosSetName + ":\n" +
1822 str( getResponses[ i ] ) )
1823 getResults = main.FALSE
1824 elif getResponses[ i ] == main.ERROR:
1825 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001826 sizeResponses = main.Cluster.command( "setTestSize",
1827 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001828 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001829 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001830 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001831 if size != sizeResponses[ i ]:
1832 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001833 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001834 " for set " + main.onosSetName +
1835 " but got " + str( sizeResponses[ i ] ) )
1836 removeResults = removeResults and getResults and sizeResults
1837 utilities.assert_equals( expect=main.TRUE,
1838 actual=removeResults,
1839 onpass="Set remove correct",
1840 onfail="Set remove was incorrect" )
1841
1842 main.step( "Distributed Set removeAll()" )
1843 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001844 removeAllResponses = main.Cluster.command( "setTestRemove",
1845 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001846 # main.TRUE = successfully changed the set
1847 # main.FALSE = action resulted in no change in set
1848 # main.ERROR - Some error in executing the function
1849 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001850 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001851 if removeAllResponses[ i ] == main.TRUE:
1852 # All is well
1853 pass
1854 elif removeAllResponses[ i ] == main.FALSE:
1855 # not in set, probably fine
1856 pass
1857 elif removeAllResponses[ i ] == main.ERROR:
1858 # Error in execution
1859 removeAllResults = main.FALSE
1860 else:
1861 # unexpected result
1862 removeAllResults = main.FALSE
1863 if removeAllResults != main.TRUE:
1864 main.log.error( "Error executing set removeAll" )
1865
1866 # Check if set is still correct
1867 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001868 getResponses = main.Cluster.command( "setTestGet",
1869 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001870 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001871 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001872 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001873 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001874 current = set( getResponses[ i ] )
1875 if len( current ) == len( getResponses[ i ] ):
1876 # no repeats
1877 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001878 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001879 " of set " + main.onosSetName + ":\n" +
1880 str( getResponses[ i ] ) )
1881 main.log.debug( "Expected: " + str( main.onosSet ) )
1882 main.log.debug( "Actual: " + str( current ) )
1883 getResults = main.FALSE
1884 else:
1885 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001886 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001887 " set " + main.onosSetName + ":\n" +
1888 str( getResponses[ i ] ) )
1889 getResults = main.FALSE
1890 elif getResponses[ i ] == main.ERROR:
1891 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001892 sizeResponses = main.Cluster.command( "setTestSize",
1893 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001894 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001895 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001896 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001897 if size != sizeResponses[ i ]:
1898 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001899 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001900 " for set " + main.onosSetName +
1901 " but got " + str( sizeResponses[ i ] ) )
1902 removeAllResults = removeAllResults and getResults and sizeResults
1903 utilities.assert_equals( expect=main.TRUE,
1904 actual=removeAllResults,
1905 onpass="Set removeAll correct",
1906 onfail="Set removeAll was incorrect" )
1907
1908 main.step( "Distributed Set addAll()" )
1909 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001910 addResponses = main.Cluster.command( "setTestAdd",
1911 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001912 # main.TRUE = successfully changed the set
1913 # main.FALSE = action resulted in no change in set
1914 # main.ERROR - Some error in executing the function
1915 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001916 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001917 if addResponses[ i ] == main.TRUE:
1918 # All is well
1919 pass
1920 elif addResponses[ i ] == main.FALSE:
1921 # Already in set, probably fine
1922 pass
1923 elif addResponses[ i ] == main.ERROR:
1924 # Error in execution
1925 addAllResults = main.FALSE
1926 else:
1927 # unexpected result
1928 addAllResults = main.FALSE
1929 if addAllResults != main.TRUE:
1930 main.log.error( "Error executing set addAll" )
1931
1932 # Check if set is still correct
1933 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001934 getResponses = main.Cluster.command( "setTestGet",
1935 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001936 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001937 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001938 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001939 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001940 current = set( getResponses[ i ] )
1941 if len( current ) == len( getResponses[ i ] ):
1942 # no repeats
1943 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001944 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001945 " of set " + main.onosSetName + ":\n" +
1946 str( getResponses[ i ] ) )
1947 main.log.debug( "Expected: " + str( main.onosSet ) )
1948 main.log.debug( "Actual: " + str( current ) )
1949 getResults = main.FALSE
1950 else:
1951 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001952 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001953 " set " + main.onosSetName + ":\n" +
1954 str( getResponses[ i ] ) )
1955 getResults = main.FALSE
1956 elif getResponses[ i ] == main.ERROR:
1957 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001958 sizeResponses = main.Cluster.command( "setTestSize",
1959 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001960 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001961 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001962 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001963 if size != sizeResponses[ i ]:
1964 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001965 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001966 " for set " + main.onosSetName +
1967 " but got " + str( sizeResponses[ i ] ) )
1968 addAllResults = addAllResults and getResults and sizeResults
1969 utilities.assert_equals( expect=main.TRUE,
1970 actual=addAllResults,
1971 onpass="Set addAll correct",
1972 onfail="Set addAll was incorrect" )
1973
1974 main.step( "Distributed Set clear()" )
1975 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07001976 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07001977 args=[ main.onosSetName, " " ], # Values doesn't matter
1978 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001979 # main.TRUE = successfully changed the set
1980 # main.FALSE = action resulted in no change in set
1981 # main.ERROR - Some error in executing the function
1982 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001983 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001984 if clearResponses[ i ] == main.TRUE:
1985 # All is well
1986 pass
1987 elif clearResponses[ i ] == main.FALSE:
1988 # Nothing set, probably fine
1989 pass
1990 elif clearResponses[ i ] == main.ERROR:
1991 # Error in execution
1992 clearResults = main.FALSE
1993 else:
1994 # unexpected result
1995 clearResults = main.FALSE
1996 if clearResults != main.TRUE:
1997 main.log.error( "Error executing set clear" )
1998
1999 # Check if set is still correct
2000 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002001 getResponses = main.Cluster.command( "setTestGet",
2002 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002003 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002004 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002005 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002006 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002007 current = set( getResponses[ i ] )
2008 if len( current ) == len( getResponses[ i ] ):
2009 # no repeats
2010 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002011 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002012 " of set " + main.onosSetName + ":\n" +
2013 str( getResponses[ i ] ) )
2014 main.log.debug( "Expected: " + str( main.onosSet ) )
2015 main.log.debug( "Actual: " + str( current ) )
2016 getResults = main.FALSE
2017 else:
2018 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002019 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002020 " set " + main.onosSetName + ":\n" +
2021 str( getResponses[ i ] ) )
2022 getResults = main.FALSE
2023 elif getResponses[ i ] == main.ERROR:
2024 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002025 sizeResponses = main.Cluster.command( "setTestSize",
2026 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002027 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002028 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002029 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002030 if size != sizeResponses[ i ]:
2031 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002032 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002033 " for set " + main.onosSetName +
2034 " but got " + str( sizeResponses[ i ] ) )
2035 clearResults = clearResults and getResults and sizeResults
2036 utilities.assert_equals( expect=main.TRUE,
2037 actual=clearResults,
2038 onpass="Set clear correct",
2039 onfail="Set clear was incorrect" )
2040
2041 main.step( "Distributed Set addAll()" )
2042 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002043 addResponses = main.Cluster.command( "setTestAdd",
2044 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002045 # main.TRUE = successfully changed the set
2046 # main.FALSE = action resulted in no change in set
2047 # main.ERROR - Some error in executing the function
2048 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002049 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002050 if addResponses[ i ] == main.TRUE:
2051 # All is well
2052 pass
2053 elif addResponses[ i ] == main.FALSE:
2054 # Already in set, probably fine
2055 pass
2056 elif addResponses[ i ] == main.ERROR:
2057 # Error in execution
2058 addAllResults = main.FALSE
2059 else:
2060 # unexpected result
2061 addAllResults = main.FALSE
2062 if addAllResults != main.TRUE:
2063 main.log.error( "Error executing set addAll" )
2064
2065 # Check if set is still correct
2066 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002067 getResponses = main.Cluster.command( "setTestGet",
2068 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002069 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002070 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002071 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002072 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002073 current = set( getResponses[ i ] )
2074 if len( current ) == len( getResponses[ i ] ):
2075 # no repeats
2076 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002077 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002078 " of set " + main.onosSetName + ":\n" +
2079 str( getResponses[ i ] ) )
2080 main.log.debug( "Expected: " + str( main.onosSet ) )
2081 main.log.debug( "Actual: " + str( current ) )
2082 getResults = main.FALSE
2083 else:
2084 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002085 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002086 " set " + main.onosSetName + ":\n" +
2087 str( getResponses[ i ] ) )
2088 getResults = main.FALSE
2089 elif getResponses[ i ] == main.ERROR:
2090 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002091 sizeResponses = main.Cluster.command( "setTestSize",
2092 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002093 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002094 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002095 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002096 if size != sizeResponses[ i ]:
2097 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002098 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002099 " for set " + main.onosSetName +
2100 " but got " + str( sizeResponses[ i ] ) )
2101 addAllResults = addAllResults and getResults and sizeResults
2102 utilities.assert_equals( expect=main.TRUE,
2103 actual=addAllResults,
2104 onpass="Set addAll correct",
2105 onfail="Set addAll was incorrect" )
2106
2107 main.step( "Distributed Set retain()" )
2108 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002109 retainResponses = main.Cluster.command( "setTestRemove",
2110 args=[ main.onosSetName, retainValue ],
2111 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002112 # main.TRUE = successfully changed the set
2113 # main.FALSE = action resulted in no change in set
2114 # main.ERROR - Some error in executing the function
2115 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002116 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002117 if retainResponses[ i ] == main.TRUE:
2118 # All is well
2119 pass
2120 elif retainResponses[ i ] == main.FALSE:
2121 # Already in set, probably fine
2122 pass
2123 elif retainResponses[ i ] == main.ERROR:
2124 # Error in execution
2125 retainResults = main.FALSE
2126 else:
2127 # unexpected result
2128 retainResults = main.FALSE
2129 if retainResults != main.TRUE:
2130 main.log.error( "Error executing set retain" )
2131
2132 # Check if set is still correct
2133 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002134 getResponses = main.Cluster.command( "setTestGet",
2135 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002136 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002137 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002138 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002139 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002140 current = set( getResponses[ i ] )
2141 if len( current ) == len( getResponses[ i ] ):
2142 # no repeats
2143 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002144 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002145 " of set " + main.onosSetName + ":\n" +
2146 str( getResponses[ i ] ) )
2147 main.log.debug( "Expected: " + str( main.onosSet ) )
2148 main.log.debug( "Actual: " + str( current ) )
2149 getResults = main.FALSE
2150 else:
2151 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002152 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002153 " set " + main.onosSetName + ":\n" +
2154 str( getResponses[ i ] ) )
2155 getResults = main.FALSE
2156 elif getResponses[ i ] == main.ERROR:
2157 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002158 sizeResponses = main.Cluster.command( "setTestSize",
2159 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002160 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002161 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002162 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002163 if size != sizeResponses[ i ]:
2164 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002165 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002166 str( size ) + " for set " + main.onosSetName +
2167 " but got " + str( sizeResponses[ i ] ) )
2168 retainResults = retainResults and getResults and sizeResults
2169 utilities.assert_equals( expect=main.TRUE,
2170 actual=retainResults,
2171 onpass="Set retain correct",
2172 onfail="Set retain was incorrect" )
2173
2174 # Transactional maps
2175 main.step( "Partitioned Transactional maps put" )
2176 tMapValue = "Testing"
2177 numKeys = 100
2178 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002179 ctrl = main.Cluster.next()
2180 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002181 if putResponses and len( putResponses ) == 100:
2182 for i in putResponses:
2183 if putResponses[ i ][ 'value' ] != tMapValue:
2184 putResult = False
2185 else:
2186 putResult = False
2187 if not putResult:
2188 main.log.debug( "Put response values: " + str( putResponses ) )
2189 utilities.assert_equals( expect=True,
2190 actual=putResult,
2191 onpass="Partitioned Transactional Map put successful",
2192 onfail="Partitioned Transactional Map put values are incorrect" )
2193
2194 main.step( "Partitioned Transactional maps get" )
2195 # FIXME: is this sleep needed?
2196 time.sleep( 5 )
2197
2198 getCheck = True
2199 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002200 getResponses = main.Cluster.command( "transactionalMapGet",
2201 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002202 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002203 for node in getResponses:
2204 if node != tMapValue:
2205 valueCheck = False
2206 if not valueCheck:
Jon Hall0e240372018-05-02 11:21:57 -07002207 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002208 main.log.warn( getResponses )
2209 getCheck = getCheck and valueCheck
2210 utilities.assert_equals( expect=True,
2211 actual=getCheck,
2212 onpass="Partitioned Transactional Map get values were correct",
2213 onfail="Partitioned Transactional Map values incorrect" )
2214
2215 # DISTRIBUTED ATOMIC VALUE
2216 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002217 getValues = main.Cluster.command( "valueTestGet",
2218 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002219 main.log.debug( getValues )
2220 # Check the results
2221 atomicValueGetResult = True
2222 expected = valueValue if valueValue is not None else "null"
2223 main.log.debug( "Checking for value of " + expected )
2224 for i in getValues:
2225 if i != expected:
2226 atomicValueGetResult = False
2227 utilities.assert_equals( expect=True,
2228 actual=atomicValueGetResult,
2229 onpass="Atomic Value get successful",
2230 onfail="Error getting atomic Value " +
2231 str( valueValue ) + ", found: " +
2232 str( getValues ) )
2233
2234 main.step( "Atomic Value set()" )
2235 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002236 setValues = main.Cluster.command( "valueTestSet",
2237 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002238 main.log.debug( setValues )
2239 # Check the results
2240 atomicValueSetResults = True
2241 for i in setValues:
2242 if i != main.TRUE:
2243 atomicValueSetResults = False
2244 utilities.assert_equals( expect=True,
2245 actual=atomicValueSetResults,
2246 onpass="Atomic Value set successful",
2247                                  onfail="Error setting atomic Value: " +
2248 str( setValues ) )
2249
2250 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002251 getValues = main.Cluster.command( "valueTestGet",
2252 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002253 main.log.debug( getValues )
2254 # Check the results
2255 atomicValueGetResult = True
2256 expected = valueValue if valueValue is not None else "null"
2257 main.log.debug( "Checking for value of " + expected )
2258 for i in getValues:
2259 if i != expected:
2260 atomicValueGetResult = False
2261 utilities.assert_equals( expect=True,
2262 actual=atomicValueGetResult,
2263 onpass="Atomic Value get successful",
2264 onfail="Error getting atomic Value " +
2265 str( valueValue ) + ", found: " +
2266 str( getValues ) )
2267
2268 main.step( "Atomic Value compareAndSet()" )
2269 oldValue = valueValue
2270 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002271 ctrl = main.Cluster.next()
2272 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002273 main.log.debug( CASValue )
2274 utilities.assert_equals( expect=main.TRUE,
2275 actual=CASValue,
2276                                  onpass="Atomic Value compareAndSet successful",
2277 onfail="Error setting atomic Value:" +
2278 str( CASValue ) )
2279
2280 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002281 getValues = main.Cluster.command( "valueTestGet",
2282 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002283 main.log.debug( getValues )
2284 # Check the results
2285 atomicValueGetResult = True
2286 expected = valueValue if valueValue is not None else "null"
2287 main.log.debug( "Checking for value of " + expected )
2288 for i in getValues:
2289 if i != expected:
2290 atomicValueGetResult = False
2291 utilities.assert_equals( expect=True,
2292 actual=atomicValueGetResult,
2293 onpass="Atomic Value get successful",
2294 onfail="Error getting atomic Value " +
2295 str( valueValue ) + ", found: " +
2296 str( getValues ) )
2297
2298 main.step( "Atomic Value getAndSet()" )
2299 oldValue = valueValue
2300 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002301 ctrl = main.Cluster.next()
2302 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002303 main.log.debug( GASValue )
2304 expected = oldValue if oldValue is not None else "null"
2305 utilities.assert_equals( expect=expected,
2306 actual=GASValue,
2307 onpass="Atomic Value GAS successful",
2308 onfail="Error with GetAndSet atomic Value: expected " +
2309 str( expected ) + ", found: " +
2310 str( GASValue ) )
2311
2312 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002313 getValues = main.Cluster.command( "valueTestGet",
2314 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002315 main.log.debug( getValues )
2316 # Check the results
2317 atomicValueGetResult = True
2318 expected = valueValue if valueValue is not None else "null"
2319 main.log.debug( "Checking for value of " + expected )
2320 for i in getValues:
2321 if i != expected:
2322 atomicValueGetResult = False
2323 utilities.assert_equals( expect=True,
2324 actual=atomicValueGetResult,
2325 onpass="Atomic Value get successful",
2326 onfail="Error getting atomic Value: expected " +
2327 str( valueValue ) + ", found: " +
2328 str( getValues ) )
2329
2330         main.step( "Atomic Value destroy()" )
2331 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002332 ctrl = main.Cluster.next()
2333 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002334 main.log.debug( destroyResult )
2335 # Check the results
2336 utilities.assert_equals( expect=main.TRUE,
2337 actual=destroyResult,
2338 onpass="Atomic Value destroy successful",
2339 onfail="Error destroying atomic Value" )
2340
2341 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002342 getValues = main.Cluster.command( "valueTestGet",
2343 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002344 main.log.debug( getValues )
2345 # Check the results
2346 atomicValueGetResult = True
2347 expected = valueValue if valueValue is not None else "null"
2348 main.log.debug( "Checking for value of " + expected )
2349 for i in getValues:
2350 if i != expected:
2351 atomicValueGetResult = False
2352 utilities.assert_equals( expect=True,
2353 actual=atomicValueGetResult,
2354 onpass="Atomic Value get successful",
2355 onfail="Error getting atomic Value " +
2356 str( valueValue ) + ", found: " +
2357 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002358
2359 # WORK QUEUES
2360 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002361 ctrl = main.Cluster.next()
2362 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002363 workQueuePending += 1
2364 main.log.debug( addResult )
2365 # Check the results
2366 utilities.assert_equals( expect=main.TRUE,
2367 actual=addResult,
2368 onpass="Work Queue add successful",
2369 onfail="Error adding to Work Queue" )
2370
2371 main.step( "Check the work queue stats" )
2372 statsResults = self.workQueueStatsCheck( workQueueName,
2373 workQueueCompleted,
2374 workQueueInProgress,
2375 workQueuePending )
2376 utilities.assert_equals( expect=True,
2377 actual=statsResults,
2378 onpass="Work Queue stats correct",
2379 onfail="Work Queue stats incorrect " )
2380
2381 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002382 ctrl = main.Cluster.next()
2383 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002384 workQueuePending += 2
2385 main.log.debug( addMultipleResult )
2386 # Check the results
2387 utilities.assert_equals( expect=main.TRUE,
2388 actual=addMultipleResult,
2389 onpass="Work Queue add multiple successful",
2390 onfail="Error adding multiple items to Work Queue" )
2391
2392 main.step( "Check the work queue stats" )
2393 statsResults = self.workQueueStatsCheck( workQueueName,
2394 workQueueCompleted,
2395 workQueueInProgress,
2396 workQueuePending )
2397 utilities.assert_equals( expect=True,
2398 actual=statsResults,
2399 onpass="Work Queue stats correct",
2400 onfail="Work Queue stats incorrect " )
2401
2402 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002403 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002404 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002405 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002406 workQueuePending -= number
2407 workQueueCompleted += number
2408 main.log.debug( take1Result )
2409 # Check the results
2410 utilities.assert_equals( expect=main.TRUE,
2411 actual=take1Result,
2412 onpass="Work Queue takeAndComplete 1 successful",
2413 onfail="Error taking 1 from Work Queue" )
2414
2415 main.step( "Check the work queue stats" )
2416 statsResults = self.workQueueStatsCheck( workQueueName,
2417 workQueueCompleted,
2418 workQueueInProgress,
2419 workQueuePending )
2420 utilities.assert_equals( expect=True,
2421 actual=statsResults,
2422 onpass="Work Queue stats correct",
2423 onfail="Work Queue stats incorrect " )
2424
2425 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002426 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002427 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002428 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002429 workQueuePending -= number
2430 workQueueCompleted += number
2431 main.log.debug( take2Result )
2432 # Check the results
2433 utilities.assert_equals( expect=main.TRUE,
2434 actual=take2Result,
2435 onpass="Work Queue takeAndComplete 2 successful",
2436 onfail="Error taking 2 from Work Queue" )
2437
2438 main.step( "Check the work queue stats" )
2439 statsResults = self.workQueueStatsCheck( workQueueName,
2440 workQueueCompleted,
2441 workQueueInProgress,
2442 workQueuePending )
2443 utilities.assert_equals( expect=True,
2444 actual=statsResults,
2445 onpass="Work Queue stats correct",
2446 onfail="Work Queue stats incorrect " )
2447
2448 main.step( "Work Queue destroy()" )
2449 valueValue = None
2450 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002451 ctrl = main.Cluster.next()
2452 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002453 workQueueCompleted = 0
2454 workQueueInProgress = 0
2455 workQueuePending = 0
2456 main.log.debug( destroyResult )
2457 # Check the results
2458 utilities.assert_equals( expect=main.TRUE,
2459 actual=destroyResult,
2460 onpass="Work Queue destroy successful",
2461 onfail="Error destroying Work Queue" )
2462
2463 main.step( "Check the work queue stats" )
2464 statsResults = self.workQueueStatsCheck( workQueueName,
2465 workQueueCompleted,
2466 workQueueInProgress,
2467 workQueuePending )
2468 utilities.assert_equals( expect=True,
2469 actual=statsResults,
2470 onpass="Work Queue stats correct",
2471 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002472 except Exception as e:
2473 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002474
2475 def cleanUp( self, main ):
2476 """
2477 Clean up
2478 """
Devin Lim58046fa2017-07-05 16:55:00 -07002479 assert main, "main not defined"
2480 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002481
2482 # printing colors to terminal
2483 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2484 'blue': '\033[94m', 'green': '\033[92m',
2485 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
Jon Hall4173b242017-09-12 17:04:38 -07002486
Devin Lim58046fa2017-07-05 16:55:00 -07002487 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002488
2489 main.step( "Checking raft log size" )
2490 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2491 # get compacted periodically
Jon Hall3e6edb32018-08-21 16:20:30 -07002492
2493 # FIXME: We need to look at the raft servers, which might not be on the ONOS machine
Jon Hall4173b242017-09-12 17:04:38 -07002494 logCheck = main.Cluster.checkPartitionSize()
2495 utilities.assert_equals( expect=True, actual=logCheck,
2496 onpass="Raft log size is not too big",
2497 onfail="Raft logs grew too big" )
2498
Devin Lim58046fa2017-07-05 16:55:00 -07002499 main.step( "Killing tcpdumps" )
2500 main.Mininet2.stopTcpdump()
2501
2502 testname = main.TEST
2503 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2504 main.step( "Copying MN pcap and ONOS log files to test station" )
2505 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2506 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2507 # NOTE: MN Pcap file is being saved to logdir.
2508 # We scp this file as MN and TestON aren't necessarily the same vm
2509
2510 # FIXME: To be replaced with a Jenkin's post script
2511 # TODO: Load these from params
2512 # NOTE: must end in /
2513 logFolder = "/opt/onos/log/"
2514 logFiles = [ "karaf.log", "karaf.log.1" ]
2515 # NOTE: must end in /
2516 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002517 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002518 dstName = main.logdir + "/" + ctrl.name + "-" + f
2519 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002520 logFolder + f, dstName )
2521 # std*.log's
2522 # NOTE: must end in /
2523 logFolder = "/opt/onos/var/"
2524 logFiles = [ "stderr.log", "stdout.log" ]
2525 # NOTE: must end in /
2526 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002527 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002528 dstName = main.logdir + "/" + ctrl.name + "-" + f
2529 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002530 logFolder + f, dstName )
2531 else:
2532 main.log.debug( "skipping saving log files" )
2533
Jon Hall5d5876e2017-11-30 09:33:16 -08002534 main.step( "Checking ONOS Logs for errors" )
2535 for ctrl in main.Cluster.runningNodes:
2536 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2537 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
2538
Devin Lim58046fa2017-07-05 16:55:00 -07002539 main.step( "Stopping Mininet" )
2540 mnResult = main.Mininet1.stopNet()
2541 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2542 onpass="Mininet stopped",
2543 onfail="MN cleanup NOT successful" )
2544
Devin Lim58046fa2017-07-05 16:55:00 -07002545 try:
2546 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2547 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2548 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2549 timerLog.close()
2550 except NameError as e:
2551 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002552
Devin Lim58046fa2017-07-05 16:55:00 -07002553 def assignMastership( self, main ):
2554 """
2555 Assign mastership to controllers
2556 """
2557 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002558 assert main, "main not defined"
2559 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002560
2561 main.case( "Assigning Controller roles for switches" )
2562 main.caseExplanation = "Check that ONOS is connected to each " +\
2563 "device. Then manually assign" +\
2564 " mastership to specific ONOS nodes using" +\
2565 " 'device-role'"
2566 main.step( "Assign mastership of switches to specific controllers" )
2567 # Manually assign mastership to the controller we want
2568 roleCall = main.TRUE
2569
2570 ipList = []
2571 deviceList = []
Devin Lim58046fa2017-07-05 16:55:00 -07002572 try:
2573 # Assign mastership to specific controllers. This assignment was
2574             # determined for a 7 node cluster, but will work with any sized
2575 # cluster
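            # e.g. with only 3 active nodes, "c = 5 % main.Cluster.numCtrls" gives
            # index 2, so the switches meant for ONOS6 simply fall back to ONOS3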
2576 for i in range( 1, 29 ): # switches 1 through 28
2577 # set up correct variables:
2578 if i == 1:
2579 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002580 ip = main.Cluster.active( c ).ip_address # ONOS1
Jon Hall0e240372018-05-02 11:21:57 -07002581 deviceId = main.Cluster.next().getDevice( "1000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002582 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002583 c = 1 % main.Cluster.numCtrls
2584 ip = main.Cluster.active( c ).ip_address # ONOS2
Jon Hall0e240372018-05-02 11:21:57 -07002585 deviceId = main.Cluster.next().getDevice( "2000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002586 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002587 c = 1 % main.Cluster.numCtrls
2588 ip = main.Cluster.active( c ).ip_address # ONOS2
Jon Hall0e240372018-05-02 11:21:57 -07002589 deviceId = main.Cluster.next().getDevice( "3000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002590 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002591 c = 3 % main.Cluster.numCtrls
2592 ip = main.Cluster.active( c ).ip_address # ONOS4
Jon Hall0e240372018-05-02 11:21:57 -07002593 deviceId = main.Cluster.next().getDevice( "3004" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002594 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002595 c = 2 % main.Cluster.numCtrls
2596 ip = main.Cluster.active( c ).ip_address # ONOS3
Jon Hall0e240372018-05-02 11:21:57 -07002597 deviceId = main.Cluster.next().getDevice( "5000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002598 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002599 c = 2 % main.Cluster.numCtrls
2600 ip = main.Cluster.active( c ).ip_address # ONOS3
Jon Hall0e240372018-05-02 11:21:57 -07002601 deviceId = main.Cluster.next().getDevice( "6000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002602 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002603 c = 5 % main.Cluster.numCtrls
2604 ip = main.Cluster.active( c ).ip_address # ONOS6
Jon Hall0e240372018-05-02 11:21:57 -07002605 deviceId = main.Cluster.next().getDevice( "6007" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002606 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002607 c = 4 % main.Cluster.numCtrls
2608 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002609 dpid = '3' + str( i ).zfill( 3 )
Jon Hall0e240372018-05-02 11:21:57 -07002610 deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002611 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002612 c = 6 % main.Cluster.numCtrls
2613 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002614 dpid = '6' + str( i ).zfill( 3 )
Jon Hall0e240372018-05-02 11:21:57 -07002615 deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002616 elif i == 28:
2617 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002618 ip = main.Cluster.active( c ).ip_address # ONOS1
Jon Hall0e240372018-05-02 11:21:57 -07002619 deviceId = main.Cluster.next().getDevice( "2800" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002620 else:
2621 main.log.error( "You didn't write an else statement for " +
2622 "switch s" + str( i ) )
2623 roleCall = main.FALSE
2624 # Assign switch
2625 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2626 # TODO: make this controller dynamic
Jon Hall0e240372018-05-02 11:21:57 -07002627 roleCall = roleCall and main.Cluster.next().deviceRole( deviceId, ip )
Devin Lim58046fa2017-07-05 16:55:00 -07002628 ipList.append( ip )
2629 deviceList.append( deviceId )
2630 except ( AttributeError, AssertionError ):
2631 main.log.exception( "Something is wrong with ONOS device view" )
Jon Hall0e240372018-05-02 11:21:57 -07002632 main.log.info( main.Cluster.next().devices() )
Devin Lim58046fa2017-07-05 16:55:00 -07002633 utilities.assert_equals(
2634 expect=main.TRUE,
2635 actual=roleCall,
2636 onpass="Re-assigned switch mastership to designated controller",
2637 onfail="Something wrong with deviceRole calls" )
2638
2639 main.step( "Check mastership was correctly assigned" )
2640 roleCheck = main.TRUE
2641 # NOTE: This is due to the fact that device mastership change is not
2642 # atomic and is actually a multi step process
2643 time.sleep( 5 )
2644 for i in range( len( ipList ) ):
2645 ip = ipList[ i ]
2646 deviceId = deviceList[ i ]
2647 # Check assignment
Jon Hall0e240372018-05-02 11:21:57 -07002648 master = main.Cluster.next().getRole( deviceId ).get( 'master' )
Devin Lim58046fa2017-07-05 16:55:00 -07002649 if ip in master:
2650 roleCheck = roleCheck and main.TRUE
2651 else:
2652 roleCheck = roleCheck and main.FALSE
2653 main.log.error( "Error, controller " + ip + " is not" +
2654 " master " + "of device " +
2655 str( deviceId ) + ". Master is " +
2656 repr( master ) + "." )
2657 utilities.assert_equals(
2658 expect=main.TRUE,
2659 actual=roleCheck,
2660 onpass="Switches were successfully reassigned to designated " +
2661 "controller",
2662 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002663
Jon Hall5d5876e2017-11-30 09:33:16 -08002664 def bringUpStoppedNodes( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -07002665 """
Jon Hall5d5876e2017-11-30 09:33:16 -08002666         Bring up the ONOS nodes that were previously stopped or killed.
Devin Lim58046fa2017-07-05 16:55:00 -07002667 """
2668 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002669 assert main, "main not defined"
2670 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002671 assert main.kill, "main.kill not defined"
2672 main.case( "Restart minority of ONOS nodes" )
2673
2674 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2675 startResults = main.TRUE
2676 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002677 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002678 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002679 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002680 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2681 onpass="ONOS nodes started successfully",
2682 onfail="ONOS nodes NOT successfully started" )
2683
2684 main.step( "Checking if ONOS is up yet" )
2685 count = 0
2686 onosIsupResult = main.FALSE
2687 while onosIsupResult == main.FALSE and count < 10:
2688 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002689 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002690 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002691 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002692 count = count + 1
2693 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2694 onpass="ONOS restarted successfully",
2695 onfail="ONOS restart NOT successful" )
2696
Jon Hall5d5876e2017-11-30 09:33:16 -08002697 main.step( "Restarting ONOS CLI" )
Devin Lim58046fa2017-07-05 16:55:00 -07002698 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002699 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002700 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002701 ctrl.startOnosCli( ctrl.ipAddress )
2702 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002703 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002704 onpass="ONOS node(s) restarted",
2705 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002706
Jon Hall5d5876e2017-11-30 09:33:16 -08002707 # Grab the time of restart so we can have some idea of average time
Devin Lim58046fa2017-07-05 16:55:00 -07002708 main.restartTime = time.time() - restartTime
2709 main.log.debug( "Restart time: " + str( main.restartTime ) )
2710         # TODO: Make this configurable. Also, we are breaking the above timer
2711 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08002712 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07002713 False,
Devin Lim58046fa2017-07-05 16:55:00 -07002714 sleep=15,
2715 attempts=5 )
2716
2717 utilities.assert_equals( expect=True, actual=nodeResults,
2718 onpass="Nodes check successful",
2719 onfail="Nodes check NOT successful" )
2720
2721 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002722 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002723 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002724 ctrl.name,
Jon Hall6c9e2da2018-11-06 12:01:23 -08002725 ctrl.CLI.sendline( "onos:scr-list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002726 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -07002727 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002728
Jon Hallca319892017-06-15 15:25:22 -07002729 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002730
2731 main.step( "Rerun for election on the node(s) that were killed" )
2732 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002733 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002734 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002735 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002736 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2737 onpass="ONOS nodes reran for election topic",
Jon Hall5d5876e2017-11-30 09:33:16 -08002738 onfail="Error rerunning for election" )
2739
2740 def upgradeNodes( self, main ):
2741 """
2742 Reinstall some nodes with an upgraded version.
2743
2744 This will reinstall nodes in main.kill with an upgraded version.
2745 """
2746 import time
2747 assert main, "main not defined"
2748 assert utilities.assert_equals, "utilities.assert_equals not defined"
2749 assert main.kill, "main.kill not defined"
2750 nodeNames = [ node.name for node in main.kill ]
2751         main.step( "Upgrading " + str( nodeNames ) + " ONOS nodes" )
2752
2753 stopResults = main.TRUE
2754 uninstallResults = main.TRUE
2755 startResults = main.TRUE
2756 sshResults = main.TRUE
2757 isup = main.TRUE
2758 restartTime = time.time()
2759 for ctrl in main.kill:
2760 stopResults = stopResults and\
2761 ctrl.onosStop( ctrl.ipAddress )
2762 uninstallResults = uninstallResults and\
2763 ctrl.onosUninstall( ctrl.ipAddress )
2764 # Install the new version of onos
2765 startResults = startResults and\
2766 ctrl.onosInstall( options="-fv", node=ctrl.ipAddress )
2767 sshResults = sshResults and\
2768 ctrl.onosSecureSSH( node=ctrl.ipAddress )
2769 isup = isup and ctrl.isup( ctrl.ipAddress )
2770 utilities.assert_equals( expect=main.TRUE, actual=stopResults,
2771 onpass="ONOS nodes stopped successfully",
2772 onfail="ONOS nodes NOT successfully stopped" )
2773 utilities.assert_equals( expect=main.TRUE, actual=uninstallResults,
2774 onpass="ONOS nodes uninstalled successfully",
2775 onfail="ONOS nodes NOT successfully uninstalled" )
2776 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2777 onpass="ONOS nodes started successfully",
2778 onfail="ONOS nodes NOT successfully started" )
2779 utilities.assert_equals( expect=main.TRUE, actual=sshResults,
2780 onpass="Successfully secured onos ssh",
2781 onfail="Failed to secure onos ssh" )
2782 utilities.assert_equals( expect=main.TRUE, actual=isup,
2783 onpass="ONOS nodes fully started",
2784 onfail="ONOS nodes NOT fully started" )
2785
2786 main.step( "Restarting ONOS CLI" )
2787 cliResults = main.TRUE
2788 for ctrl in main.kill:
2789 cliResults = cliResults and\
2790 ctrl.startOnosCli( ctrl.ipAddress )
2791 ctrl.active = True
2792 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
2793 onpass="ONOS node(s) restarted",
2794 onfail="ONOS node(s) did not restart" )
2795
2796 # Grab the time of restart so we can have some idea of average time
2797 main.restartTime = time.time() - restartTime
2798 main.log.debug( "Restart time: " + str( main.restartTime ) )
2799 # TODO: Make this configurable.
2800 main.step( "Checking ONOS nodes" )
2801 nodeResults = utilities.retry( main.Cluster.nodesCheck,
2802 False,
2803 sleep=15,
2804 attempts=5 )
2805
2806 utilities.assert_equals( expect=True, actual=nodeResults,
2807 onpass="Nodes check successful",
2808 onfail="Nodes check NOT successful" )
2809
2810 if not nodeResults:
2811 for ctrl in main.Cluster.active():
2812 main.log.debug( "{} components not ACTIVE: \n{}".format(
2813 ctrl.name,
Jon Hall6c9e2da2018-11-06 12:01:23 -08002814 ctrl.CLI.sendline( "onos:scr-list | grep -v ACTIVE" ) ) )
Jon Hall5d5876e2017-11-30 09:33:16 -08002815 main.log.error( "Failed to start ONOS, stopping test" )
2816 main.cleanAndExit()
2817
2818 self.commonChecks()
2819
2820 main.step( "Rerun for election on the node(s) that were killed" )
2821 runResults = main.TRUE
2822 for ctrl in main.kill:
2823 runResults = runResults and\
2824 ctrl.electionTestRun()
2825 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2826 onpass="ONOS nodes reran for election topic",
2827 onfail="Error rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002828
Devin Lim142b5342017-07-20 15:22:39 -07002829 def tempCell( self, cellName, ipList ):
2830 main.step( "Create cell file" )
2831 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002832
Devin Lim142b5342017-07-20 15:22:39 -07002833 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2834 main.Mininet1.ip_address,
Jon Hall3e6edb32018-08-21 16:20:30 -07002835 cellAppString, ipList, ipList,
2836 main.ONOScli1.karafUser )
Devin Lim142b5342017-07-20 15:22:39 -07002837 main.step( "Applying cell variable to environment" )
2838 cellResult = main.ONOSbench.setCell( cellName )
2839 verifyResult = main.ONOSbench.verifyCell()
2840
Devin Lim142b5342017-07-20 15:22:39 -07002841 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002842 """
2843 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002844 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002845 1: scaling
2846 """
2847 """
2848 Check state after ONOS failure/scaling
2849 """
2850 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002851 assert main, "main not defined"
2852 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002853 main.case( "Running ONOS Constant State Tests" )
2854
Jon Hall3e6edb32018-08-21 16:20:30 -07002855 OnosAfterWhich = [ "failure", "scaling" ]
Devin Lim58046fa2017-07-05 16:55:00 -07002856
Devin Lim58046fa2017-07-05 16:55:00 -07002857 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002858 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002859
Devin Lim142b5342017-07-20 15:22:39 -07002860 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002861 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002862
2863 if rolesResults and not consistentMastership:
2864 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002865 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002866 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002867 json.dumps( json.loads( ONOSMastership[ i ] ),
2868 sort_keys=True,
2869 indent=4,
2870 separators=( ',', ': ' ) ) )
2871
2872 if compareSwitch:
2873 description2 = "Compare switch roles from before failure"
2874 main.step( description2 )
2875 try:
2876 currentJson = json.loads( ONOSMastership[ 0 ] )
2877 oldJson = json.loads( mastershipState )
2878 except ( ValueError, TypeError ):
2879 main.log.exception( "Something is wrong with parsing " +
2880 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002881 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2882                 main.log.debug( "mastershipState: " + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002883 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002884 mastershipCheck = main.TRUE
Jon Hallab611372018-02-21 15:26:05 -08002885 for swName, swDetails in main.Mininet1.getSwitches().items():
2886 switchDPID = swDetails[ 'dpid' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002887 current = [ switch[ 'master' ] for switch in currentJson
2888 if switchDPID in switch[ 'id' ] ]
2889 old = [ switch[ 'master' ] for switch in oldJson
2890 if switchDPID in switch[ 'id' ] ]
2891 if current == old:
2892 mastershipCheck = mastershipCheck and main.TRUE
2893 else:
2894 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2895 mastershipCheck = main.FALSE
2896 utilities.assert_equals(
2897 expect=main.TRUE,
2898 actual=mastershipCheck,
2899 onpass="Mastership of Switches was not changed",
2900 onfail="Mastership of some switches changed" )
2901
2902 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002903 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002904 intentCheck = main.FALSE
2905 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002906
2907 main.step( "Check for consistency in Intents from each controller" )
2908 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2909 main.log.info( "Intents are consistent across all ONOS " +
2910 "nodes" )
2911 else:
2912 consistentIntents = False
2913
2914 # Try to make it easy to figure out what is happening
2915 #
2916 # Intent ONOS1 ONOS2 ...
2917 # 0x01 INSTALLED INSTALLING
2918 # ... ... ...
2919 # ... ... ...
2920 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002921 for ctrl in main.Cluster.active():
2922 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002923 main.log.warn( title )
2924 # get all intent keys in the cluster
2925 keys = []
2926 for nodeStr in ONOSIntents:
2927 node = json.loads( nodeStr )
2928 for intent in node:
2929 keys.append( intent.get( 'id' ) )
2930 keys = set( keys )
2931 for key in keys:
2932 row = "%-13s" % key
2933 for nodeStr in ONOSIntents:
2934 node = json.loads( nodeStr )
2935 for intent in node:
2936 if intent.get( 'id' ) == key:
2937 row += "%-15s" % intent.get( 'state' )
2938 main.log.warn( row )
2939 # End table view
2940
2941 utilities.assert_equals(
2942 expect=True,
2943 actual=consistentIntents,
2944 onpass="Intents are consistent across all ONOS nodes",
2945 onfail="ONOS nodes have different views of intents" )
2946 intentStates = []
2947 for node in ONOSIntents: # Iter through ONOS nodes
2948 nodeStates = []
2949 # Iter through intents of a node
2950 try:
2951 for intent in json.loads( node ):
2952 nodeStates.append( intent[ 'state' ] )
2953 except ( ValueError, TypeError ):
2954 main.log.exception( "Error in parsing intents" )
2955 main.log.error( repr( node ) )
2956 intentStates.append( nodeStates )
2957 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2958 main.log.info( dict( out ) )
2959
2960 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002961 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002962 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002963 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002964 main.log.warn( json.dumps(
2965 json.loads( ONOSIntents[ i ] ),
2966 sort_keys=True,
2967 indent=4,
2968 separators=( ',', ': ' ) ) )
2969 elif intentsResults and consistentIntents:
2970 intentCheck = main.TRUE
2971
2972 # NOTE: Store has no durability, so intents are lost across system
2973 # restarts
2974 if not isRestart:
2975 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2976 # NOTE: this requires case 5 to pass for intentState to be set.
2977 # maybe we should stop the test if that fails?
2978 sameIntents = main.FALSE
2979 try:
2980 intentState
2981 except NameError:
2982 main.log.warn( "No previous intent state was saved" )
2983 else:
2984 if intentState and intentState == ONOSIntents[ 0 ]:
2985 sameIntents = main.TRUE
2986 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2987 # TODO: possibly the states have changed? we may need to figure out
2988 # what the acceptable states are
2989 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2990 sameIntents = main.TRUE
2991 try:
2992 before = json.loads( intentState )
2993 after = json.loads( ONOSIntents[ 0 ] )
2994 for intent in before:
2995 if intent not in after:
2996 sameIntents = main.FALSE
2997 main.log.debug( "Intent is not currently in ONOS " +
2998 "(at least in the same form):" )
2999 main.log.debug( json.dumps( intent ) )
3000 except ( ValueError, TypeError ):
3001 main.log.exception( "Exception printing intents" )
3002 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3003 main.log.debug( repr( intentState ) )
3004 if sameIntents == main.FALSE:
3005 try:
3006 main.log.debug( "ONOS intents before: " )
3007 main.log.debug( json.dumps( json.loads( intentState ),
3008 sort_keys=True, indent=4,
3009 separators=( ',', ': ' ) ) )
3010 main.log.debug( "Current ONOS intents: " )
3011 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
3012 sort_keys=True, indent=4,
3013 separators=( ',', ': ' ) ) )
3014 except ( ValueError, TypeError ):
3015 main.log.exception( "Exception printing intents" )
3016 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3017 main.log.debug( repr( intentState ) )
3018 utilities.assert_equals(
3019 expect=main.TRUE,
3020 actual=sameIntents,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003021 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
Devin Lim58046fa2017-07-05 16:55:00 -07003022 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
3023 intentCheck = intentCheck and sameIntents
3024
3025 main.step( "Get the OF Table entries and compare to before " +
3026 "component " + OnosAfterWhich[ afterWhich ] )
3027 FlowTables = main.TRUE
Jon Hallab611372018-02-21 15:26:05 -08003028 for switch in main.Mininet1.getSwitches().keys():
3029 main.log.info( "Checking flow table on " + switch )
3030 tmpFlows = main.Mininet1.getFlowTable( switch, version="1.3", debug=False )
3031 curSwitch = main.Mininet1.flowTableComp( flows[ switch ], tmpFlows )
Devin Lim58046fa2017-07-05 16:55:00 -07003032 FlowTables = FlowTables and curSwitch
3033 if curSwitch == main.FALSE:
Jon Hallab611372018-02-21 15:26:05 -08003034 main.log.warn( "Differences in flow table for switch: {}".format( switch ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003035 utilities.assert_equals(
3036 expect=main.TRUE,
3037 actual=FlowTables,
3038 onpass="No changes were found in the flow tables",
3039 onfail="Changes were found in the flow tables" )
3040
Jon Hallca319892017-06-15 15:25:22 -07003041 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07003042 """
3043 main.step( "Check the continuous pings to ensure that no packets " +
3044 "were dropped during component failure" )
3045 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
3046 main.params[ 'TESTONIP' ] )
3047 LossInPings = main.FALSE
3048 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3049 for i in range( 8, 18 ):
3050 main.log.info(
3051 "Checking for a loss in pings along flow from s" +
3052 str( i ) )
3053 LossInPings = main.Mininet2.checkForLoss(
3054 "/tmp/ping.h" +
3055 str( i ) ) or LossInPings
3056 if LossInPings == main.TRUE:
3057 main.log.info( "Loss in ping detected" )
3058 elif LossInPings == main.ERROR:
3059 main.log.info( "There are multiple mininet process running" )
3060 elif LossInPings == main.FALSE:
3061 main.log.info( "No Loss in the pings" )
3062 main.log.info( "No loss of dataplane connectivity" )
3063 utilities.assert_equals(
3064 expect=main.FALSE,
3065 actual=LossInPings,
3066 onpass="No Loss of connectivity",
3067 onfail="Loss of dataplane connectivity detected" )
3068 # NOTE: Since intents are not persisted with IntnentStore,
3069 # we expect loss in dataplane connectivity
3070 LossInPings = main.FALSE
3071 """
Devin Lim58046fa2017-07-05 16:55:00 -07003072 def compareTopo( self, main ):
3073 """
3074 Compare topo
3075 """
3076 import json
3077 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003078 assert main, "main not defined"
3079 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003080 try:
3081 from tests.dependencies.topology import Topology
3082 except ImportError:
3083 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003084 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003085 try:
3086 main.topoRelated
3087 except ( NameError, AttributeError ):
3088 main.topoRelated = Topology()
3089 main.case( "Compare ONOS Topology view to Mininet topology" )
3090 main.caseExplanation = "Compare topology objects between Mininet" +\
3091 " and ONOS"
3092 topoResult = main.FALSE
3093         topoFailMsg = "ONOS topology doesn't match Mininet"
3094 elapsed = 0
3095 count = 0
3096 main.step( "Comparing ONOS topology to MN topology" )
3097 startTime = time.time()
3098 # Give time for Gossip to work
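        # Retry until the ONOS and Mininet views match, or until at least 60
        # seconds have elapsed and at least 3 comparison attempts have been made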
3099 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3100 devicesResults = main.TRUE
3101 linksResults = main.TRUE
3102 hostsResults = main.TRUE
3103 hostAttachmentResults = True
3104 count += 1
3105 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003106 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003107 kwargs={ 'sleep': 5, 'attempts': 5,
3108 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003109 ipResult = main.TRUE
3110
Devin Lim142b5342017-07-20 15:22:39 -07003111 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003112 kwargs={ 'sleep': 5, 'attempts': 5,
3113 'randomTime': True },
3114 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003115
3116 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003117 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003118 if hosts[ controller ]:
3119 for host in hosts[ controller ]:
3120 if host is None or host.get( 'ipAddresses', [] ) == []:
3121 main.log.error(
3122 "Error with host ipAddresses on controller" +
3123 controllerStr + ": " + str( host ) )
3124 ipResult = main.FALSE
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003125 ports = main.topoRelated.getAll( "ports", True,
Jon Hallca319892017-06-15 15:25:22 -07003126 kwargs={ 'sleep': 5, 'attempts': 5,
3127 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003128 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003129 kwargs={ 'sleep': 5, 'attempts': 5,
3130 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003131 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003132 kwargs={ 'sleep': 5, 'attempts': 5,
3133 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003134
3135 elapsed = time.time() - startTime
3136 cliTime = time.time() - cliStart
Jon Hall5d5876e2017-11-30 09:33:16 -08003137 main.log.debug( "Elapsed time: " + str( elapsed ) )
3138 main.log.debug( "CLI time: " + str( cliTime ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003139
3140 if all( e is None for e in devices ) and\
3141 all( e is None for e in hosts ) and\
3142 all( e is None for e in ports ) and\
3143 all( e is None for e in links ) and\
3144 all( e is None for e in clusters ):
3145 topoFailMsg = "Could not get topology from ONOS"
3146 main.log.error( topoFailMsg )
3147 continue # Try again, No use trying to compare
3148
3149 mnSwitches = main.Mininet1.getSwitches()
3150 mnLinks = main.Mininet1.getLinks()
3151 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003152 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003153 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003154 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3155 controller,
3156 mnSwitches,
3157 devices,
3158 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003159 utilities.assert_equals( expect=main.TRUE,
3160 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003161 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003162 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003163 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003164 " Switches view is incorrect" )
3165
Devin Lim58046fa2017-07-05 16:55:00 -07003166 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003167 main.Mininet1.compareLinks,
3168 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003169 utilities.assert_equals( expect=main.TRUE,
3170 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003171 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003172 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003173 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003174 " links view is incorrect" )
3175 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3176 currentHostsResult = main.Mininet1.compareHosts(
3177 mnHosts,
3178 hosts[ controller ] )
3179 elif hosts[ controller ] == []:
3180 currentHostsResult = main.TRUE
3181 else:
3182 currentHostsResult = main.FALSE
3183 utilities.assert_equals( expect=main.TRUE,
3184 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003185 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003186 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003187 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003188 " hosts don't match Mininet" )
Devin Lim58046fa2017-07-05 16:55:00 -07003189 hostAttachment = True
Jon Hallab611372018-02-21 15:26:05 -08003190 if main.topoMappings:
3191 ctrl = main.Cluster.next()
3192 # CHECKING HOST ATTACHMENT POINTS
3193 zeroHosts = False
3194 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3195 if hosts[ controller ] == []:
3196 main.log.warn( "There are no hosts discovered" )
3197 zeroHosts = True
3198 else:
3199 for host in hosts[ controller ]:
3200 mac = None
3201 locations = []
3202 device = None
3203 port = None
3204 try:
3205 mac = host.get( 'mac' )
3206 assert mac, "mac field could not be found for this host object"
3207 if 'locations' in host:
3208 locations = host.get( 'locations' )
3209 elif 'location' in host:
3210 locations.append( host.get( 'location' ) )
3211 assert locations, "locations field could not be found for this host object"
Devin Lim58046fa2017-07-05 16:55:00 -07003212
Jon Hallab611372018-02-21 15:26:05 -08003213 # Trim the protocol identifier off deviceId
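                                # e.g. "of:0000000000001000" -> "0000000000001000"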
3214 device = str( locations[0].get( 'elementId' ) ).split( ':' )[ 1 ]
3215 assert device, "elementId field could not be found for this host location object"
Devin Lim58046fa2017-07-05 16:55:00 -07003216
Jon Hallab611372018-02-21 15:26:05 -08003217 port = locations[0].get( 'port' )
3218 assert port, "port field could not be found for this host location object"
Devin Lim58046fa2017-07-05 16:55:00 -07003219
Jon Hallab611372018-02-21 15:26:05 -08003220 # Now check if this matches where they should be
3221 if mac and device and port:
3222 if str( port ) != "1":
3223 main.log.error( "The attachment port is incorrect for " +
3224 "host " + str( mac ) +
3225 ". Expected: 1 Actual: " + str( port ) )
3226 hostAttachment = False
3227 if device != main.topoMappings[ str( mac ) ]:
3228 main.log.error( "The attachment device is incorrect for " +
3229 "host " + str( mac ) +
3230                                              ". Expected: " + main.topoMappings[ str( mac ) ] +
3231 " Actual: " + device )
3232 hostAttachment = False
3233 else:
Devin Lim58046fa2017-07-05 16:55:00 -07003234 hostAttachment = False
Jon Hallab611372018-02-21 15:26:05 -08003235 except ( AssertionError, TypeError ):
3236 main.log.exception( "Json object not as expected" )
3237 main.log.error( repr( host ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003238 hostAttachment = False
Jon Hallab611372018-02-21 15:26:05 -08003239 else:
3240 main.log.error( "No hosts json output or \"Error\"" +
3241 " in output. hosts = " +
3242 repr( hosts[ controller ] ) )
3243 if zeroHosts is False:
3244 # TODO: Find a way to know if there should be hosts in a
3245 # given point of the test
3246 hostAttachment = True
Devin Lim58046fa2017-07-05 16:55:00 -07003247
Jon Hallab611372018-02-21 15:26:05 -08003248 # END CHECKING HOST ATTACHMENT POINTS
Devin Lim58046fa2017-07-05 16:55:00 -07003249 devicesResults = devicesResults and currentDevicesResult
3250 linksResults = linksResults and currentLinksResult
3251 hostsResults = hostsResults and currentHostsResult
3252 hostAttachmentResults = hostAttachmentResults and\
3253 hostAttachment
3254 topoResult = ( devicesResults and linksResults
3255 and hostsResults and ipResult and
3256 hostAttachmentResults )
3257 utilities.assert_equals( expect=True,
3258 actual=topoResult,
3259 onpass="ONOS topology matches Mininet",
3260 onfail=topoFailMsg )
3261 # End of While loop to pull ONOS state
3262
3263 # Compare json objects for hosts and dataplane clusters
3264
3265 # hosts
3266 main.step( "Hosts view is consistent across all ONOS nodes" )
3267 consistentHostsResult = main.TRUE
3268 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003269 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003270 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3271 if hosts[ controller ] == hosts[ 0 ]:
3272 continue
3273 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003274 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003275 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003276 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003277 consistentHostsResult = main.FALSE
3278
3279 else:
Jon Hallca319892017-06-15 15:25:22 -07003280 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003281 controllerStr )
3282 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003283 main.log.debug( controllerStr +
3284 " hosts response: " +
3285 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003286 utilities.assert_equals(
3287 expect=main.TRUE,
3288 actual=consistentHostsResult,
3289 onpass="Hosts view is consistent across all ONOS nodes",
3290 onfail="ONOS nodes have different views of hosts" )
3291
3292 main.step( "Hosts information is correct" )
3293 hostsResults = hostsResults and ipResult
3294 utilities.assert_equals(
3295 expect=main.TRUE,
3296 actual=hostsResults,
3297 onpass="Host information is correct",
3298 onfail="Host information is incorrect" )
3299
3300 main.step( "Host attachment points to the network" )
3301 utilities.assert_equals(
3302 expect=True,
3303 actual=hostAttachmentResults,
3304 onpass="Hosts are correctly attached to the network",
3305 onfail="ONOS did not correctly attach hosts to the network" )
3306
3307 # Strongly connected clusters of devices
3308 main.step( "Clusters view is consistent across all ONOS nodes" )
3309 consistentClustersResult = main.TRUE
3310 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003311 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003312 if "Error" not in clusters[ controller ]:
3313 if clusters[ controller ] == clusters[ 0 ]:
3314 continue
3315 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003316 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003317 controllerStr +
3318 " is inconsistent with ONOS1" )
3319 consistentClustersResult = main.FALSE
3320 else:
3321 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003322 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003323 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003324 main.log.debug( controllerStr +
3325 " clusters response: " +
3326 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003327 utilities.assert_equals(
3328 expect=main.TRUE,
3329 actual=consistentClustersResult,
3330 onpass="Clusters view is consistent across all ONOS nodes",
3331 onfail="ONOS nodes have different views of clusters" )
3332 if not consistentClustersResult:
3333 main.log.debug( clusters )
3334 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003335 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003336
3337 main.step( "There is only one SCC" )
3338 # there should always only be one cluster
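        # i.e. ONOS should see the dataplane as a single strongly connected
        # component; more than one would indicate a partitioned topology view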
3339 try:
3340 numClusters = len( json.loads( clusters[ 0 ] ) )
3341 except ( ValueError, TypeError ):
3342 main.log.exception( "Error parsing clusters[0]: " +
3343 repr( clusters[ 0 ] ) )
3344 numClusters = "ERROR"
3345 clusterResults = main.FALSE
3346 if numClusters == 1:
3347 clusterResults = main.TRUE
3348 utilities.assert_equals(
3349 expect=1,
3350 actual=numClusters,
3351 onpass="ONOS shows 1 SCC",
3352 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3353
3354 topoResult = ( devicesResults and linksResults
3355 and hostsResults and consistentHostsResult
3356 and consistentClustersResult and clusterResults
3357 and ipResult and hostAttachmentResults )
3358
3359 topoResult = topoResult and int( count <= 2 )
3360 note = "note it takes about " + str( int( cliTime ) ) + \
3361 " seconds for the test to make all the cli calls to fetch " +\
3362 "the topology from each ONOS instance"
3363 main.log.info(
3364 "Very crass estimate for topology discovery/convergence( " +
3365 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3366 str( count ) + " tries" )
3367
3368 main.step( "Device information is correct" )
3369 utilities.assert_equals(
3370 expect=main.TRUE,
3371 actual=devicesResults,
3372 onpass="Device information is correct",
3373 onfail="Device information is incorrect" )
3374
3375 main.step( "Links are correct" )
3376 utilities.assert_equals(
3377 expect=main.TRUE,
3378 actual=linksResults,
3379             onpass="Links are correct",
3380 onfail="Links are incorrect" )
3381
3382 main.step( "Hosts are correct" )
3383 utilities.assert_equals(
3384 expect=main.TRUE,
3385 actual=hostsResults,
3386 onpass="Hosts are correct",
3387 onfail="Hosts are incorrect" )
3388
3389 # FIXME: move this to an ONOS state case
3390 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08003391 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07003392 False,
Devin Lim58046fa2017-07-05 16:55:00 -07003393 attempts=5 )
3394 utilities.assert_equals( expect=True, actual=nodeResults,
3395 onpass="Nodes check successful",
3396 onfail="Nodes check NOT successful" )
3397 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003398 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003399 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003400 ctrl.name,
Jon Hall6c9e2da2018-11-06 12:01:23 -08003401 ctrl.CLI.sendline( "onos:scr-list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003402
3403 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003404 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003405
Jon Hallab611372018-02-21 15:26:05 -08003406 def linkDown( self, main, src="s3", dst="s28" ):
Devin Lim58046fa2017-07-05 16:55:00 -07003407 """
Jon Hallab611372018-02-21 15:26:05 -08003408 Link src-dst down
Devin Lim58046fa2017-07-05 16:55:00 -07003409 """
3410 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003411 assert main, "main not defined"
3412 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003413 # NOTE: You should probably run a topology check after this
3414
3415 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3416
3417 description = "Turn off a link to ensure that Link Discovery " +\
3418 "is working properly"
3419 main.case( description )
3420
Jon Hallab611372018-02-21 15:26:05 -08003421 main.step( "Kill Link between " + src + " and " + dst )
3422 LinkDown = main.Mininet1.link( END1=src, END2=dst, OPTION="down" )
Devin Lim58046fa2017-07-05 16:55:00 -07003423 main.log.info( "Waiting " + str( linkSleep ) +
3424 " seconds for link down to be discovered" )
3425 time.sleep( linkSleep )
3426 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3427 onpass="Link down successful",
3428 onfail="Failed to bring link down" )
3429 # TODO do some sort of check here
3430
Jon Hallab611372018-02-21 15:26:05 -08003431 def linkUp( self, main, src="s3", dst="s28" ):
Devin Lim58046fa2017-07-05 16:55:00 -07003432 """
Jon Hallab611372018-02-21 15:26:05 -08003433 Link src-dst up
Devin Lim58046fa2017-07-05 16:55:00 -07003434 """
3435 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003436 assert main, "main not defined"
3437 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003438 # NOTE: You should probably run a topology check after this
3439
3440 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3441
3442 description = "Restore a link to ensure that Link Discovery is " + \
3443 "working properly"
3444 main.case( description )
3445
Jon Hallab611372018-02-21 15:26:05 -08003446 main.step( "Bring link between " + src + " and " + dst + " back up" )
3447 LinkUp = main.Mininet1.link( END1=src, END2=dst, OPTION="up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003448 main.log.info( "Waiting " + str( linkSleep ) +
3449 " seconds for link up to be discovered" )
3450 time.sleep( linkSleep )
3451 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3452 onpass="Link up successful",
3453 onfail="Failed to bring link up" )
3454
3455 def switchDown( self, main ):
3456 """
3457 Switch Down
3458 """
3459 # NOTE: You should probably run a topology check after this
3460 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003461 assert main, "main not defined"
3462 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003463
3464 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3465
3466 description = "Killing a switch to ensure it is discovered correctly"
Devin Lim58046fa2017-07-05 16:55:00 -07003467 main.case( description )
3468 switch = main.params[ 'kill' ][ 'switch' ]
3469 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3470
3471 # TODO: Make this switch parameterizable
3472 main.step( "Kill " + switch )
3473 main.log.info( "Deleting " + switch )
3474 main.Mininet1.delSwitch( switch )
3475 main.log.info( "Waiting " + str( switchSleep ) +
3476 " seconds for switch down to be discovered" )
3477 time.sleep( switchSleep )
Jon Hall0e240372018-05-02 11:21:57 -07003478 device = main.Cluster.next().getDevice( dpid=switchDPID )
Devin Lim58046fa2017-07-05 16:55:00 -07003479 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003480         main.log.warn( "Deleted switch in ONOS: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003481 result = main.FALSE
3482 if device and device[ 'available' ] is False:
3483 result = main.TRUE
3484 utilities.assert_equals( expect=main.TRUE, actual=result,
3485 onpass="Kill switch successful",
3486 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003487
Devin Lim58046fa2017-07-05 16:55:00 -07003488 def switchUp( self, main ):
3489 """
3490 Switch Up
3491 """
3492 # NOTE: You should probably run a topology check after this
3493 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003494 assert main, "main not defined"
3495 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003496
3497 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3498 switch = main.params[ 'kill' ][ 'switch' ]
3499 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3500 links = main.params[ 'kill' ][ 'links' ].split()
Devin Lim58046fa2017-07-05 16:55:00 -07003501 description = "Adding a switch to ensure it is discovered correctly"
3502 main.case( description )
3503
3504 main.step( "Add back " + switch )
3505 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3506 for peer in links:
3507 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003508 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003509 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3510 main.log.info( "Waiting " + str( switchSleep ) +
3511 " seconds for switch up to be discovered" )
3512 time.sleep( switchSleep )
Jon Hall0e240372018-05-02 11:21:57 -07003513 device = main.Cluster.next().getDevice( dpid=switchDPID )
Devin Lim58046fa2017-07-05 16:55:00 -07003514         # Peek at the newly added switch
Jon Hallca319892017-06-15 15:25:22 -07003515 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003516 result = main.FALSE
3517 if device and device[ 'available' ]:
3518 result = main.TRUE
3519 utilities.assert_equals( expect=main.TRUE, actual=result,
3520 onpass="add switch successful",
3521 onfail="Failed to add switch?" )
3522
3523 def startElectionApp( self, main ):
3524 """
3525 start election app on all onos nodes
3526 """
Devin Lim58046fa2017-07-05 16:55:00 -07003527 assert main, "main not defined"
3528 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003529
3530 main.case( "Start Leadership Election app" )
3531 main.step( "Install leadership election app" )
Jon Hall0e240372018-05-02 11:21:57 -07003532 appResult = main.Cluster.next().CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003533 utilities.assert_equals(
3534 expect=main.TRUE,
3535 actual=appResult,
3536 onpass="Election app installed",
3537 onfail="Something went wrong with installing Leadership election" )
3538
3539 main.step( "Run for election on each node" )
Jon Hall0e240372018-05-02 11:21:57 -07003540 main.Cluster.next().electionTestRun()
Jon Hallca319892017-06-15 15:25:22 -07003541 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003542 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003543 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003544 utilities.assert_equals(
3545 expect=True,
3546 actual=sameResult,
3547 onpass="All nodes see the same leaderboards",
3548 onfail="Inconsistent leaderboards" )
3549
3550 if sameResult:
Jon Hall5d5876e2017-11-30 09:33:16 -08003551 # Check that the leader is one of the active nodes
3552 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003553 leader = leaders[ 0 ][ 0 ]
Jon Hall5d5876e2017-11-30 09:33:16 -08003554 if leader in ips:
3555 legitimate = True
Devin Lim58046fa2017-07-05 16:55:00 -07003556 else:
Jon Hall5d5876e2017-11-30 09:33:16 -08003557 legitimate = False
3558 main.log.debug( leaders )
3559 main.step( "Active node was elected leader?" )
Devin Lim58046fa2017-07-05 16:55:00 -07003560 utilities.assert_equals(
3561 expect=True,
Jon Hall5d5876e2017-11-30 09:33:16 -08003562 actual=legitimate,
Devin Lim58046fa2017-07-05 16:55:00 -07003563 onpass="Correct leader was elected",
3564 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003565 main.Cluster.testLeader = leader
3566
Devin Lim58046fa2017-07-05 16:55:00 -07003567 def isElectionFunctional( self, main ):
3568 """
3569 Check that Leadership Election is still functional
3570 15.1 Run election on each node
3571 15.2 Check that each node has the same leaders and candidates
3572 15.3 Find current leader and withdraw
3573 15.4 Check that a new node was elected leader
3574 15.5 Check that that new leader was the candidate of old leader
3575 15.6 Run for election on old leader
3576 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3577 15.8 Make sure that the old leader was added to the candidate list
3578
3579 old and new variable prefixes refer to data from before vs after
3580         the withdrawal, and later to before the withdrawal vs after re-election
3581 """
3582 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003583 assert main, "main not defined"
3584 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003585
3586 description = "Check that Leadership Election is still functional"
3587 main.case( description )
3588         # NOTE: Need to re-run after restarts since being a candidate is not persistent
3589
3590 oldLeaders = [] # list of lists of each nodes' candidates before
3591 newLeaders = [] # list of lists of each nodes' candidates after
3592 oldLeader = '' # the old leader from oldLeaders, None if not same
3593         newLeader = ''  # the new leader from newLeaders, None if not same
3594 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3595 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003596 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003597 expectNoLeader = True
3598
        main.step( "Run for election on each node" )
        electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
        utilities.assert_equals(
            expect=True,
            actual=electionResult,
            onpass="All nodes successfully ran for leadership",
            onfail="At least one node failed to run for leadership" )

        if electionResult == main.FALSE:
            main.log.error(
                "Skipping Test Case because Election Test App isn't loaded" )
            main.skipCase()

        main.step( "Check that each node shows the same leader and candidates" )
        failMessage = "Nodes have different leaderboards"
        activeCLIs = main.Cluster.active()
        sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.info( "Old leader: " + oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for ctrl in main.Cluster.active():
            if oldLeader == ctrl.ipAddress:
                oldLeaderCLI = ctrl
                break
        else:  # FOR/ELSE statement
            main.log.error( "Leader election, could not find current leader amongst active nodes" )
            for ctrl in main.Cluster.controllers:
                if oldLeader == ctrl.ipAddress:
                    oldLeaderCLI = ctrl
                    main.log.warn( "Old leader was found as node " + str( ctrl.ipAddress ) )
            # Should we skip the next if statement then? There should be a new leader elected?
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the old leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that the new leader was the candidate of the old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # Parameterize
        positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
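        # A check on positionResult would normally follow here; a minimal sketch,
        # assuming the same assert_equals pattern used in the steps above:
        #     utilities.assert_equals(
        #         expect=True,
        #         actual=positionResult,
        #         onpass="Old leader successfully re-ran for election",
        #         onfail="Leaderboards inconsistent after the old leader re-ran" )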

    def installDistributedPrimitiveApp( self, main ):
        '''
        Install Distributed Primitives app
        '''
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        # Variables for the distributed primitives tests
        main.pCounterName = "TestON-Partitions"
        main.pCounterValue = 0
        main.onosSet = set( [] )
        main.onosSetName = "TestON-set"

        description = "Install Primitives app"
        main.case( description )
        main.step( "Install Primitives app" )
        appName = "org.onosproject.distributedprimitives"
        appResults = main.Cluster.next().CLI.activateApp( appName )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=appResults,
                                 onpass="Primitives app activated",
                                 onfail="Primitives app not activated" )
        # TODO check on all nodes instead of sleeping
        time.sleep( 5 )  # To allow all nodes to activate
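        # The fixed sleep above could be replaced by polling every active node until
        # the app reports as active. Illustrative sketch only; it assumes a
        # hypothetical per-node appStatus( appName ) helper on the CLI driver:
        #     def primitivesActive():
        #         return all( ctrl.CLI.appStatus( appName ) == "ACTIVE"
        #                     for ctrl in main.Cluster.active() )
        #     utilities.retry( primitivesActive, False, attempts=10, sleep=1 )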

    def upgradeInit( self, main ):
        '''
        Initiates an upgrade
        '''
        main.step( "Send the command to initialize the upgrade" )
        ctrl = main.Cluster.next().CLI
        initialized = ctrl.issuInit()
        utilities.assert_equals( expect=main.TRUE, actual=initialized,
                                 onpass="ISSU initialized",
                                 onfail="Error initializing the upgrade" )

        main.step( "Check the status of the upgrade" )
        ctrl = main.Cluster.next().CLI
        status = ctrl.issu()
        main.log.debug( status )
        # TODO: check things here?

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       sleep=15,
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )
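        # This helper only initializes the upgrade; the calling test is expected to
        # drive the later ISSU phases ( e.g. upgrade, commit, or rollback via the
        # CLI ) once the new software version is installed on the cluster.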

    def backupData( self, main, location ):
        """
        Backs up ONOS data and logs to a given location on each active node in a cluster
        """
        result = True
        for ctrl in main.Cluster.active():
            try:
                ctrl.server.handle.sendline( "rm " + location )
                ctrl.server.handle.expect( ctrl.server.prompt )
                main.log.debug( ctrl.server.handle.before + ctrl.server.handle.after )
            except pexpect.ExceptionPexpect as e:
                main.log.error( e )
                main.cleanAndExit()
            ctrl.CLI.log( "'Starting backup of onos data'", level="INFO" )
            result = result and ( ctrl.server.backupData( location ) is main.TRUE )
            ctrl.CLI.log( "'End of backup of onos data'", level="INFO" )
        return result

    def restoreData( self, main, location ):
        """
        Restores ONOS data and logs from a given location on each node in a cluster
        """
        result = True
        for ctrl in main.Cluster.controllers:
            result = result and ( ctrl.server.restoreData( location ) is main.TRUE )
        return result

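    # Typical usage of backupData/restoreData from a test case ( illustrative;
    # the location is an example path ):
    #     main.HA.backupData( main, "/tmp/onos-data-backup.tar" )
    #     ... restart or upgrade the cluster ...
    #     main.HA.restoreData( main, "/tmp/onos-data-backup.tar" )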
    def startTopology( self, main ):
        """
        Starts Mininet using a topology file after pushing a network config file to ONOS.
        """
        import json
        import time
        main.case( "Starting Mininet Topology" )

        main.step( "Pushing Network config" )
        ctrl = main.Cluster.next()
        cfgPath = main.testsRoot + main.params[ 'topology' ][ 'configPath' ]
        cfgResult = ctrl.onosNetCfg( ctrl.ipAddress,
                                     path=cfgPath,
                                     fileName=main.params[ 'topology' ][ 'configName' ] )
        utilities.assert_equals( expect=main.TRUE, actual=cfgResult,
                                 onpass="Pushed Network Configuration to ONOS",
                                 onfail="Failed to push Network Configuration to ONOS" )

        main.step( "Check Network config" )
        try:
            cfgFile = cfgPath + main.params[ 'topology' ][ 'configName' ]
            with open( cfgFile, 'r' ) as contents:
                pushedNetCfg = json.load( contents )
                pushedNetCfg = json.loads( json.dumps( pushedNetCfg ).lower() )
        except IOError:
            main.log.exception( "Net Cfg file not found." )
            main.cleanAndExit()
        netCfgSleep = int( main.params[ 'timers' ][ 'NetCfg' ] )
        time.sleep( netCfgSleep )
        rawONOSNetCfg = utilities.retry( f=main.Cluster.next().REST.getNetCfg,
                                         retValue=False,
                                         attempts=5,
                                         sleep=netCfgSleep )
        # Fix differences between ONOS printing and Pushed Cfg
        onosNetCfg = json.loads( rawONOSNetCfg.lower() )

        # Compare pushed device config
        cfgResult = True
        for did, pushedDevice in pushedNetCfg[ 'devices' ].items():
            onosDevice = onosNetCfg[ 'devices' ].get( did )
            if pushedDevice != onosDevice:
                cfgResult = False
                main.log.error( "Pushed Network configuration does not match what is in " +
                                "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedDevice ),
                                                                      ctrl.pprint( onosDevice ) ) )

        # Compare pushed port config
        for portURI, pushedInterface in pushedNetCfg[ 'ports' ].items():
            onosInterface = onosNetCfg[ 'ports' ].get( portURI )
            # NOTE: pushed Cfg doesn't have macs
            for i in xrange( 0, len( pushedInterface[ 'interfaces' ] ) ):
                keys = pushedInterface[ 'interfaces' ][ i ].keys()
                portCompare = True
                for key in keys:
                    if pushedInterface[ 'interfaces' ][ i ].get( key ) != onosInterface[ 'interfaces' ][ i ].get( key ):
                        main.log.debug( "{} mismatch for port {}".format( key, portURI ) )
                        portCompare = False
                if not portCompare:
                    cfgResult = False
                    main.log.error( "Pushed Network configuration does not match what is in " +
                                    "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedInterface ),
                                                                          ctrl.pprint( onosInterface ) ) )

        if pushedNetCfg.get( 'hosts' ) is not None:
            # Compare pushed host config
            for hid, pushedHost in pushedNetCfg[ 'hosts' ].items():
                onosHost = onosNetCfg[ 'hosts' ].get( hid.lower() )
                if pushedHost != onosHost:
                    cfgResult = False
                    main.log.error( "Pushed Network configuration does not match what is in " +
                                    "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedHost ),
                                                                          ctrl.pprint( onosHost ) ) )
        utilities.assert_equals( expect=True,
                                 actual=cfgResult,
                                 onpass="Net Cfg set",
                                 onfail="Net Cfg not correctly set" )
        if not cfgResult:
            main.log.debug( "Pushed Network Config:" + ctrl.pprint( pushedNetCfg ) )
            main.log.debug( "ONOS Network Config:" + ctrl.pprint( onosNetCfg ) )

        main.step( "Start Mininet topology" )
        for f in main.params[ 'topology' ][ 'files' ].values():
            main.ONOSbench.scp( main.Mininet1,
                                f,
                                main.Mininet1.home,
                                direction="to" )
        topoName = main.params[ 'topology' ][ 'topoFile' ]
        topo = main.Mininet1.home + topoName
        ctrlList = ''
        for ctrl in main.Cluster.controllers:
            ctrlList += str( ctrl.ipAddress ) + ","
        args = main.params[ 'topology' ][ 'args' ]
        startResult = main.Mininet1.startNet( topoFile=topo,
                                              args=" --onos-ip=" + ctrlList + " " + args )
        utilities.assert_equals( expect=main.TRUE, actual=startResult,
                                 onpass="Mininet Started",
                                 onfail="Failed to start Mininet" )
        # Give SR app time to configure the network
        time.sleep( int( main.params[ 'timers' ][ 'SRSetup' ] ) )
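        # Example of the params structure this method reads ( illustrative values;
        # actual paths, file names, and timer values are defined per test ):
        #     'topology': { 'configPath': '/HA/dependencies/',
        #                   'configName': 'netcfg.json',
        #                   'files': { 'topo': '~/custom-topo.py' },
        #                   'topoFile': 'custom-topo.py',
        #                   'args': '' },
        #     'timers': { 'NetCfg': '5', 'SRSetup': '60' }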