"""
Copyright 2015 Open Networking Foundation ( ONF )

Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>

    TestON is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    ( at your option ) any later version.

    TestON is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with TestON.  If not, see <http://www.gnu.org/licenses/>.
"""
import json
import time
import pexpect
import re


class HA():

    def __init__( self ):
        self.default = ''
        main.topoMappings = {}

    def customizeOnosGenPartitions( self ):
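        """
        Copy the HA test's customized onos-gen-partitions script over the
        copy in tools/test/bin on ONOSbench.
        """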
        # copy gen-partitions file to ONOS
        # NOTE: this assumes TestON and ONOS are on the same machine
        srcFile = main.testsRoot + "/HA/dependencies/onos-gen-partitions"
        dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
        cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
                                              main.ONOSbench.ip_address,
                                              srcFile,
                                              dstDir,
                                              pwd=main.ONOSbench.pwd,
                                              direction="from" )

    def cleanUpGenPartition( self ):
        # clean up gen-partitions file
        try:
            main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
            main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
            main.log.info( "Cleaning custom gen partitions file, response was: \n" +
                           str( main.ONOSbench.handle.before ) )
        except ( pexpect.TIMEOUT, pexpect.EOF ):
            main.log.exception( "ONOSbench: pexpect exception found:" +
                                main.ONOSbench.handle.before )
            main.cleanAndExit()

    def startingMininet( self ):
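        """
        Copy the obelisk.py topology file to the Mininet machine and start Mininet.
        """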
        main.step( "Starting Mininet" )
        # scp topo file to mininet
        # TODO: move to params?
        topoName = "obelisk.py"
        filePath = main.ONOSbench.home + "/tools/test/topos/"
        main.ONOSbench.scp( main.Mininet1,
                            filePath + topoName,
                            main.Mininet1.home,
                            direction="to" )
        mnResult = main.Mininet1.startNet()
        utilities.assert_equals( expect=main.TRUE, actual=mnResult,
                                 onpass="Mininet Started",
                                 onfail="Error starting Mininet" )

    def swapNodeMetadata( self ):
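        """
        Run the cluster on two fewer ONOS nodes than are available ( requires
        5 or more nodes ), leaving spare nodes for the node-swap test cases.
        """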
        if main.Cluster.numCtrls >= 5:
            main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
        else:
            main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )

    def copyBackupConfig( self ):
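        """
        Back up the onos-service config file on ONOSbench as
        onos-service.backup before the test modifies it.
        """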
        main.step( "Copying backup config files" )
        main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
        cp = main.ONOSbench.scp( main.ONOSbench,
                                 main.onosServicepath,
                                 main.onosServicepath + ".backup",
                                 direction="to" )

        utilities.assert_equals( expect=main.TRUE,
                                 actual=cp,
                                 onpass="Copy backup config file succeeded",
                                 onfail="Copy backup config file failed" )

    def setMetadataUrl( self ):
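        """
        Edit the onos-service file so ONOS loads its cluster metadata
        ( cluster.json ) from a URL served by the test station.
        """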
        # NOTE: You should probably backup the config before and reset the config after the test
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
                                                                                         main.onosServicepath )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )

    def cleanUpOnosService( self ):
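        """
        Restore the original onos-service file from the .backup copy.
        """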
        # Cleanup custom onos-service file
        main.ONOSbench.scp( main.ONOSbench,
                            main.onosServicepath + ".backup",
                            main.onosServicepath,
                            direction="to" )

    def consistentCheck( self ):
        """
        Checks that TestON counters are consistent across all nodes.

        Returns the tuple ( onosCounters, consistent )
        - onosCounters is the parsed json output of the counters command on
          all nodes
        - consistent is main.TRUE if all "TestON" counters are consistent across
          all nodes or main.FALSE
        """
        try:
            # Get onos counters results
            onosCountersRaw = []
            threads = []
            for ctrl in main.Cluster.active():
                t = main.Thread( target=utilities.retry,
                                 name="counters-" + str( ctrl ),
                                 args=[ ctrl.counters, [ None ] ],
                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                          'randomTime': True } )
                threads.append( t )
                t.start()
            for t in threads:
                t.join()
                onosCountersRaw.append( t.result )
            onosCounters = []
            for i in range( len( onosCountersRaw ) ):
                try:
                    value = json.loads( onosCountersRaw[ i ] )
                    onosCounters.append( value )
                except ( ValueError, TypeError ):
                    main.log.error( "Could not parse counters response from " +
                                    str( main.Cluster.active( i ) ) )
                    main.log.warn( repr( onosCountersRaw[ i ] ) )
                    onosCounters.append( {} )

            testCounters = {}
            # make a list of all the "TestON-*" counters in ONOS
            # looks like a dict whose keys are the name of the ONOS node and
            # values are a list of the counters. I.E.
            # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
            # }
            # NOTE: There is an assumption that all nodes are active
            #       based on the above for loops
            for controller in enumerate( onosCounters ):
                for key, value in controller[ 1 ].iteritems():
                    if 'TestON' in key:
                        node = main.Cluster.active( controller[ 0 ] )
                        try:
                            testCounters[ node ].append( { key: value } )
                        except KeyError:
                            testCounters[ node ] = [ { key: value } ]
            # compare the counters on each node
            firstV = testCounters.values()[ 0 ]
            tmp = [ v == firstV for k, v in testCounters.iteritems() ]
            if all( tmp ):
                consistent = main.TRUE
            else:
                consistent = main.FALSE
                main.log.error( "ONOS nodes have different values for counters: %s",
                                testCounters )
            return ( onosCounters, consistent )
        except Exception:
            main.log.exception( "" )
            main.cleanAndExit()

    def counterCheck( self, counterName, counterValue ):
        """
        Checks that TestON counters are consistent across all nodes and that
        specified counter is in ONOS with the given value
        """
        try:
            correctResults = main.TRUE
            # Get onos counters results and consistentCheck
            onosCounters, consistent = self.consistentCheck()
            # Check for correct values
            for i in range( len( main.Cluster.active() ) ):
                node = str( main.Cluster.active( i ) )
                current = onosCounters[ i ]
                onosValue = None
                try:
                    onosValue = current.get( counterName )
                except AttributeError:
                    main.log.exception( node + " counters result " +
                                        "is not as expected" )
                    correctResults = main.FALSE
                if onosValue == counterValue:
                    main.log.info( "{}: {} counter value is correct".format( node, counterName ) )
                else:
                    main.log.error( node + ": " + counterName +
                                    " counter value is incorrect," +
                                    " expected value: " + str( counterValue ) +
                                    " current value: " + str( onosValue ) )
                    correctResults = main.FALSE
            return consistent and correctResults
        except Exception:
            main.log.exception( "" )
            main.cleanAndExit()

    def consistentLeaderboards( self, nodes ):
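        """
        Check that all given nodes agree on the leader candidates for the
        election topic. Returns the tuple ( result, leaderList ).
        """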
        TOPIC = 'org.onosproject.election'
        # FIXME: use threads
        # FIXME: should we retry outside the function?
        for n in range( 5 ):  # Retry in case election is still happening
            leaderList = []
            # Get all leaderboards
            for cli in nodes:
                leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
            # Compare leaderboards
            result = all( i == leaderList[ 0 ] for i in leaderList ) and\
                     leaderList is not None
            if result:
                return ( result, leaderList )
            time.sleep( 5 )  # TODO: parameterize
        main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
        return ( result, leaderList )

    def initialSetUp( self, serviceClean=False ):
        """
        rest of initialSetup
        """
        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       attempts=90 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This should be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )

    def commonChecks( self ):
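        """
        Run the common cluster state checks: topics, partitions, pending map
        and app IDs.
        """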
        # TODO: make this assertable or assert in here?
        self.topicsCheck()
        self.partitionsCheck()
        self.pendingMapCheck()
        self.appCheck()

    def topicsCheck( self, extraTopics=[] ):
        """
        Check for work partition topics in leaders output
        """
        leaders = main.Cluster.next().leaders()
        missing = False
        try:
            if leaders:
                parsedLeaders = json.loads( leaders )
                output = json.dumps( parsedLeaders,
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
                # check for all intent partitions
                topics = []
                for i in range( 14 ):
                    topics.append( "work-partition-" + str( i ) )
                topics += extraTopics
                ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
                for topic in topics:
                    if topic not in ONOStopics:
                        main.log.error( "Error: " + topic +
                                        " not in leaders" )
                        missing = True
            else:
                main.log.error( "leaders() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing leaders" )
            main.log.error( repr( leaders ) )
        if missing:
            # NOTE Can we refactor this into the Cluster class?
            # Maybe an option to print the output of a command from each node?
            for ctrl in main.Cluster.active():
                response = ctrl.CLI.leaders( jsonFormat=False )
                main.log.debug( str( ctrl.name ) + " leaders output: \n" +
                                str( response ) )
        return missing

    def partitionsCheck( self ):
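        """
        Log the partitions view reported by an ONOS node.
        """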
        # TODO: return something assertable
        partitions = main.Cluster.next().partitions()
        try:
            if partitions:
                parsedPartitions = json.loads( partitions )
                output = json.dumps( parsedPartitions,
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
                main.log.debug( "Partitions: " + output )
                # TODO check for a leader in all partitions
                # TODO check for consistency among nodes
            else:
                main.log.error( "partitions() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing partitions" )
            main.log.error( repr( partitions ) )

    def pendingMapCheck( self ):
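        """
        Log the intent pending map reported by an ONOS node.
        """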
        pendingMap = main.Cluster.next().pendingMap()
        try:
            if pendingMap:
                parsedPending = json.loads( pendingMap )
                output = json.dumps( parsedPending,
                                     sort_keys=True,
                                     indent=4,
                                     separators=( ',', ': ' ) )
                main.log.debug( "Pending map: " + output )
                # TODO check something here?
            else:
                main.log.error( "pendingMap() returned None" )
        except ( ValueError, TypeError ):
            main.log.exception( "Error parsing pending map" )
            main.log.error( repr( pendingMap ) )

    def appCheck( self ):
        """
        Check App IDs on all nodes
        """
        # FIXME: Rename this to appIDCheck? or add a check for installed apps
        for i in range( 15 ):
            # TODO modify retry or add a new version that accepts looking for
            # a value in a return list instead of needing to match the entire
            # return value to retry
            appResults = main.Cluster.command( "appToIDCheck" )
            appCheck = all( i == main.TRUE for i in appResults )
            if appCheck:
                break
            else:
                time.sleep( 5 )

        if not appCheck:
            ctrl = main.Cluster.active( 0 )
            main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.pprint( ctrl.apps() ) ) )
            main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.pprint( ctrl.appIDs() ) ) )
        return appCheck

    def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
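        """
        Compare the completed, in progress and pending totals of the given
        work queue on every node against the expected values.
        """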
        # Completed
        completedValues = main.Cluster.command( "workQueueTotalCompleted",
                                                args=[ workQueueName ] )
        # Check the results
        completedResults = [ int( x ) == completed for x in completedValues ]
        completedResult = all( completedResults )
        if not completedResult:
            main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
                workQueueName, completed, completedValues ) )

        # In Progress
        inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
                                                 args=[ workQueueName ] )
        # Check the results
        inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
        inProgressResult = all( inProgressResults )
        if not inProgressResult:
            main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
                workQueueName, inProgress, inProgressValues ) )

        # Pending
        pendingValues = main.Cluster.command( "workQueueTotalPending",
                                              args=[ workQueueName ] )
        # Check the results
        pendingResults = [ int( x ) == pending for x in pendingValues ]
        pendingResult = all( pendingResults )
        if not pendingResult:
            main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
                workQueueName, pending, pendingValues ) )
        return completedResult and inProgressResult and pendingResult

    def assignDevices( self, main ):
        """
        Assign devices to controllers
        """
        import re
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
                               "and check that an ONOS node becomes the " + \
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = main.Cluster.getIps()
        swList = main.Mininet1.getSwitches().keys()
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        for switch in swList:
            response = main.Mininet1.getSwController( switch )
            try:
                main.log.info( str( response ) )
                for ctrl in main.Cluster.runningNodes:
                    if re.search( "tcp:" + ctrl.ipAddress, response ):
                        mastershipCheck = mastershipCheck and main.TRUE
                    else:
                        main.log.error( "Error, node " + repr( ctrl ) + " is " +
                                        "not in the list of controllers " +
                                        switch + " is connecting to." )
                        mastershipCheck = main.FALSE
            except Exception:
                main.log.warn( "Error parsing get-controller response" )
                mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

        # Mappings for attachmentPoints from host mac to deviceID
        # TODO: make the key a dict with deviceIds and port #'s
        # FIXME: topo-HA/obelisk specific mappings:
        # key is mac and value is dpid
        main.topoMappings = {}
        for i in range( 1, 29 ):  # hosts 1 through 28
            # set up correct variables:
            macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
            if i == 1:
                deviceId = "1000".zfill( 16 )
            elif i == 2:
                deviceId = "2000".zfill( 16 )
            elif i == 3:
                deviceId = "3000".zfill( 16 )
            elif i == 4:
                deviceId = "3004".zfill( 16 )
            elif i == 5:
                deviceId = "5000".zfill( 16 )
            elif i == 6:
                deviceId = "6000".zfill( 16 )
            elif i == 7:
                deviceId = "6007".zfill( 16 )
            elif i >= 8 and i <= 17:
                dpid = '3' + str( i ).zfill( 3 )
                deviceId = dpid.zfill( 16 )
            elif i >= 18 and i <= 27:
                dpid = '6' + str( i ).zfill( 3 )
                deviceId = dpid.zfill( 16 )
            elif i == 28:
                deviceId = "2800".zfill( 16 )
            main.topoMappings[ macId ] = deviceId

    def assignIntents( self, main ):
531 """
532 Assign intents
533 """
534 import time
535 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700536 assert main, "main not defined"
537 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700538 try:
539 main.HAlabels
540 except ( NameError, AttributeError ):
541 main.log.error( "main.HAlabels not defined, setting to []" )
542 main.HAlabels = []
543 try:
544 main.HAdata
545 except ( NameError, AttributeError ):
546 main.log.error( "data not defined, setting to []" )
547 main.HAdata = []
548 main.case( "Adding host Intents" )
549 main.caseExplanation = "Discover hosts by using pingall then " +\
550 "assign predetermined host-to-host intents." +\
551 " After installation, check that the intent" +\
552 " is distributed to all nodes and the state" +\
553 " is INSTALLED"
554
555 # install onos-app-fwd
556 main.step( "Install reactive forwarding app" )
Jon Hall0e240372018-05-02 11:21:57 -0700557 installResults = main.Cluster.next().CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700558 utilities.assert_equals( expect=main.TRUE, actual=installResults,
559 onpass="Install fwd successful",
560 onfail="Install fwd failed" )
561
562 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700563 appCheck = self.appCheck()
564 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700565 onpass="App Ids seem to be correct",
566 onfail="Something is wrong with app Ids" )
567
568 main.step( "Discovering Hosts( Via pingall for now )" )
569 # FIXME: Once we have a host discovery mechanism, use that instead
570 # REACTIVE FWD test
571 pingResult = main.FALSE
572 passMsg = "Reactive Pingall test passed"
573 time1 = time.time()
574 pingResult = main.Mininet1.pingall()
575 time2 = time.time()
576 if not pingResult:
577 main.log.warn( "First pingall failed. Trying again..." )
578 pingResult = main.Mininet1.pingall()
579 passMsg += " on the second try"
580 utilities.assert_equals(
581 expect=main.TRUE,
582 actual=pingResult,
583 onpass=passMsg,
584 onfail="Reactive Pingall failed, " +
585 "one or more ping pairs failed" )
586 main.log.info( "Time for pingall: %2f seconds" %
587 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700588 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700589 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700590 # timeout for fwd flows
591 time.sleep( 11 )
592 # uninstall onos-app-fwd
593 main.step( "Uninstall reactive forwarding app" )
Jon Hall0e240372018-05-02 11:21:57 -0700594 uninstallResult = main.Cluster.next().CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700595 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
596 onpass="Uninstall fwd successful",
597 onfail="Uninstall fwd failed" )
598
599 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700600 appCheck2 = self.appCheck()
601 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700602 onpass="App Ids seem to be correct",
603 onfail="Something is wrong with app Ids" )
604
605 main.step( "Add host intents via cli" )
606 intentIds = []
607 # TODO: move the host numbers to params
608 # Maybe look at all the paths we ping?
609 intentAddResult = True
610 hostResult = main.TRUE
611 for i in range( 8, 18 ):
612 main.log.info( "Adding host intent between h" + str( i ) +
613 " and h" + str( i + 10 ) )
614 host1 = "00:00:00:00:00:" + \
615 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
616 host2 = "00:00:00:00:00:" + \
617 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
618 # NOTE: getHost can return None
Jon Hall0e240372018-05-02 11:21:57 -0700619 host1Dict = main.Cluster.next().CLI.getHost( host1 )
620 host2Dict = main.Cluster.next().CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700621 host1Id = None
622 host2Id = None
623 if host1Dict and host2Dict:
624 host1Id = host1Dict.get( 'id', None )
625 host2Id = host2Dict.get( 'id', None )
626 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700627 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700628 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700629 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700630 if tmpId:
631 main.log.info( "Added intent with id: " + tmpId )
632 intentIds.append( tmpId )
633 else:
634 main.log.error( "addHostIntent returned: " +
635 repr( tmpId ) )
636 else:
637 main.log.error( "Error, getHost() failed for h" + str( i ) +
638 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700639 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700640 try:
Jon Hallca319892017-06-15 15:25:22 -0700641 output = json.dumps( json.loads( hosts ),
642 sort_keys=True,
643 indent=4,
644 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700645 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700646 output = repr( hosts )
647 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700648 hostResult = main.FALSE
649 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
650 onpass="Found a host id for each host",
651 onfail="Error looking up host ids" )
652
653 intentStart = time.time()
Jon Hall0e240372018-05-02 11:21:57 -0700654 onosIds = main.Cluster.next().getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700655 main.log.info( "Submitted intents: " + str( intentIds ) )
656 main.log.info( "Intents in ONOS: " + str( onosIds ) )
657 for intent in intentIds:
658 if intent in onosIds:
659 pass # intent submitted is in onos
660 else:
661 intentAddResult = False
662 if intentAddResult:
663 intentStop = time.time()
664 else:
665 intentStop = None
666 # Print the intent states
Jon Hall0e240372018-05-02 11:21:57 -0700667 intents = main.Cluster.next().CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700668 intentStates = []
669 installedCheck = True
670 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
671 count = 0
672 try:
673 for intent in json.loads( intents ):
674 state = intent.get( 'state', None )
675 if "INSTALLED" not in state:
676 installedCheck = False
677 intentId = intent.get( 'id', None )
678 intentStates.append( ( intentId, state ) )
679 except ( ValueError, TypeError ):
680 main.log.exception( "Error parsing intents" )
681 # add submitted intents not in the store
682 tmplist = [ i for i, s in intentStates ]
683 missingIntents = False
684 for i in intentIds:
685 if i not in tmplist:
686 intentStates.append( ( i, " - " ) )
687 missingIntents = True
688 intentStates.sort()
689 for i, s in intentStates:
690 count += 1
691 main.log.info( "%-6s%-15s%-15s" %
692 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700693 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700694
695 intentAddResult = bool( intentAddResult and not missingIntents and
696 installedCheck )
697 if not intentAddResult:
698 main.log.error( "Error in pushing host intents to ONOS" )
699
700 main.step( "Intent Anti-Entropy dispersion" )
701 for j in range( 100 ):
702 correct = True
703 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700704 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700705 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700706 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700707 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700708 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700709 str( sorted( onosIds ) ) )
710 if sorted( ids ) != sorted( intentIds ):
711 main.log.warn( "Set of intent IDs doesn't match" )
712 correct = False
713 break
714 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700715 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700716 for intent in intents:
717 if intent[ 'state' ] != "INSTALLED":
718 main.log.warn( "Intent " + intent[ 'id' ] +
719 " is " + intent[ 'state' ] )
720 correct = False
721 break
722 if correct:
723 break
724 else:
725 time.sleep( 1 )
726 if not intentStop:
727 intentStop = time.time()
728 global gossipTime
729 gossipTime = intentStop - intentStart
730 main.log.info( "It took about " + str( gossipTime ) +
731 " seconds for all intents to appear in each node" )
732 append = False
733 title = "Gossip Intents"
734 count = 1
735 while append is False:
736 curTitle = title + str( count )
737 if curTitle not in main.HAlabels:
738 main.HAlabels.append( curTitle )
739 main.HAdata.append( str( gossipTime ) )
740 append = True
741 else:
742 count += 1
743 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700744 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700745 utilities.assert_greater_equals(
746 expect=maxGossipTime, actual=gossipTime,
747 onpass="ECM anti-entropy for intents worked within " +
748 "expected time",
749 onfail="Intent ECM anti-entropy took too long. " +
750 "Expected time:{}, Actual time:{}".format( maxGossipTime,
751 gossipTime ) )
752 if gossipTime <= maxGossipTime:
753 intentAddResult = True
754
Jon Hallca319892017-06-15 15:25:22 -0700755 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700756 if not intentAddResult or "key" in pendingMap:
Devin Lim58046fa2017-07-05 16:55:00 -0700757 installedCheck = True
758 main.log.info( "Sleeping 60 seconds to see if intents are found" )
759 time.sleep( 60 )
Jon Hall0e240372018-05-02 11:21:57 -0700760 onosIds = main.Cluster.next().getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700761 main.log.info( "Submitted intents: " + str( intentIds ) )
762 main.log.info( "Intents in ONOS: " + str( onosIds ) )
763 # Print the intent states
Jon Hall0e240372018-05-02 11:21:57 -0700764 intents = main.Cluster.next().CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700765 intentStates = []
766 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
767 count = 0
768 try:
769 for intent in json.loads( intents ):
770 # Iter through intents of a node
771 state = intent.get( 'state', None )
772 if "INSTALLED" not in state:
773 installedCheck = False
774 intentId = intent.get( 'id', None )
775 intentStates.append( ( intentId, state ) )
776 except ( ValueError, TypeError ):
777 main.log.exception( "Error parsing intents" )
778 # add submitted intents not in the store
779 tmplist = [ i for i, s in intentStates ]
780 for i in intentIds:
781 if i not in tmplist:
782 intentStates.append( ( i, " - " ) )
783 intentStates.sort()
784 for i, s in intentStates:
785 count += 1
786 main.log.info( "%-6s%-15s%-15s" %
787 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700788 self.topicsCheck( [ "org.onosproject.election" ] )
789 self.partitionsCheck()
790 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700791
Jon Hallca319892017-06-15 15:25:22 -0700792 def pingAcrossHostIntent( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -0700793 """
794 Ping across added host intents
795 """
796 import json
797 import time
Devin Lim58046fa2017-07-05 16:55:00 -0700798 assert main, "main not defined"
799 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700800 main.case( "Verify connectivity by sending traffic across Intents" )
801 main.caseExplanation = "Ping across added host intents to check " +\
802 "functionality and check the state of " +\
803 "the intent"
804
Jon Hallca319892017-06-15 15:25:22 -0700805 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700806 main.step( "Check Intent state" )
807 installedCheck = False
808 loopCount = 0
Jon Hall5d5876e2017-11-30 09:33:16 -0800809 while not installedCheck and loopCount < 90:
Devin Lim58046fa2017-07-05 16:55:00 -0700810 installedCheck = True
811 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700812 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700813 intentStates = []
814 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
815 count = 0
816 # Iter through intents of a node
817 try:
818 for intent in json.loads( intents ):
819 state = intent.get( 'state', None )
820 if "INSTALLED" not in state:
821 installedCheck = False
Jon Hall8bafdc02017-09-05 11:36:26 -0700822 main.log.debug( "Failed intent: " + str( intent ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700823 intentId = intent.get( 'id', None )
824 intentStates.append( ( intentId, state ) )
825 except ( ValueError, TypeError ):
826 main.log.exception( "Error parsing intents." )
827 # Print states
828 intentStates.sort()
829 for i, s in intentStates:
830 count += 1
831 main.log.info( "%-6s%-15s%-15s" %
832 ( str( count ), str( i ), str( s ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700833 if not installedCheck:
834 time.sleep( 1 )
835 loopCount += 1
836 utilities.assert_equals( expect=True, actual=installedCheck,
837 onpass="Intents are all INSTALLED",
838 onfail="Intents are not all in " +
839 "INSTALLED state" )
840
841 main.step( "Ping across added host intents" )
842 PingResult = main.TRUE
843 for i in range( 8, 18 ):
844 ping = main.Mininet1.pingHost( src="h" + str( i ),
845 target="h" + str( i + 10 ) )
846 PingResult = PingResult and ping
847 if ping == main.FALSE:
848 main.log.warn( "Ping failed between h" + str( i ) +
849 " and h" + str( i + 10 ) )
850 elif ping == main.TRUE:
851 main.log.info( "Ping test passed!" )
852 # Don't set PingResult or you'd override failures
853 if PingResult == main.FALSE:
854 main.log.error(
855 "Intents have not been installed correctly, pings failed." )
856 # TODO: pretty print
Devin Lim58046fa2017-07-05 16:55:00 -0700857 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700858 tmpIntents = onosCli.CLI.intents()
Jon Hallca319892017-06-15 15:25:22 -0700859 output = json.dumps( json.loads( tmpIntents ),
860 sort_keys=True,
861 indent=4,
862 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700863 except ( ValueError, TypeError ):
Jon Hall4173b242017-09-12 17:04:38 -0700864 output = repr( tmpIntents )
Jon Hallca319892017-06-15 15:25:22 -0700865 main.log.debug( "ONOS1 intents: " + output )
Devin Lim58046fa2017-07-05 16:55:00 -0700866 utilities.assert_equals(
867 expect=main.TRUE,
868 actual=PingResult,
869 onpass="Intents have been installed correctly and pings work",
870 onfail="Intents have not been installed correctly, pings failed." )
871
872 main.step( "Check leadership of topics" )
Jon Hallca319892017-06-15 15:25:22 -0700873 topicsCheck = self.topicsCheck()
874 utilities.assert_equals( expect=False, actual=topicsCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700875 onpass="intent Partitions is in leaders",
Jon Hallca319892017-06-15 15:25:22 -0700876 onfail="Some topics were lost" )
877 self.partitionsCheck()
878 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700879
880 if not installedCheck:
881 main.log.info( "Waiting 60 seconds to see if the state of " +
882 "intents change" )
883 time.sleep( 60 )
884 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700885 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700886 intentStates = []
887 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
888 count = 0
889 # Iter through intents of a node
890 try:
891 for intent in json.loads( intents ):
892 state = intent.get( 'state', None )
893 if "INSTALLED" not in state:
894 installedCheck = False
895 intentId = intent.get( 'id', None )
896 intentStates.append( ( intentId, state ) )
897 except ( ValueError, TypeError ):
898 main.log.exception( "Error parsing intents." )
899 intentStates.sort()
900 for i, s in intentStates:
901 count += 1
902 main.log.info( "%-6s%-15s%-15s" %
903 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700904 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700905
Devin Lim58046fa2017-07-05 16:55:00 -0700906 main.step( "Wait a minute then ping again" )
907 # the wait is above
908 PingResult = main.TRUE
909 for i in range( 8, 18 ):
910 ping = main.Mininet1.pingHost( src="h" + str( i ),
911 target="h" + str( i + 10 ) )
912 PingResult = PingResult and ping
913 if ping == main.FALSE:
914 main.log.warn( "Ping failed between h" + str( i ) +
915 " and h" + str( i + 10 ) )
916 elif ping == main.TRUE:
917 main.log.info( "Ping test passed!" )
918 # Don't set PingResult or you'd override failures
919 if PingResult == main.FALSE:
920 main.log.error(
921 "Intents have not been installed correctly, pings failed." )
922 # TODO: pretty print
Jon Hallca319892017-06-15 15:25:22 -0700923 main.log.warn( str( onosCli.name ) + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -0700924 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700925 tmpIntents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700926 main.log.warn( json.dumps( json.loads( tmpIntents ),
927 sort_keys=True,
928 indent=4,
929 separators=( ',', ': ' ) ) )
930 except ( ValueError, TypeError ):
931 main.log.warn( repr( tmpIntents ) )
932 utilities.assert_equals(
933 expect=main.TRUE,
934 actual=PingResult,
935 onpass="Intents have been installed correctly and pings work",
936 onfail="Intents have not been installed correctly, pings failed." )
937
Devin Lim142b5342017-07-20 15:22:39 -0700938 def checkRoleNotNull( self ):
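        """
        Check that every device has a master assigned.
        """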
        main.step( "Check that each switch has a master" )
        # Assert that each device has a master
        rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
        utilities.assert_equals(
            expect=True,
            actual=rolesNotNull,
            onpass="Each device has a master",
            onfail="Some devices don't have a master assigned" )

    def checkTheRole( self ):
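        """
        Read device roles from each controller and check that they are error
        free and consistent.
        Returns ( ONOSMastership, rolesResults, consistentMastership ).
        """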
        main.step( "Read device roles from ONOS" )
        ONOSMastership = main.Cluster.command( "roles" )
        consistentMastership = True
        rolesResults = True
        for i in range( len( ONOSMastership ) ):
            node = str( main.Cluster.active( i ) )
            if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
                main.log.error( "Error in getting " + node + " roles" )
                main.log.warn( node + " mastership response: " +
                               repr( ONOSMastership[ i ] ) )
                rolesResults = False
        utilities.assert_equals(
            expect=True,
            actual=rolesResults,
            onpass="No error in reading roles output",
            onfail="Error in reading roles from ONOS" )

        main.step( "Check for consistency in roles from each controller" )
        if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
            main.log.info(
                "Switch roles are consistent across all ONOS nodes" )
        else:
            consistentMastership = False
        utilities.assert_equals(
            expect=True,
            actual=consistentMastership,
            onpass="Switch roles are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of switch roles" )
        return ONOSMastership, rolesResults, consistentMastership

    def checkingIntents( self ):
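        """
        Get the intents from each controller and check for errors.
        Returns ( ONOSIntents, intentsResults ).
        """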
        main.step( "Get the intents from each controller" )
        ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
        intentsResults = True
        for i in range( len( ONOSIntents ) ):
            node = str( main.Cluster.active( i ) )
            if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
                main.log.error( "Error in getting " + node + " intents" )
                main.log.warn( node + " intents response: " +
                               repr( ONOSIntents[ i ] ) )
                intentsResults = False
        utilities.assert_equals(
            expect=True,
            actual=intentsResults,
            onpass="No error in reading intents output",
            onfail="Error in reading intents from ONOS" )
        return ONOSIntents, intentsResults

    def readingState( self, main ):
        """
        Reading state of ONOS
        """
        import json
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        try:
            from tests.dependencies.topology import Topology
        except ImportError:
            main.log.error( "Topology not found exiting the test" )
            main.cleanAndExit()
        try:
            main.topoRelated
        except ( NameError, AttributeError ):
            main.topoRelated = Topology()
        main.case( "Setting up and gathering data for current state" )
        # The general idea for this test case is to pull the state of
        # ( intents, flows, topology,... ) from each ONOS node
        # We can then compare them with each other and also with past states

        global mastershipState
        mastershipState = '[]'

        self.checkRoleNotNull()

        main.step( "Get the Mastership of each switch from each controller" )
        mastershipCheck = main.FALSE

        ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()

        if rolesResults and not consistentMastership:
            for i in range( len( main.Cluster.active() ) ):
                node = str( main.Cluster.active( i ) )
                try:
                    main.log.warn(
                        node + " roles: ",
                        json.dumps(
                            json.loads( ONOSMastership[ i ] ),
                            sort_keys=True,
                            indent=4,
                            separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( repr( ONOSMastership[ i ] ) )
        elif rolesResults and consistentMastership:
            mastershipCheck = main.TRUE
            mastershipState = ONOSMastership[ 0 ]

        global intentState
        intentState = []
        ONOSIntents, intentsResults = self.checkingIntents()
        intentCheck = main.FALSE
        consistentIntents = True

        main.step( "Check for consistency in Intents from each controller" )
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False
            main.log.error( "Intents not consistent" )
        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )

        if intentsResults:
            # Try to make it easy to figure out what is happening
            #
            # Intent      ONOS1      ONOS2    ...
            # 0x01     INSTALLED  INSTALLING
            # ...        ...         ...
            # ...        ...         ...
            title = "   Id"
            for ctrl in main.Cluster.active():
                title += " " * 10 + ctrl.name
            main.log.warn( title )
            # get all intent keys in the cluster
            keys = []
            try:
                # Get the set of all intent keys
                for nodeStr in ONOSIntents:
                    node = json.loads( nodeStr )
                    for intent in node:
                        keys.append( intent.get( 'id' ) )
                keys = set( keys )
                # For each intent key, print the state on each node
                for key in keys:
                    row = "%-13s" % key
                    for nodeStr in ONOSIntents:
                        node = json.loads( nodeStr )
                        for intent in node:
                            if intent.get( 'id', "Error" ) == key:
                                row += "%-15s" % intent.get( 'state' )
                    main.log.warn( row )
                # End of intent state table
            except ValueError as e:
                main.log.exception( e )
                main.log.debug( "nodeStr was: " + repr( nodeStr ) )

        if intentsResults and not consistentIntents:
            # print the json objects
            main.log.debug( ctrl.name + " intents: " )
            main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
                                        sort_keys=True,
                                        indent=4,
                                        separators=( ',', ': ' ) ) )
            for i in range( len( ONOSIntents ) ):
                node = str( main.Cluster.active( i ) )
                if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
                    main.log.debug( node + " intents: " )
                    main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
                                                sort_keys=True,
                                                indent=4,
                                                separators=( ',', ': ' ) ) )
                else:
                    main.log.debug( node + " intents match " + ctrl.name + " intents" )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE
            intentState = ONOSIntents[ 0 ]

        main.step( "Get the flows from each controller" )
        global flowState
        flowState = []
        ONOSFlows = main.Cluster.command( "flows", specificDriver=2 )  # TODO: Possible arg: sleep = 30
        ONOSFlowsJson = []
        flowCheck = main.FALSE
        consistentFlows = True
        flowsResults = True
        for i in range( len( ONOSFlows ) ):
            node = str( main.Cluster.active( i ) )
            if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
                main.log.error( "Error in getting " + node + " flows" )
                main.log.warn( node + " flows response: " +
                               repr( ONOSFlows[ i ] ) )
                flowsResults = False
                ONOSFlowsJson.append( None )
            else:
                try:
                    ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
                except ( ValueError, TypeError ):
                    # FIXME: change this to log.error?
                    main.log.exception( "Error in parsing " + node +
                                        " response as json." )
                    main.log.error( repr( ONOSFlows[ i ] ) )
                    ONOSFlowsJson.append( None )
                    flowsResults = False
        utilities.assert_equals(
            expect=True,
            actual=flowsResults,
            onpass="No error in reading flows output",
            onfail="Error in reading flows from ONOS" )

        main.step( "Check for consistency in Flows from each controller" )
        tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
        if all( tmp ):
            main.log.info( "Flow count is consistent across all ONOS nodes" )
        else:
            consistentFlows = False
        utilities.assert_equals(
            expect=True,
            actual=consistentFlows,
            onpass="The flow count is consistent across all ONOS nodes",
            onfail="ONOS nodes have different flow counts" )

        if flowsResults and not consistentFlows:
            for i in range( len( ONOSFlows ) ):
                node = str( main.Cluster.active( i ) )
                try:
                    main.log.warn(
                        node + " flows: " +
                        json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
                                    indent=4, separators=( ',', ': ' ) ) )
                except ( ValueError, TypeError ):
                    main.log.warn( node + " flows: " +
                                   repr( ONOSFlows[ i ] ) )
        elif flowsResults and consistentFlows:
            flowCheck = main.TRUE
            flowState = ONOSFlows[ 0 ]

        main.step( "Get the OF Table entries" )
        global flows
        flows = {}
        for swName, swDetails in main.Mininet1.getSwitches().items():
            main.log.debug( repr( swName ) + repr( swDetails ) )
            flows[ swName ] = main.Mininet1.getFlowTable( swName, version="1.3", debug=False )
        if flowCheck == main.FALSE:
            for table in flows:
                main.log.warn( table )
        # TODO: Compare switch flow tables with ONOS flow tables

        main.step( "Start continuous pings" )
        if main.params.get( 'PING', False ):
            # TODO: Make this more dynamic and less hardcoded, i.e., # of ping pairs
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source1' ],
                target=main.params[ 'PING' ][ 'target1' ],
                pingTime=500 )
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source2' ],
                target=main.params[ 'PING' ][ 'target2' ],
                pingTime=500 )
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source3' ],
                target=main.params[ 'PING' ][ 'target3' ],
                pingTime=500 )
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source4' ],
                target=main.params[ 'PING' ][ 'target4' ],
                pingTime=500 )
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source5' ],
                target=main.params[ 'PING' ][ 'target5' ],
                pingTime=500 )
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source6' ],
                target=main.params[ 'PING' ][ 'target6' ],
                pingTime=500 )
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source7' ],
                target=main.params[ 'PING' ][ 'target7' ],
                pingTime=500 )
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source8' ],
                target=main.params[ 'PING' ][ 'target8' ],
                pingTime=500 )
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source9' ],
                target=main.params[ 'PING' ][ 'target9' ],
                pingTime=500 )
            main.Mininet2.pingLong(
                src=main.params[ 'PING' ][ 'source10' ],
                target=main.params[ 'PING' ][ 'target10' ],
                pingTime=500 )

        main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001234 devices = main.topoRelated.getAll( "devices" )
1235 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1236 ports = main.topoRelated.getAll( "ports" )
1237 links = main.topoRelated.getAll( "links" )
1238 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001239 # Compare json objects for hosts and dataplane clusters
1240
1241 # hosts
1242 main.step( "Host view is consistent across ONOS nodes" )
1243 consistentHostsResult = main.TRUE
1244 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001245 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001246 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1247 if hosts[ controller ] == hosts[ 0 ]:
1248 continue
1249 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001250 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001251 controllerStr +
1252 " is inconsistent with ONOS1" )
1253 main.log.warn( repr( hosts[ controller ] ) )
1254 consistentHostsResult = main.FALSE
1255
1256 else:
Jon Hallca319892017-06-15 15:25:22 -07001257 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001258 controllerStr )
1259 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001260 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001261 " hosts response: " +
1262 repr( hosts[ controller ] ) )
1263 utilities.assert_equals(
1264 expect=main.TRUE,
1265 actual=consistentHostsResult,
1266 onpass="Hosts view is consistent across all ONOS nodes",
1267 onfail="ONOS nodes have different views of hosts" )
1268
1269 main.step( "Each host has an IP address" )
1270 ipResult = main.TRUE
1271 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001272 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001273 if hosts[ controller ]:
1274 for host in hosts[ controller ]:
1275 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001276 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001277 controllerStr + ": " + str( host ) )
1278 ipResult = main.FALSE
1279 utilities.assert_equals(
1280 expect=main.TRUE,
1281 actual=ipResult,
1282 onpass="The ips of the hosts aren't empty",
1283 onfail="The ip of at least one host is missing" )
1284
1285 # Strongly connected clusters of devices
1286 main.step( "Cluster view is consistent across ONOS nodes" )
1287 consistentClustersResult = main.TRUE
1288 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001289 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001290 if "Error" not in clusters[ controller ]:
1291 if clusters[ controller ] == clusters[ 0 ]:
1292 continue
1293 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001294 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001295                                     " are inconsistent with ONOS1" )
1296 consistentClustersResult = main.FALSE
1297
1298 else:
1299 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001300 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001301 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001302 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001303 " clusters response: " +
1304 repr( clusters[ controller ] ) )
1305 utilities.assert_equals(
1306 expect=main.TRUE,
1307 actual=consistentClustersResult,
1308 onpass="Clusters view is consistent across all ONOS nodes",
1309 onfail="ONOS nodes have different views of clusters" )
1310 if not consistentClustersResult:
1311 main.log.debug( clusters )
1312
1313        # The Mininet topology is connected, so there should always be exactly one SCC ( dataplane cluster )
1314 main.step( "Cluster view correct across ONOS nodes" )
1315 try:
1316 numClusters = len( json.loads( clusters[ 0 ] ) )
1317 except ( ValueError, TypeError ):
1318 main.log.exception( "Error parsing clusters[0]: " +
1319 repr( clusters[ 0 ] ) )
1320 numClusters = "ERROR"
1321 utilities.assert_equals(
1322 expect=1,
1323 actual=numClusters,
1324 onpass="ONOS shows 1 SCC",
1325 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1326
1327 main.step( "Comparing ONOS topology to MN" )
1328 devicesResults = main.TRUE
1329 linksResults = main.TRUE
1330 hostsResults = main.TRUE
1331 mnSwitches = main.Mininet1.getSwitches()
1332 mnLinks = main.Mininet1.getLinks()
1333 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001334 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001335 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001336 currentDevicesResult = main.topoRelated.compareDevicePort(
1337 main.Mininet1, controller,
1338 mnSwitches, devices, ports )
1339 utilities.assert_equals( expect=main.TRUE,
1340 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001341 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001342 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001343 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001344 " Switches view is incorrect" )
1345
1346 currentLinksResult = main.topoRelated.compareBase( links, controller,
1347 main.Mininet1.compareLinks,
1348 [ mnSwitches, mnLinks ] )
1349 utilities.assert_equals( expect=main.TRUE,
1350 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001351 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001352 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001353 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001354 " links view is incorrect" )
1355
1356 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1357 currentHostsResult = main.Mininet1.compareHosts(
1358 mnHosts,
1359 hosts[ controller ] )
1360 else:
1361 currentHostsResult = main.FALSE
1362 utilities.assert_equals( expect=main.TRUE,
1363 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001364 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001365 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001366 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001367 " hosts don't match Mininet" )
1368
1369 devicesResults = devicesResults and currentDevicesResult
1370 linksResults = linksResults and currentLinksResult
1371 hostsResults = hostsResults and currentHostsResult
1372
1373 main.step( "Device information is correct" )
1374 utilities.assert_equals(
1375 expect=main.TRUE,
1376 actual=devicesResults,
1377 onpass="Device information is correct",
1378 onfail="Device information is incorrect" )
1379
1380 main.step( "Links are correct" )
1381 utilities.assert_equals(
1382 expect=main.TRUE,
1383 actual=linksResults,
1384 onpass="Link are correct",
1385 onfail="Links are incorrect" )
1386
1387 main.step( "Hosts are correct" )
1388 utilities.assert_equals(
1389 expect=main.TRUE,
1390 actual=hostsResults,
1391 onpass="Hosts are correct",
1392 onfail="Hosts are incorrect" )
1393
1394 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001395 """
1396 Check for basic functionality with distributed primitives
1397 """
Jon Halle0f0b342017-04-18 11:43:47 -07001398 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001399 try:
1400 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001401 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001402 assert main.pCounterName, "main.pCounterName not defined"
1403 assert main.onosSetName, "main.onosSetName not defined"
1404 # NOTE: assert fails if value is 0/None/Empty/False
1405 try:
1406 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001407 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001408 main.log.error( "main.pCounterValue not defined, setting to 0" )
1409 main.pCounterValue = 0
1410 try:
1411 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001412 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001413 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001414 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001415 # Variables for the distributed primitives tests. These are local only
1416 addValue = "a"
1417 addAllValue = "a b c d e f"
1418 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001419 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001420 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001421 workQueueName = "TestON-Queue"
1422 workQueueCompleted = 0
1423 workQueueInProgress = 0
1424 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001425
1426 description = "Check for basic functionality with distributed " +\
1427 "primitives"
1428 main.case( description )
1429 main.caseExplanation = "Test the methods of the distributed " +\
1430 "primitives (counters and sets) throught the cli"
1431 # DISTRIBUTED ATOMIC COUNTERS
1432 # Partitioned counters
1433 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001434 pCounters = main.Cluster.command( "counterTestAddAndGet",
1435 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001436 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001437 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001438 main.pCounterValue += 1
1439 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001440 # Check that counter incremented once per controller
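            # e.g. with 3 active nodes and a starting value of 0, counterTestAddAndGet
            # returns the post-increment values, so pCounters should contain 1, 2 and 3
            # ( in some order ) and addedPValues will be [ 1, 2, 3 ]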
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001441 pCounterResults = True
1442 for i in addedPValues:
1443 tmpResult = i in pCounters
1444 pCounterResults = pCounterResults and tmpResult
1445 if not tmpResult:
1446 main.log.error( str( i ) + " is not in partitioned "
1447 "counter incremented results" )
1448 utilities.assert_equals( expect=True,
1449 actual=pCounterResults,
1450 onpass="Default counter incremented",
1451 onfail="Error incrementing default" +
1452 " counter" )
1453
1454 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001455 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1456 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001457 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001458 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001459 addedPValues.append( main.pCounterValue )
1460 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001461 # Check that counter incremented numController times
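            # e.g. continuing from 3 with 3 active nodes, counterTestGetAndAdd returns the
            # pre-increment values, so pCounters should contain 3, 4 and 5 while the shared
            # counter ends at 6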
1462 pCounterResults = True
1463 for i in addedPValues:
1464 tmpResult = i in pCounters
1465 pCounterResults = pCounterResults and tmpResult
1466 if not tmpResult:
1467 main.log.error( str( i ) + " is not in partitioned "
1468 "counter incremented results" )
1469 utilities.assert_equals( expect=True,
1470 actual=pCounterResults,
1471 onpass="Default counter incremented",
1472 onfail="Error incrementing default" +
1473 " counter" )
1474
1475 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001476 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001477 utilities.assert_equals( expect=main.TRUE,
1478 actual=incrementCheck,
1479 onpass="Added counters are correct",
1480 onfail="Added counters are incorrect" )
1481
1482 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001483 pCounters = main.Cluster.command( "counterTestAddAndGet",
1484 args=[ main.pCounterName ],
1485 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001486 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001487 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001488 main.pCounterValue += -8
1489 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001490            # Check that the counter was decremented once per active controller
1491 pCounterResults = True
1492 for i in addedPValues:
1493 tmpResult = i in pCounters
1494 pCounterResults = pCounterResults and tmpResult
1495 if not tmpResult:
1496 main.log.error( str( i ) + " is not in partitioned "
1497 "counter incremented results" )
1498 utilities.assert_equals( expect=True,
1499 actual=pCounterResults,
1500 onpass="Default counter incremented",
1501 onfail="Error incrementing default" +
1502 " counter" )
1503
1504 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001505 pCounters = main.Cluster.command( "counterTestAddAndGet",
1506 args=[ main.pCounterName ],
1507 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001508 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001509 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001510 main.pCounterValue += 5
1511 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001512
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001513 # Check that counter incremented numController times
1514 pCounterResults = True
1515 for i in addedPValues:
1516 tmpResult = i in pCounters
1517 pCounterResults = pCounterResults and tmpResult
1518 if not tmpResult:
1519 main.log.error( str( i ) + " is not in partitioned "
1520 "counter incremented results" )
1521 utilities.assert_equals( expect=True,
1522 actual=pCounterResults,
1523 onpass="Default counter incremented",
1524 onfail="Error incrementing default" +
1525 " counter" )
1526
1527 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001528 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1529 args=[ main.pCounterName ],
1530 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001531 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001532 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001533 addedPValues.append( main.pCounterValue )
1534 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001535 # Check that counter incremented numController times
1536 pCounterResults = True
1537 for i in addedPValues:
1538 tmpResult = i in pCounters
1539 pCounterResults = pCounterResults and tmpResult
1540 if not tmpResult:
1541 main.log.error( str( i ) + " is not in partitioned "
1542 "counter incremented results" )
1543 utilities.assert_equals( expect=True,
1544 actual=pCounterResults,
1545 onpass="Default counter incremented",
1546 onfail="Error incrementing default" +
1547 " counter" )
1548
1549 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001550 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001551 utilities.assert_equals( expect=main.TRUE,
1552 actual=incrementCheck,
1553 onpass="Added counters are correct",
1554 onfail="Added counters are incorrect" )
1555
1556 # DISTRIBUTED SETS
1557 main.step( "Distributed Set get" )
1558 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001559 getResponses = main.Cluster.command( "setTestGet",
1560 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001561 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001562 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001563 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001564 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001565 current = set( getResponses[ i ] )
1566 if len( current ) == len( getResponses[ i ] ):
1567 # no repeats
1568 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001569 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001570 " has incorrect view" +
1571 " of set " + main.onosSetName + ":\n" +
1572 str( getResponses[ i ] ) )
1573 main.log.debug( "Expected: " + str( main.onosSet ) )
1574 main.log.debug( "Actual: " + str( current ) )
1575 getResults = main.FALSE
1576 else:
1577 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001578 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001579 " has repeat elements in" +
1580 " set " + main.onosSetName + ":\n" +
1581 str( getResponses[ i ] ) )
1582 getResults = main.FALSE
1583 elif getResponses[ i ] == main.ERROR:
1584 getResults = main.FALSE
1585 utilities.assert_equals( expect=main.TRUE,
1586 actual=getResults,
1587 onpass="Set elements are correct",
1588 onfail="Set elements are incorrect" )
1589
1590 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001591 sizeResponses = main.Cluster.command( "setTestSize",
1592 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001593 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001594 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001595 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001596 if size != sizeResponses[ i ]:
1597 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001598 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001599 " expected a size of " + str( size ) +
1600 " for set " + main.onosSetName +
1601 " but got " + str( sizeResponses[ i ] ) )
1602 utilities.assert_equals( expect=main.TRUE,
1603 actual=sizeResults,
1604 onpass="Set sizes are correct",
1605 onfail="Set sizes are incorrect" )
1606
1607 main.step( "Distributed Set add()" )
1608 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001609 addResponses = main.Cluster.command( "setTestAdd",
1610 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001611 # main.TRUE = successfully changed the set
1612 # main.FALSE = action resulted in no change in set
1613 # main.ERROR - Some error in executing the function
1614 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001615 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001616 if addResponses[ i ] == main.TRUE:
1617 # All is well
1618 pass
1619 elif addResponses[ i ] == main.FALSE:
1620 # Already in set, probably fine
1621 pass
1622 elif addResponses[ i ] == main.ERROR:
1623 # Error in execution
1624 addResults = main.FALSE
1625 else:
1626 # unexpected result
1627 addResults = main.FALSE
1628 if addResults != main.TRUE:
1629 main.log.error( "Error executing set add" )
1630
1631 # Check if set is still correct
1632 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001633 getResponses = main.Cluster.command( "setTestGet",
1634 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001635 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001636 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001637 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001638 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001639 current = set( getResponses[ i ] )
1640 if len( current ) == len( getResponses[ i ] ):
1641 # no repeats
1642 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001643 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001644 " of set " + main.onosSetName + ":\n" +
1645 str( getResponses[ i ] ) )
1646 main.log.debug( "Expected: " + str( main.onosSet ) )
1647 main.log.debug( "Actual: " + str( current ) )
1648 getResults = main.FALSE
1649 else:
1650 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001651 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001652 " set " + main.onosSetName + ":\n" +
1653 str( getResponses[ i ] ) )
1654 getResults = main.FALSE
1655 elif getResponses[ i ] == main.ERROR:
1656 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001657 sizeResponses = main.Cluster.command( "setTestSize",
1658 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001659 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001660 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001661 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001662 if size != sizeResponses[ i ]:
1663 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001664 main.log.error( node + " expected a size of " +
1665 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001666 " but got " + str( sizeResponses[ i ] ) )
1667 addResults = addResults and getResults and sizeResults
1668 utilities.assert_equals( expect=main.TRUE,
1669 actual=addResults,
1670 onpass="Set add correct",
1671 onfail="Set add was incorrect" )
1672
1673 main.step( "Distributed Set addAll()" )
1674 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001675 addResponses = main.Cluster.command( "setTestAdd",
1676 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001677 # main.TRUE = successfully changed the set
1678 # main.FALSE = action resulted in no change in set
1679 # main.ERROR - Some error in executing the function
1680 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001681 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001682 if addResponses[ i ] == main.TRUE:
1683 # All is well
1684 pass
1685 elif addResponses[ i ] == main.FALSE:
1686 # Already in set, probably fine
1687 pass
1688 elif addResponses[ i ] == main.ERROR:
1689 # Error in execution
1690 addAllResults = main.FALSE
1691 else:
1692 # unexpected result
1693 addAllResults = main.FALSE
1694 if addAllResults != main.TRUE:
1695 main.log.error( "Error executing set addAll" )
1696
1697 # Check if set is still correct
1698 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001699 getResponses = main.Cluster.command( "setTestGet",
1700 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001701 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001702 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001703 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001704 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001705 current = set( getResponses[ i ] )
1706 if len( current ) == len( getResponses[ i ] ):
1707 # no repeats
1708 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001709 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001710 " of set " + main.onosSetName + ":\n" +
1711 str( getResponses[ i ] ) )
1712 main.log.debug( "Expected: " + str( main.onosSet ) )
1713 main.log.debug( "Actual: " + str( current ) )
1714 getResults = main.FALSE
1715 else:
1716 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001717 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001718 " set " + main.onosSetName + ":\n" +
1719 str( getResponses[ i ] ) )
1720 getResults = main.FALSE
1721 elif getResponses[ i ] == main.ERROR:
1722 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001723 sizeResponses = main.Cluster.command( "setTestSize",
1724 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001725 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001726 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001727 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001728 if size != sizeResponses[ i ]:
1729 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001730 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001731 " for set " + main.onosSetName +
1732 " but got " + str( sizeResponses[ i ] ) )
1733 addAllResults = addAllResults and getResults and sizeResults
1734 utilities.assert_equals( expect=main.TRUE,
1735 actual=addAllResults,
1736 onpass="Set addAll correct",
1737 onfail="Set addAll was incorrect" )
1738
1739 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001740 containsResponses = main.Cluster.command( "setTestGet",
1741 args=[ main.onosSetName ],
1742 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001743 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001744 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001745 if containsResponses[ i ] == main.ERROR:
1746 containsResults = main.FALSE
1747 else:
1748 containsResults = containsResults and\
1749 containsResponses[ i ][ 1 ]
1750 utilities.assert_equals( expect=main.TRUE,
1751 actual=containsResults,
1752 onpass="Set contains is functional",
1753 onfail="Set contains failed" )
1754
1755 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001756 containsAllResponses = main.Cluster.command( "setTestGet",
1757 args=[ main.onosSetName ],
1758 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001759 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001760 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001761                if containsAllResponses[ i ] == main.ERROR:
1762                    containsAllResults = main.FALSE
1763                else:
1764                    containsAllResults = containsAllResults and\
1765                                         containsAllResponses[ i ][ 1 ]
1766 utilities.assert_equals( expect=main.TRUE,
1767 actual=containsAllResults,
1768 onpass="Set containsAll is functional",
1769 onfail="Set containsAll failed" )
1770
1771 main.step( "Distributed Set remove()" )
1772 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001773 removeResponses = main.Cluster.command( "setTestRemove",
1774 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001775 # main.TRUE = successfully changed the set
1776 # main.FALSE = action resulted in no change in set
1777 # main.ERROR - Some error in executing the function
1778 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001779 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001780 if removeResponses[ i ] == main.TRUE:
1781 # All is well
1782 pass
1783 elif removeResponses[ i ] == main.FALSE:
1784 # not in set, probably fine
1785 pass
1786 elif removeResponses[ i ] == main.ERROR:
1787 # Error in execution
1788 removeResults = main.FALSE
1789 else:
1790 # unexpected result
1791 removeResults = main.FALSE
1792 if removeResults != main.TRUE:
1793 main.log.error( "Error executing set remove" )
1794
1795 # Check if set is still correct
1796 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001797 getResponses = main.Cluster.command( "setTestGet",
1798 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001799 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001800 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001801 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001802 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001803 current = set( getResponses[ i ] )
1804 if len( current ) == len( getResponses[ i ] ):
1805 # no repeats
1806 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001807 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001808 " of set " + main.onosSetName + ":\n" +
1809 str( getResponses[ i ] ) )
1810 main.log.debug( "Expected: " + str( main.onosSet ) )
1811 main.log.debug( "Actual: " + str( current ) )
1812 getResults = main.FALSE
1813 else:
1814 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001815 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001816 " set " + main.onosSetName + ":\n" +
1817 str( getResponses[ i ] ) )
1818 getResults = main.FALSE
1819 elif getResponses[ i ] == main.ERROR:
1820 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001821 sizeResponses = main.Cluster.command( "setTestSize",
1822 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001823 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001824 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001825 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001826 if size != sizeResponses[ i ]:
1827 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001828 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001829 " for set " + main.onosSetName +
1830 " but got " + str( sizeResponses[ i ] ) )
1831 removeResults = removeResults and getResults and sizeResults
1832 utilities.assert_equals( expect=main.TRUE,
1833 actual=removeResults,
1834 onpass="Set remove correct",
1835 onfail="Set remove was incorrect" )
1836
1837 main.step( "Distributed Set removeAll()" )
1838 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001839 removeAllResponses = main.Cluster.command( "setTestRemove",
1840 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001841 # main.TRUE = successfully changed the set
1842 # main.FALSE = action resulted in no change in set
1843 # main.ERROR - Some error in executing the function
1844 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001845 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001846 if removeAllResponses[ i ] == main.TRUE:
1847 # All is well
1848 pass
1849 elif removeAllResponses[ i ] == main.FALSE:
1850 # not in set, probably fine
1851 pass
1852 elif removeAllResponses[ i ] == main.ERROR:
1853 # Error in execution
1854 removeAllResults = main.FALSE
1855 else:
1856 # unexpected result
1857 removeAllResults = main.FALSE
1858 if removeAllResults != main.TRUE:
1859 main.log.error( "Error executing set removeAll" )
1860
1861 # Check if set is still correct
1862 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001863 getResponses = main.Cluster.command( "setTestGet",
1864 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001865 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001866 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001867 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001868 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001869 current = set( getResponses[ i ] )
1870 if len( current ) == len( getResponses[ i ] ):
1871 # no repeats
1872 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001873 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001874 " of set " + main.onosSetName + ":\n" +
1875 str( getResponses[ i ] ) )
1876 main.log.debug( "Expected: " + str( main.onosSet ) )
1877 main.log.debug( "Actual: " + str( current ) )
1878 getResults = main.FALSE
1879 else:
1880 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001881 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001882 " set " + main.onosSetName + ":\n" +
1883 str( getResponses[ i ] ) )
1884 getResults = main.FALSE
1885 elif getResponses[ i ] == main.ERROR:
1886 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001887 sizeResponses = main.Cluster.command( "setTestSize",
1888 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001889 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001890 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001891 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001892 if size != sizeResponses[ i ]:
1893 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001894 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001895 " for set " + main.onosSetName +
1896 " but got " + str( sizeResponses[ i ] ) )
1897 removeAllResults = removeAllResults and getResults and sizeResults
1898 utilities.assert_equals( expect=main.TRUE,
1899 actual=removeAllResults,
1900 onpass="Set removeAll correct",
1901 onfail="Set removeAll was incorrect" )
1902
1903 main.step( "Distributed Set addAll()" )
1904 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001905 addResponses = main.Cluster.command( "setTestAdd",
1906 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001907 # main.TRUE = successfully changed the set
1908 # main.FALSE = action resulted in no change in set
1909 # main.ERROR - Some error in executing the function
1910 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001911 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001912 if addResponses[ i ] == main.TRUE:
1913 # All is well
1914 pass
1915 elif addResponses[ i ] == main.FALSE:
1916 # Already in set, probably fine
1917 pass
1918 elif addResponses[ i ] == main.ERROR:
1919 # Error in execution
1920 addAllResults = main.FALSE
1921 else:
1922 # unexpected result
1923 addAllResults = main.FALSE
1924 if addAllResults != main.TRUE:
1925 main.log.error( "Error executing set addAll" )
1926
1927 # Check if set is still correct
1928 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001929 getResponses = main.Cluster.command( "setTestGet",
1930 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001931 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001932 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001933 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001934 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001935 current = set( getResponses[ i ] )
1936 if len( current ) == len( getResponses[ i ] ):
1937 # no repeats
1938 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001939 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001940 " of set " + main.onosSetName + ":\n" +
1941 str( getResponses[ i ] ) )
1942 main.log.debug( "Expected: " + str( main.onosSet ) )
1943 main.log.debug( "Actual: " + str( current ) )
1944 getResults = main.FALSE
1945 else:
1946 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001947 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001948 " set " + main.onosSetName + ":\n" +
1949 str( getResponses[ i ] ) )
1950 getResults = main.FALSE
1951 elif getResponses[ i ] == main.ERROR:
1952 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001953 sizeResponses = main.Cluster.command( "setTestSize",
1954 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001955 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001956 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001957 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001958 if size != sizeResponses[ i ]:
1959 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001960 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001961 " for set " + main.onosSetName +
1962 " but got " + str( sizeResponses[ i ] ) )
1963 addAllResults = addAllResults and getResults and sizeResults
1964 utilities.assert_equals( expect=main.TRUE,
1965 actual=addAllResults,
1966 onpass="Set addAll correct",
1967 onfail="Set addAll was incorrect" )
1968
1969 main.step( "Distributed Set clear()" )
1970 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07001971 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07001972 args=[ main.onosSetName, " " ], # Values doesn't matter
1973 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001974 # main.TRUE = successfully changed the set
1975 # main.FALSE = action resulted in no change in set
1976 # main.ERROR - Some error in executing the function
1977 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001978 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001979 if clearResponses[ i ] == main.TRUE:
1980 # All is well
1981 pass
1982 elif clearResponses[ i ] == main.FALSE:
1983 # Nothing set, probably fine
1984 pass
1985 elif clearResponses[ i ] == main.ERROR:
1986 # Error in execution
1987 clearResults = main.FALSE
1988 else:
1989 # unexpected result
1990 clearResults = main.FALSE
1991 if clearResults != main.TRUE:
1992 main.log.error( "Error executing set clear" )
1993
1994 # Check if set is still correct
1995 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001996 getResponses = main.Cluster.command( "setTestGet",
1997 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001998 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001999 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002000 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002001 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002002 current = set( getResponses[ i ] )
2003 if len( current ) == len( getResponses[ i ] ):
2004 # no repeats
2005 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002006 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002007 " of set " + main.onosSetName + ":\n" +
2008 str( getResponses[ i ] ) )
2009 main.log.debug( "Expected: " + str( main.onosSet ) )
2010 main.log.debug( "Actual: " + str( current ) )
2011 getResults = main.FALSE
2012 else:
2013 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002014 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002015 " set " + main.onosSetName + ":\n" +
2016 str( getResponses[ i ] ) )
2017 getResults = main.FALSE
2018 elif getResponses[ i ] == main.ERROR:
2019 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002020 sizeResponses = main.Cluster.command( "setTestSize",
2021 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002022 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002023 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002024 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002025 if size != sizeResponses[ i ]:
2026 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002027 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002028 " for set " + main.onosSetName +
2029 " but got " + str( sizeResponses[ i ] ) )
2030 clearResults = clearResults and getResults and sizeResults
2031 utilities.assert_equals( expect=main.TRUE,
2032 actual=clearResults,
2033 onpass="Set clear correct",
2034 onfail="Set clear was incorrect" )
2035
2036 main.step( "Distributed Set addAll()" )
2037 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002038 addResponses = main.Cluster.command( "setTestAdd",
2039 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002040 # main.TRUE = successfully changed the set
2041 # main.FALSE = action resulted in no change in set
2042 # main.ERROR - Some error in executing the function
2043 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002044 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002045 if addResponses[ i ] == main.TRUE:
2046 # All is well
2047 pass
2048 elif addResponses[ i ] == main.FALSE:
2049 # Already in set, probably fine
2050 pass
2051 elif addResponses[ i ] == main.ERROR:
2052 # Error in execution
2053 addAllResults = main.FALSE
2054 else:
2055 # unexpected result
2056 addAllResults = main.FALSE
2057 if addAllResults != main.TRUE:
2058 main.log.error( "Error executing set addAll" )
2059
2060 # Check if set is still correct
2061 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002062 getResponses = main.Cluster.command( "setTestGet",
2063 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002064 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002065 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002066 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002067 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002068 current = set( getResponses[ i ] )
2069 if len( current ) == len( getResponses[ i ] ):
2070 # no repeats
2071 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002072 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002073 " of set " + main.onosSetName + ":\n" +
2074 str( getResponses[ i ] ) )
2075 main.log.debug( "Expected: " + str( main.onosSet ) )
2076 main.log.debug( "Actual: " + str( current ) )
2077 getResults = main.FALSE
2078 else:
2079 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002080 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002081 " set " + main.onosSetName + ":\n" +
2082 str( getResponses[ i ] ) )
2083 getResults = main.FALSE
2084 elif getResponses[ i ] == main.ERROR:
2085 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002086 sizeResponses = main.Cluster.command( "setTestSize",
2087 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002088 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002089 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002090 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002091 if size != sizeResponses[ i ]:
2092 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002093 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002094 " for set " + main.onosSetName +
2095 " but got " + str( sizeResponses[ i ] ) )
2096 addAllResults = addAllResults and getResults and sizeResults
2097 utilities.assert_equals( expect=main.TRUE,
2098 actual=addAllResults,
2099 onpass="Set addAll correct",
2100 onfail="Set addAll was incorrect" )
2101
2102 main.step( "Distributed Set retain()" )
2103 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002104 retainResponses = main.Cluster.command( "setTestRemove",
2105 args=[ main.onosSetName, retainValue ],
2106 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002107 # main.TRUE = successfully changed the set
2108 # main.FALSE = action resulted in no change in set
2109 # main.ERROR - Some error in executing the function
2110 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002111 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002112 if retainResponses[ i ] == main.TRUE:
2113 # All is well
2114 pass
2115 elif retainResponses[ i ] == main.FALSE:
2116 # Already in set, probably fine
2117 pass
2118 elif retainResponses[ i ] == main.ERROR:
2119 # Error in execution
2120 retainResults = main.FALSE
2121 else:
2122 # unexpected result
2123 retainResults = main.FALSE
2124 if retainResults != main.TRUE:
2125 main.log.error( "Error executing set retain" )
2126
2127 # Check if set is still correct
2128 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002129 getResponses = main.Cluster.command( "setTestGet",
2130 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002131 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002132 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002133 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002134 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002135 current = set( getResponses[ i ] )
2136 if len( current ) == len( getResponses[ i ] ):
2137 # no repeats
2138 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002139 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002140 " of set " + main.onosSetName + ":\n" +
2141 str( getResponses[ i ] ) )
2142 main.log.debug( "Expected: " + str( main.onosSet ) )
2143 main.log.debug( "Actual: " + str( current ) )
2144 getResults = main.FALSE
2145 else:
2146 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002147 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002148 " set " + main.onosSetName + ":\n" +
2149 str( getResponses[ i ] ) )
2150 getResults = main.FALSE
2151 elif getResponses[ i ] == main.ERROR:
2152 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002153 sizeResponses = main.Cluster.command( "setTestSize",
2154 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002155 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002156 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002157 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002158 if size != sizeResponses[ i ]:
2159 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002160 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002161 str( size ) + " for set " + main.onosSetName +
2162 " but got " + str( sizeResponses[ i ] ) )
2163 retainResults = retainResults and getResults and sizeResults
2164 utilities.assert_equals( expect=main.TRUE,
2165 actual=retainResults,
2166 onpass="Set retain correct",
2167 onfail="Set retain was incorrect" )
2168
2169 # Transactional maps
2170 main.step( "Partitioned Transactional maps put" )
2171 tMapValue = "Testing"
2172 numKeys = 100
2173 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002174 ctrl = main.Cluster.next()
2175 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002176            if putResponses and len( putResponses ) == numKeys:
2177 for i in putResponses:
2178 if putResponses[ i ][ 'value' ] != tMapValue:
2179 putResult = False
2180 else:
2181 putResult = False
2182 if not putResult:
2183 main.log.debug( "Put response values: " + str( putResponses ) )
2184 utilities.assert_equals( expect=True,
2185 actual=putResult,
2186 onpass="Partitioned Transactional Map put successful",
2187 onfail="Partitioned Transactional Map put values are incorrect" )
2188
2189 main.step( "Partitioned Transactional maps get" )
2190 # FIXME: is this sleep needed?
2191 time.sleep( 5 )
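            # A polling loop like the sketch below could replace the fixed sleep, at the
            # cost of extra CLI round trips ( sketch only, left as comments ):
            #   for _ in range( 10 ):
            #       sample = main.Cluster.command( "transactionalMapGet", args=[ "Key1" ] )
            #       if all( value == tMapValue for value in sample ):
            #           break
            #       time.sleep( 1 )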
2192
2193 getCheck = True
2194 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002195 getResponses = main.Cluster.command( "transactionalMapGet",
2196 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002197 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002198 for node in getResponses:
2199 if node != tMapValue:
2200 valueCheck = False
2201 if not valueCheck:
Jon Hall0e240372018-05-02 11:21:57 -07002202 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002203 main.log.warn( getResponses )
2204 getCheck = getCheck and valueCheck
2205 utilities.assert_equals( expect=True,
2206 actual=getCheck,
2207 onpass="Partitioned Transactional Map get values were correct",
2208 onfail="Partitioned Transactional Map values incorrect" )
2209
2210 # DISTRIBUTED ATOMIC VALUE
2211 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002212 getValues = main.Cluster.command( "valueTestGet",
2213 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002214 main.log.debug( getValues )
2215 # Check the results
2216 atomicValueGetResult = True
2217 expected = valueValue if valueValue is not None else "null"
2218 main.log.debug( "Checking for value of " + expected )
2219 for i in getValues:
2220 if i != expected:
2221 atomicValueGetResult = False
2222 utilities.assert_equals( expect=True,
2223 actual=atomicValueGetResult,
2224 onpass="Atomic Value get successful",
2225 onfail="Error getting atomic Value " +
2226 str( valueValue ) + ", found: " +
2227 str( getValues ) )
2228
2229 main.step( "Atomic Value set()" )
2230 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002231 setValues = main.Cluster.command( "valueTestSet",
2232 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002233 main.log.debug( setValues )
2234 # Check the results
2235 atomicValueSetResults = True
2236 for i in setValues:
2237 if i != main.TRUE:
2238 atomicValueSetResults = False
2239 utilities.assert_equals( expect=True,
2240 actual=atomicValueSetResults,
2241 onpass="Atomic Value set successful",
2242 onfail="Error setting atomic Value" +
2243 str( setValues ) )
2244
2245 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002246 getValues = main.Cluster.command( "valueTestGet",
2247 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002248 main.log.debug( getValues )
2249 # Check the results
2250 atomicValueGetResult = True
2251 expected = valueValue if valueValue is not None else "null"
2252 main.log.debug( "Checking for value of " + expected )
2253 for i in getValues:
2254 if i != expected:
2255 atomicValueGetResult = False
2256 utilities.assert_equals( expect=True,
2257 actual=atomicValueGetResult,
2258 onpass="Atomic Value get successful",
2259 onfail="Error getting atomic Value " +
2260 str( valueValue ) + ", found: " +
2261 str( getValues ) )
2262
2263 main.step( "Atomic Value compareAndSet()" )
2264 oldValue = valueValue
2265 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002266 ctrl = main.Cluster.next()
2267 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002268 main.log.debug( CASValue )
2269 utilities.assert_equals( expect=main.TRUE,
2270 actual=CASValue,
2271                                     onpass="Atomic Value compareAndSet successful",
2272 onfail="Error setting atomic Value:" +
2273 str( CASValue ) )
2274
2275 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002276 getValues = main.Cluster.command( "valueTestGet",
2277 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002278 main.log.debug( getValues )
2279 # Check the results
2280 atomicValueGetResult = True
2281 expected = valueValue if valueValue is not None else "null"
2282 main.log.debug( "Checking for value of " + expected )
2283 for i in getValues:
2284 if i != expected:
2285 atomicValueGetResult = False
2286 utilities.assert_equals( expect=True,
2287 actual=atomicValueGetResult,
2288 onpass="Atomic Value get successful",
2289 onfail="Error getting atomic Value " +
2290 str( valueValue ) + ", found: " +
2291 str( getValues ) )
2292
2293 main.step( "Atomic Value getAndSet()" )
2294 oldValue = valueValue
2295 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002296 ctrl = main.Cluster.next()
2297 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002298 main.log.debug( GASValue )
2299 expected = oldValue if oldValue is not None else "null"
2300 utilities.assert_equals( expect=expected,
2301 actual=GASValue,
2302 onpass="Atomic Value GAS successful",
2303 onfail="Error with GetAndSet atomic Value: expected " +
2304 str( expected ) + ", found: " +
2305 str( GASValue ) )
2306
2307 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002308 getValues = main.Cluster.command( "valueTestGet",
2309 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002310 main.log.debug( getValues )
2311 # Check the results
2312 atomicValueGetResult = True
2313 expected = valueValue if valueValue is not None else "null"
2314 main.log.debug( "Checking for value of " + expected )
2315 for i in getValues:
2316 if i != expected:
2317 atomicValueGetResult = False
2318 utilities.assert_equals( expect=True,
2319 actual=atomicValueGetResult,
2320 onpass="Atomic Value get successful",
2321 onfail="Error getting atomic Value: expected " +
2322 str( valueValue ) + ", found: " +
2323 str( getValues ) )
2324
2325            main.step( "Atomic Value destroy()" )
2326 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002327 ctrl = main.Cluster.next()
2328 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002329 main.log.debug( destroyResult )
2330 # Check the results
2331 utilities.assert_equals( expect=main.TRUE,
2332 actual=destroyResult,
2333 onpass="Atomic Value destroy successful",
2334 onfail="Error destroying atomic Value" )
2335
2336 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002337 getValues = main.Cluster.command( "valueTestGet",
2338 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002339 main.log.debug( getValues )
2340 # Check the results
2341 atomicValueGetResult = True
2342 expected = valueValue if valueValue is not None else "null"
2343 main.log.debug( "Checking for value of " + expected )
2344 for i in getValues:
2345 if i != expected:
2346 atomicValueGetResult = False
2347 utilities.assert_equals( expect=True,
2348 actual=atomicValueGetResult,
2349 onpass="Atomic Value get successful",
2350 onfail="Error getting atomic Value " +
2351 str( valueValue ) + ", found: " +
2352 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002353
2354 # WORK QUEUES
2355 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002356 ctrl = main.Cluster.next()
2357 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002358 workQueuePending += 1
2359 main.log.debug( addResult )
2360 # Check the results
2361 utilities.assert_equals( expect=main.TRUE,
2362 actual=addResult,
2363 onpass="Work Queue add successful",
2364 onfail="Error adding to Work Queue" )
2365
2366 main.step( "Check the work queue stats" )
2367 statsResults = self.workQueueStatsCheck( workQueueName,
2368 workQueueCompleted,
2369 workQueueInProgress,
2370 workQueuePending )
2371 utilities.assert_equals( expect=True,
2372 actual=statsResults,
2373 onpass="Work Queue stats correct",
2374 onfail="Work Queue stats incorrect " )
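            # Expected bookkeeping through this section: add() leaves 1 pending,
            # addMultiple() raises it to 3, takeAndComplete( 1 ) moves one item to
            # completed ( 2 pending, 1 completed ), takeAndComplete( 2 ) leaves
            # 0 pending and 3 completed, and destroy() resets every stat to 0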
2375
2376 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002377 ctrl = main.Cluster.next()
2378 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002379 workQueuePending += 2
2380 main.log.debug( addMultipleResult )
2381 # Check the results
2382 utilities.assert_equals( expect=main.TRUE,
2383 actual=addMultipleResult,
2384 onpass="Work Queue add multiple successful",
2385 onfail="Error adding multiple items to Work Queue" )
2386
2387 main.step( "Check the work queue stats" )
2388 statsResults = self.workQueueStatsCheck( workQueueName,
2389 workQueueCompleted,
2390 workQueueInProgress,
2391 workQueuePending )
2392 utilities.assert_equals( expect=True,
2393 actual=statsResults,
2394 onpass="Work Queue stats correct",
2395 onfail="Work Queue stats incorrect " )
2396
2397 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002398 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002399 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002400 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002401 workQueuePending -= number
2402 workQueueCompleted += number
2403 main.log.debug( take1Result )
2404 # Check the results
2405 utilities.assert_equals( expect=main.TRUE,
2406 actual=take1Result,
2407 onpass="Work Queue takeAndComplete 1 successful",
2408 onfail="Error taking 1 from Work Queue" )
2409
2410 main.step( "Check the work queue stats" )
2411 statsResults = self.workQueueStatsCheck( workQueueName,
2412 workQueueCompleted,
2413 workQueueInProgress,
2414 workQueuePending )
2415 utilities.assert_equals( expect=True,
2416 actual=statsResults,
2417 onpass="Work Queue stats correct",
2418 onfail="Work Queue stats incorrect " )
2419
2420 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002421 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002422 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002423 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002424 workQueuePending -= number
2425 workQueueCompleted += number
2426 main.log.debug( take2Result )
2427 # Check the results
2428 utilities.assert_equals( expect=main.TRUE,
2429 actual=take2Result,
2430 onpass="Work Queue takeAndComplete 2 successful",
2431 onfail="Error taking 2 from Work Queue" )
2432
2433 main.step( "Check the work queue stats" )
2434 statsResults = self.workQueueStatsCheck( workQueueName,
2435 workQueueCompleted,
2436 workQueueInProgress,
2437 workQueuePending )
2438 utilities.assert_equals( expect=True,
2439 actual=statsResults,
2440 onpass="Work Queue stats correct",
2441 onfail="Work Queue stats incorrect " )
2442
2443 main.step( "Work Queue destroy()" )
2444 valueValue = None
2445 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002446 ctrl = main.Cluster.next()
2447 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002448 workQueueCompleted = 0
2449 workQueueInProgress = 0
2450 workQueuePending = 0
2451 main.log.debug( destroyResult )
2452 # Check the results
2453 utilities.assert_equals( expect=main.TRUE,
2454 actual=destroyResult,
2455 onpass="Work Queue destroy successful",
2456 onfail="Error destroying Work Queue" )
2457
2458 main.step( "Check the work queue stats" )
2459 statsResults = self.workQueueStatsCheck( workQueueName,
2460 workQueueCompleted,
2461 workQueueInProgress,
2462 workQueuePending )
2463 utilities.assert_equals( expect=True,
2464 actual=statsResults,
2465 onpass="Work Queue stats correct",
2466 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002467 except Exception as e:
2468 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002469
2470 def cleanUp( self, main ):
2471 """
2472 Clean up
2473 """
Devin Lim58046fa2017-07-05 16:55:00 -07002474 assert main, "main not defined"
2475 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002476
2477 # printing colors to terminal
2478 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2479 'blue': '\033[94m', 'green': '\033[92m',
2480 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
Jon Hall4173b242017-09-12 17:04:38 -07002481
Devin Lim58046fa2017-07-05 16:55:00 -07002482 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002483
2484 main.step( "Checking raft log size" )
2485 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2486 # get compacted periodically
Jon Hall3e6edb32018-08-21 16:20:30 -07002487
2488 # FIXME: We need to look at the raft servers, which might not be on the ONOS machine
Jon Hall4173b242017-09-12 17:04:38 -07002489 logCheck = main.Cluster.checkPartitionSize()
2490 utilities.assert_equals( expect=True, actual=logCheck,
2491 onpass="Raft log size is not too big",
2492 onfail="Raft logs grew too big" )
2493
Devin Lim58046fa2017-07-05 16:55:00 -07002494 main.step( "Killing tcpdumps" )
2495 main.Mininet2.stopTcpdump()
2496
2497 testname = main.TEST
2498 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2499 main.step( "Copying MN pcap and ONOS log files to test station" )
2500 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2501 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2502 # NOTE: MN Pcap file is being saved to logdir.
2503 # We scp this file as MN and TestON aren't necessarily the same vm
2504
2505 # FIXME: To be replaced with a Jenkin's post script
2506 # TODO: Load these from params
2507 # NOTE: must end in /
2508 logFolder = "/opt/onos/log/"
2509 logFiles = [ "karaf.log", "karaf.log.1" ]
2510 # NOTE: must end in /
2511 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002512 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002513 dstName = main.logdir + "/" + ctrl.name + "-" + f
2514 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002515 logFolder + f, dstName )
2516 # std*.log's
2517 # NOTE: must end in /
2518 logFolder = "/opt/onos/var/"
2519 logFiles = [ "stderr.log", "stdout.log" ]
2520 # NOTE: must end in /
2521 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002522 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002523 dstName = main.logdir + "/" + ctrl.name + "-" + f
2524 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002525 logFolder + f, dstName )
2526 else:
2527 main.log.debug( "skipping saving log files" )
2528
Jon Hall5d5876e2017-11-30 09:33:16 -08002529 main.step( "Checking ONOS Logs for errors" )
2530 for ctrl in main.Cluster.runningNodes:
2531 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2532 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
2533
Devin Lim58046fa2017-07-05 16:55:00 -07002534 main.step( "Stopping Mininet" )
2535 mnResult = main.Mininet1.stopNet()
2536 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2537 onpass="Mininet stopped",
2538 onfail="MN cleanup NOT successful" )
2539
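# Dump the timing labels and values collected during the run ( main.HAlabels /
# main.HAdata, presumably populated by earlier cases ) to a CSV for later analysis.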
Devin Lim58046fa2017-07-05 16:55:00 -07002540 try:
2541 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2542 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2543 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2544 timerLog.close()
2545 except NameError as e:
2546 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002547
Devin Lim58046fa2017-07-05 16:55:00 -07002548 def assignMastership( self, main ):
2549 """
2550 Assign mastership to controllers
2551 """
2552 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002553 assert main, "main not defined"
2554 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002555
2556 main.case( "Assigning Controller roles for switches" )
2557 main.caseExplanation = "Check that ONOS is connected to each " +\
2558 "device. Then manually assign" +\
2559 " mastership to specific ONOS nodes using" +\
2560 " 'device-role'"
2561 main.step( "Assign mastership of switches to specific controllers" )
2562 # Manually assign mastership to the controller we want
2563 roleCall = main.TRUE
2564
2565 ipList = []
2566 deviceList = []
Devin Lim58046fa2017-07-05 16:55:00 -07002567 try:
2568 # Assign mastership to specific controllers. This assignment was
2569 # determined for a 7 node cluster, but will work with any sized
2570 # cluster
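# Intended mapping ( from the assignments below ): s1, s28 -> ONOS1; s2, s3 -> ONOS2;
# s5, s6 -> ONOS3; s4 -> ONOS4; s8-s17 -> ONOS5; s7 -> ONOS6; s18-s27 -> ONOS7.
# With fewer than 7 nodes the modulo wraps the assignment onto the running nodes.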
2571 for i in range( 1, 29 ): # switches 1 through 28
2572 # set up correct variables:
2573 if i == 1:
2574 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002575 ip = main.Cluster.active( c ).ip_address # ONOS1
Jon Hall0e240372018-05-02 11:21:57 -07002576 deviceId = main.Cluster.next().getDevice( "1000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002577 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002578 c = 1 % main.Cluster.numCtrls
2579 ip = main.Cluster.active( c ).ip_address # ONOS2
Jon Hall0e240372018-05-02 11:21:57 -07002580 deviceId = main.Cluster.next().getDevice( "2000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002581 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002582 c = 1 % main.Cluster.numCtrls
2583 ip = main.Cluster.active( c ).ip_address # ONOS2
Jon Hall0e240372018-05-02 11:21:57 -07002584 deviceId = main.Cluster.next().getDevice( "3000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002585 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002586 c = 3 % main.Cluster.numCtrls
2587 ip = main.Cluster.active( c ).ip_address # ONOS4
Jon Hall0e240372018-05-02 11:21:57 -07002588 deviceId = main.Cluster.next().getDevice( "3004" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002589 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002590 c = 2 % main.Cluster.numCtrls
2591 ip = main.Cluster.active( c ).ip_address # ONOS3
Jon Hall0e240372018-05-02 11:21:57 -07002592 deviceId = main.Cluster.next().getDevice( "5000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002593 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002594 c = 2 % main.Cluster.numCtrls
2595 ip = main.Cluster.active( c ).ip_address # ONOS3
Jon Hall0e240372018-05-02 11:21:57 -07002596 deviceId = main.Cluster.next().getDevice( "6000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002597 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002598 c = 5 % main.Cluster.numCtrls
2599 ip = main.Cluster.active( c ).ip_address # ONOS6
Jon Hall0e240372018-05-02 11:21:57 -07002600 deviceId = main.Cluster.next().getDevice( "6007" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002601 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002602 c = 4 % main.Cluster.numCtrls
2603 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002604 dpid = '3' + str( i ).zfill( 3 )
Jon Hall0e240372018-05-02 11:21:57 -07002605 deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002606 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002607 c = 6 % main.Cluster.numCtrls
2608 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002609 dpid = '6' + str( i ).zfill( 3 )
Jon Hall0e240372018-05-02 11:21:57 -07002610 deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002611 elif i == 28:
2612 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002613 ip = main.Cluster.active( c ).ip_address # ONOS1
Jon Hall0e240372018-05-02 11:21:57 -07002614 deviceId = main.Cluster.next().getDevice( "2800" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002615 else:
2616 main.log.error( "You didn't write an else statement for " +
2617 "switch s" + str( i ) )
2618 roleCall = main.FALSE
2619 # Assign switch
2620 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2621 # TODO: make this controller dynamic
Jon Hall0e240372018-05-02 11:21:57 -07002622 roleCall = roleCall and main.Cluster.next().deviceRole( deviceId, ip )
Devin Lim58046fa2017-07-05 16:55:00 -07002623 ipList.append( ip )
2624 deviceList.append( deviceId )
2625 except ( AttributeError, AssertionError ):
2626 main.log.exception( "Something is wrong with ONOS device view" )
Jon Hall0e240372018-05-02 11:21:57 -07002627 main.log.info( main.Cluster.next().devices() )
Devin Lim58046fa2017-07-05 16:55:00 -07002628 utilities.assert_equals(
2629 expect=main.TRUE,
2630 actual=roleCall,
2631 onpass="Re-assigned switch mastership to designated controller",
2632 onfail="Something wrong with deviceRole calls" )
2633
2634 main.step( "Check mastership was correctly assigned" )
2635 roleCheck = main.TRUE
2636 # NOTE: This is due to the fact that device mastership change is not
2637 # atomic and is actually a multi step process
2638 time.sleep( 5 )
2639 for i in range( len( ipList ) ):
2640 ip = ipList[ i ]
2641 deviceId = deviceList[ i ]
2642 # Check assignment
Jon Hall0e240372018-05-02 11:21:57 -07002643 master = main.Cluster.next().getRole( deviceId ).get( 'master' )
Devin Lim58046fa2017-07-05 16:55:00 -07002644 if ip in master:
2645 roleCheck = roleCheck and main.TRUE
2646 else:
2647 roleCheck = roleCheck and main.FALSE
2648 main.log.error( "Error, controller " + ip + " is not" +
2649 " master " + "of device " +
2650 str( deviceId ) + ". Master is " +
2651 repr( master ) + "." )
2652 utilities.assert_equals(
2653 expect=main.TRUE,
2654 actual=roleCheck,
2655 onpass="Switches were successfully reassigned to designated " +
2656 "controller",
2657 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002658
Jon Hall5d5876e2017-11-30 09:33:16 -08002659 def bringUpStoppedNodes( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -07002660 """
Jon Hall5d5876e2017-11-30 09:33:16 -08002661 Bring up the ONOS nodes that were previously stopped.
Devin Lim58046fa2017-07-05 16:55:00 -07002662 """
2663 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002664 assert main, "main not defined"
2665 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002666 assert main.kill, "main.kill not defined"
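# main.kill is expected to hold the controller objects stopped by an earlier
# failure case; each of them is restarted and re-checked below.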
2667 main.case( "Restart minority of ONOS nodes" )
2668
2669 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2670 startResults = main.TRUE
2671 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002672 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002673 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002674 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002675 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2676 onpass="ONOS nodes started successfully",
2677 onfail="ONOS nodes NOT successfully started" )
2678
2679 main.step( "Checking if ONOS is up yet" )
2680 count = 0
2681 onosIsupResult = main.FALSE
2682 while onosIsupResult == main.FALSE and count < 10:
2683 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002684 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002685 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002686 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002687 count = count + 1
2688 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2689 onpass="ONOS restarted successfully",
2690 onfail="ONOS restart NOT successful" )
2691
Jon Hall5d5876e2017-11-30 09:33:16 -08002692 main.step( "Restarting ONOS CLI" )
Devin Lim58046fa2017-07-05 16:55:00 -07002693 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002694 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002695 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002696 ctrl.startOnosCli( ctrl.ipAddress )
2697 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002698 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002699 onpass="ONOS node(s) restarted",
2700 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002701
Jon Hall5d5876e2017-11-30 09:33:16 -08002702 # Grab the time of restart so we can have some idea of average time
Devin Lim58046fa2017-07-05 16:55:00 -07002703 main.restartTime = time.time() - restartTime
2704 main.log.debug( "Restart time: " + str( main.restartTime ) )
2705 # TODO: Make this configurable. Also, we are breaking the above timer
2706 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08002707 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07002708 False,
Devin Lim58046fa2017-07-05 16:55:00 -07002709 sleep=15,
2710 attempts=5 )
2711
2712 utilities.assert_equals( expect=True, actual=nodeResults,
2713 onpass="Nodes check successful",
2714 onfail="Nodes check NOT successful" )
2715
2716 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002717 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002718 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002719 ctrl.name,
2720 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002721 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -07002722 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002723
Jon Hallca319892017-06-15 15:25:22 -07002724 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002725
2726 main.step( "Rerun for election on the node(s) that were killed" )
2727 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002728 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002729 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002730 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002731 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2732 onpass="ONOS nodes reran for election topic",
Jon Hall5d5876e2017-11-30 09:33:16 -08002733 onfail="Error rerunning for election" )
2734
2735 def upgradeNodes( self, main ):
2736 """
2737 Reinstall some nodes with an upgraded version.
2738
2739 This will reinstall nodes in main.kill with an upgraded version.
2740 """
2741 import time
2742 assert main, "main not defined"
2743 assert utilities.assert_equals, "utilities.assert_equals not defined"
2744 assert main.kill, "main.kill not defined"
2745 nodeNames = [ node.name for node in main.kill ]
2746 main.step( "Upgrading" + str( nodeNames ) + " ONOS nodes" )
2747
2748 stopResults = main.TRUE
2749 uninstallResults = main.TRUE
2750 startResults = main.TRUE
2751 sshResults = main.TRUE
2752 isup = main.TRUE
2753 restartTime = time.time()
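# For each killed node: stop and uninstall the old version, reinstall the new
# build, re-secure SSH, then wait for the node to report as up.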
2754 for ctrl in main.kill:
2755 stopResults = stopResults and\
2756 ctrl.onosStop( ctrl.ipAddress )
2757 uninstallResults = uninstallResults and\
2758 ctrl.onosUninstall( ctrl.ipAddress )
2759 # Install the new version of onos
2760 startResults = startResults and\
2761 ctrl.onosInstall( options="-fv", node=ctrl.ipAddress )
2762 sshResults = sshResults and\
2763 ctrl.onosSecureSSH( node=ctrl.ipAddress )
2764 isup = isup and ctrl.isup( ctrl.ipAddress )
2765 utilities.assert_equals( expect=main.TRUE, actual=stopResults,
2766 onpass="ONOS nodes stopped successfully",
2767 onfail="ONOS nodes NOT successfully stopped" )
2768 utilities.assert_equals( expect=main.TRUE, actual=uninstallResults,
2769 onpass="ONOS nodes uninstalled successfully",
2770 onfail="ONOS nodes NOT successfully uninstalled" )
2771 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2772 onpass="ONOS nodes started successfully",
2773 onfail="ONOS nodes NOT successfully started" )
2774 utilities.assert_equals( expect=main.TRUE, actual=sshResults,
2775 onpass="Successfully secured onos ssh",
2776 onfail="Failed to secure onos ssh" )
2777 utilities.assert_equals( expect=main.TRUE, actual=isup,
2778 onpass="ONOS nodes fully started",
2779 onfail="ONOS nodes NOT fully started" )
2780
2781 main.step( "Restarting ONOS CLI" )
2782 cliResults = main.TRUE
2783 for ctrl in main.kill:
2784 cliResults = cliResults and\
2785 ctrl.startOnosCli( ctrl.ipAddress )
2786 ctrl.active = True
2787 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
2788 onpass="ONOS node(s) restarted",
2789 onfail="ONOS node(s) did not restart" )
2790
2791 # Grab the time of restart so we can have some idea of average time
2792 main.restartTime = time.time() - restartTime
2793 main.log.debug( "Restart time: " + str( main.restartTime ) )
2794 # TODO: Make this configurable.
2795 main.step( "Checking ONOS nodes" )
2796 nodeResults = utilities.retry( main.Cluster.nodesCheck,
2797 False,
2798 sleep=15,
2799 attempts=5 )
2800
2801 utilities.assert_equals( expect=True, actual=nodeResults,
2802 onpass="Nodes check successful",
2803 onfail="Nodes check NOT successful" )
2804
2805 if not nodeResults:
2806 for ctrl in main.Cluster.active():
2807 main.log.debug( "{} components not ACTIVE: \n{}".format(
2808 ctrl.name,
2809 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
2810 main.log.error( "Failed to start ONOS, stopping test" )
2811 main.cleanAndExit()
2812
2813 self.commonChecks()
2814
2815 main.step( "Rerun for election on the node(s) that were killed" )
2816 runResults = main.TRUE
2817 for ctrl in main.kill:
2818 runResults = runResults and\
2819 ctrl.electionTestRun()
2820 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2821 onpass="ONOS nodes reran for election topic",
2822 onfail="Error rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002823
Devin Lim142b5342017-07-20 15:22:39 -07002824 def tempCell( self, cellName, ipList ):
2825 main.step( "Create cell file" )
2826 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002827
Devin Lim142b5342017-07-20 15:22:39 -07002828 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2829 main.Mininet1.ip_address,
Jon Hall3e6edb32018-08-21 16:20:30 -07002830 cellAppString, ipList, ipList,
2831 main.ONOScli1.karafUser )
Devin Lim142b5342017-07-20 15:22:39 -07002832 main.step( "Applying cell variable to environment" )
2833 cellResult = main.ONOSbench.setCell( cellName )
2834 verifyResult = main.ONOSbench.verifyCell()
2835
Devin Lim142b5342017-07-20 15:22:39 -07002836 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002837 """
2838 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002839 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002840 1: scaling
2841 """
2842 """
2843 Check state after ONOS failure/scaling
2844 """
2845 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002846 assert main, "main not defined"
2847 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002848 main.case( "Running ONOS Constant State Tests" )
2849
Jon Hall3e6edb32018-08-21 16:20:30 -07002850 OnosAfterWhich = [ "failure", "scaling" ]
Devin Lim58046fa2017-07-05 16:55:00 -07002851
Devin Lim58046fa2017-07-05 16:55:00 -07002852 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002853 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002854
Devin Lim142b5342017-07-20 15:22:39 -07002855 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002856 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002857
2858 if rolesResults and not consistentMastership:
2859 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002860 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002861 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002862 json.dumps( json.loads( ONOSMastership[ i ] ),
2863 sort_keys=True,
2864 indent=4,
2865 separators=( ',', ': ' ) ) )
2866
2867 if compareSwitch:
2868 description2 = "Compare switch roles from before failure"
2869 main.step( description2 )
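# mastershipState is presumably the JSON snapshot of device roles saved by an
# earlier case; each switch's current master is compared against that snapshot.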
2870 try:
2871 currentJson = json.loads( ONOSMastership[ 0 ] )
2872 oldJson = json.loads( mastershipState )
2873 except ( ValueError, TypeError ):
2874 main.log.exception( "Something is wrong with parsing " +
2875 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002876 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2877 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002878 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002879 mastershipCheck = main.TRUE
Jon Hallab611372018-02-21 15:26:05 -08002880 for swName, swDetails in main.Mininet1.getSwitches().items():
2881 switchDPID = swDetails[ 'dpid' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002882 current = [ switch[ 'master' ] for switch in currentJson
2883 if switchDPID in switch[ 'id' ] ]
2884 old = [ switch[ 'master' ] for switch in oldJson
2885 if switchDPID in switch[ 'id' ] ]
2886 if current == old:
2887 mastershipCheck = mastershipCheck and main.TRUE
2888 else:
2889 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2890 mastershipCheck = main.FALSE
2891 utilities.assert_equals(
2892 expect=main.TRUE,
2893 actual=mastershipCheck,
2894 onpass="Mastership of Switches was not changed",
2895 onfail="Mastership of some switches changed" )
2896
2897 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002898 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002899 intentCheck = main.FALSE
2900 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002901
2902 main.step( "Check for consistency in Intents from each controller" )
2903 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2904 main.log.info( "Intents are consistent across all ONOS " +
2905 "nodes" )
2906 else:
2907 consistentIntents = False
2908
2909 # Try to make it easy to figure out what is happening
2910 #
2911 # Intent ONOS1 ONOS2 ...
2912 # 0x01 INSTALLED INSTALLING
2913 # ... ... ...
2914 # ... ... ...
2915 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002916 for ctrl in main.Cluster.active():
2917 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002918 main.log.warn( title )
2919 # get all intent keys in the cluster
2920 keys = []
2921 for nodeStr in ONOSIntents:
2922 node = json.loads( nodeStr )
2923 for intent in node:
2924 keys.append( intent.get( 'id' ) )
2925 keys = set( keys )
2926 for key in keys:
2927 row = "%-13s" % key
2928 for nodeStr in ONOSIntents:
2929 node = json.loads( nodeStr )
2930 for intent in node:
2931 if intent.get( 'id' ) == key:
2932 row += "%-15s" % intent.get( 'state' )
2933 main.log.warn( row )
2934 # End table view
2935
2936 utilities.assert_equals(
2937 expect=True,
2938 actual=consistentIntents,
2939 onpass="Intents are consistent across all ONOS nodes",
2940 onfail="ONOS nodes have different views of intents" )
2941 intentStates = []
2942 for node in ONOSIntents: # Iter through ONOS nodes
2943 nodeStates = []
2944 # Iter through intents of a node
2945 try:
2946 for intent in json.loads( node ):
2947 nodeStates.append( intent[ 'state' ] )
2948 except ( ValueError, TypeError ):
2949 main.log.exception( "Error in parsing intents" )
2950 main.log.error( repr( node ) )
2951 intentStates.append( nodeStates )
2952 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2953 main.log.info( dict( out ) )
2954
2955 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002956 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002957 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002958 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002959 main.log.warn( json.dumps(
2960 json.loads( ONOSIntents[ i ] ),
2961 sort_keys=True,
2962 indent=4,
2963 separators=( ',', ': ' ) ) )
2964 elif intentsResults and consistentIntents:
2965 intentCheck = main.TRUE
2966
2967 # NOTE: Store has no durability, so intents are lost across system
2968 # restarts
2969 if not isRestart:
2970 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2971 # NOTE: this requires case 5 to pass for intentState to be set.
2972 # maybe we should stop the test if that fails?
2973 sameIntents = main.FALSE
2974 try:
2975 intentState
2976 except NameError:
2977 main.log.warn( "No previous intent state was saved" )
2978 else:
2979 if intentState and intentState == ONOSIntents[ 0 ]:
2980 sameIntents = main.TRUE
2981 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2982 # TODO: possibly the states have changed? we may need to figure out
2983 # what the acceptable states are
2984 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2985 sameIntents = main.TRUE
2986 try:
2987 before = json.loads( intentState )
2988 after = json.loads( ONOSIntents[ 0 ] )
2989 for intent in before:
2990 if intent not in after:
2991 sameIntents = main.FALSE
2992 main.log.debug( "Intent is not currently in ONOS " +
2993 "(at least in the same form):" )
2994 main.log.debug( json.dumps( intent ) )
2995 except ( ValueError, TypeError ):
2996 main.log.exception( "Exception printing intents" )
2997 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2998 main.log.debug( repr( intentState ) )
2999 if sameIntents == main.FALSE:
3000 try:
3001 main.log.debug( "ONOS intents before: " )
3002 main.log.debug( json.dumps( json.loads( intentState ),
3003 sort_keys=True, indent=4,
3004 separators=( ',', ': ' ) ) )
3005 main.log.debug( "Current ONOS intents: " )
3006 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
3007 sort_keys=True, indent=4,
3008 separators=( ',', ': ' ) ) )
3009 except ( ValueError, TypeError ):
3010 main.log.exception( "Exception printing intents" )
3011 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3012 main.log.debug( repr( intentState ) )
3013 utilities.assert_equals(
3014 expect=main.TRUE,
3015 actual=sameIntents,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003016 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
Devin Lim58046fa2017-07-05 16:55:00 -07003017 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
3018 intentCheck = intentCheck and sameIntents
3019
3020 main.step( "Get the OF Table entries and compare to before " +
3021 "component " + OnosAfterWhich[ afterWhich ] )
3022 FlowTables = main.TRUE
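# 'flows' is assumed to hold the per-switch flow tables captured before the
# event; flowTableComp() compares each switch's current table against that copy.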
Jon Hallab611372018-02-21 15:26:05 -08003023 for switch in main.Mininet1.getSwitches().keys():
3024 main.log.info( "Checking flow table on " + switch )
3025 tmpFlows = main.Mininet1.getFlowTable( switch, version="1.3", debug=False )
3026 curSwitch = main.Mininet1.flowTableComp( flows[ switch ], tmpFlows )
Devin Lim58046fa2017-07-05 16:55:00 -07003027 FlowTables = FlowTables and curSwitch
3028 if curSwitch == main.FALSE:
Jon Hallab611372018-02-21 15:26:05 -08003029 main.log.warn( "Differences in flow table for switch: {}".format( switch ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003030 utilities.assert_equals(
3031 expect=main.TRUE,
3032 actual=FlowTables,
3033 onpass="No changes were found in the flow tables",
3034 onfail="Changes were found in the flow tables" )
3035
Jon Hallca319892017-06-15 15:25:22 -07003036 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07003037 """
3038 main.step( "Check the continuous pings to ensure that no packets " +
3039 "were dropped during component failure" )
3040 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
3041 main.params[ 'TESTONIP' ] )
3042 LossInPings = main.FALSE
3043 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3044 for i in range( 8, 18 ):
3045 main.log.info(
3046 "Checking for a loss in pings along flow from s" +
3047 str( i ) )
3048 LossInPings = main.Mininet2.checkForLoss(
3049 "/tmp/ping.h" +
3050 str( i ) ) or LossInPings
3051 if LossInPings == main.TRUE:
3052 main.log.info( "Loss in ping detected" )
3053 elif LossInPings == main.ERROR:
3054 main.log.info( "There are multiple mininet process running" )
3055 elif LossInPings == main.FALSE:
3056 main.log.info( "No Loss in the pings" )
3057 main.log.info( "No loss of dataplane connectivity" )
3058 utilities.assert_equals(
3059 expect=main.FALSE,
3060 actual=LossInPings,
3061 onpass="No Loss of connectivity",
3062 onfail="Loss of dataplane connectivity detected" )
3063 # NOTE: Since intents are not persisted with IntnentStore,
3064 # we expect loss in dataplane connectivity
3065 LossInPings = main.FALSE
3066 """
Devin Lim58046fa2017-07-05 16:55:00 -07003067 def compareTopo( self, main ):
3068 """
3069 Compare topo
3070 """
3071 import json
3072 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003073 assert main, "main not defined"
3074 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003075 try:
3076 from tests.dependencies.topology import Topology
3077 except ImportError:
3078 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003079 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003080 try:
3081 main.topoRelated
3082 except ( NameError, AttributeError ):
3083 main.topoRelated = Topology()
3084 main.case( "Compare ONOS Topology view to Mininet topology" )
3085 main.caseExplanation = "Compare topology objects between Mininet" +\
3086 " and ONOS"
3087 topoResult = main.FALSE
3088 topoFailMsg = "ONOS topology doesn't match Mininet"
3089 elapsed = 0
3090 count = 0
3091 main.step( "Comparing ONOS topology to MN topology" )
3092 startTime = time.time()
3093 # Give time for Gossip to work
3094 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3095 devicesResults = main.TRUE
3096 linksResults = main.TRUE
3097 hostsResults = main.TRUE
3098 hostAttachmentResults = True
3099 count += 1
3100 cliStart = time.time()
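# Pull devices, hosts, ports, links, and clusters from every active ONOS node;
# getAll() retries each query per the sleep/attempts kwargs below.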
Devin Lim142b5342017-07-20 15:22:39 -07003101 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003102 kwargs={ 'sleep': 5, 'attempts': 5,
3103 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003104 ipResult = main.TRUE
3105
Devin Lim142b5342017-07-20 15:22:39 -07003106 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003107 kwargs={ 'sleep': 5, 'attempts': 5,
3108 'randomTime': True },
3109 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003110
3111 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003112 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003113 if hosts[ controller ]:
3114 for host in hosts[ controller ]:
3115 if host is None or host.get( 'ipAddresses', [] ) == []:
3116 main.log.error(
3117 "Error with host ipAddresses on controller" +
3118 controllerStr + ": " + str( host ) )
3119 ipResult = main.FALSE
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003120 ports = main.topoRelated.getAll( "ports", True,
Jon Hallca319892017-06-15 15:25:22 -07003121 kwargs={ 'sleep': 5, 'attempts': 5,
3122 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003123 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003124 kwargs={ 'sleep': 5, 'attempts': 5,
3125 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003126 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003127 kwargs={ 'sleep': 5, 'attempts': 5,
3128 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003129
3130 elapsed = time.time() - startTime
3131 cliTime = time.time() - cliStart
Jon Hall5d5876e2017-11-30 09:33:16 -08003132 main.log.debug( "Elapsed time: " + str( elapsed ) )
3133 main.log.debug( "CLI time: " + str( cliTime ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003134
3135 if all( e is None for e in devices ) and\
3136 all( e is None for e in hosts ) and\
3137 all( e is None for e in ports ) and\
3138 all( e is None for e in links ) and\
3139 all( e is None for e in clusters ):
3140 topoFailMsg = "Could not get topology from ONOS"
3141 main.log.error( topoFailMsg )
3142 continue # Try again, No use trying to compare
3143
3144 mnSwitches = main.Mininet1.getSwitches()
3145 mnLinks = main.Mininet1.getLinks()
3146 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003147 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003148 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003149 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3150 controller,
3151 mnSwitches,
3152 devices,
3153 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003154 utilities.assert_equals( expect=main.TRUE,
3155 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003156 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003157 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003158 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003159 " Switches view is incorrect" )
3160
Devin Lim58046fa2017-07-05 16:55:00 -07003161 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003162 main.Mininet1.compareLinks,
3163 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003164 utilities.assert_equals( expect=main.TRUE,
3165 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003166 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003167 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003168 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003169 " links view is incorrect" )
3170 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3171 currentHostsResult = main.Mininet1.compareHosts(
3172 mnHosts,
3173 hosts[ controller ] )
3174 elif hosts[ controller ] == []:
3175 currentHostsResult = main.TRUE
3176 else:
3177 currentHostsResult = main.FALSE
3178 utilities.assert_equals( expect=main.TRUE,
3179 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003180 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003181 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003182 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003183 " hosts don't match Mininet" )
Devin Lim58046fa2017-07-05 16:55:00 -07003184 hostAttachment = True
Jon Hallab611372018-02-21 15:26:05 -08003185 if main.topoMappings:
3186 ctrl = main.Cluster.next()
3187 # CHECKING HOST ATTACHMENT POINTS
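# main.topoMappings maps a host MAC to the device id ( dpid without the "of:"
# prefix ) it should be attached to; every host is also expected on port 1.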
3188 zeroHosts = False
3189 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3190 if hosts[ controller ] == []:
3191 main.log.warn( "There are no hosts discovered" )
3192 zeroHosts = True
3193 else:
3194 for host in hosts[ controller ]:
3195 mac = None
3196 locations = []
3197 device = None
3198 port = None
3199 try:
3200 mac = host.get( 'mac' )
3201 assert mac, "mac field could not be found for this host object"
3202 if 'locations' in host:
3203 locations = host.get( 'locations' )
3204 elif 'location' in host:
3205 locations.append( host.get( 'location' ) )
3206 assert locations, "locations field could not be found for this host object"
Devin Lim58046fa2017-07-05 16:55:00 -07003207
Jon Hallab611372018-02-21 15:26:05 -08003208 # Trim the protocol identifier off deviceId
3209 device = str( locations[0].get( 'elementId' ) ).split( ':' )[ 1 ]
3210 assert device, "elementId field could not be found for this host location object"
Devin Lim58046fa2017-07-05 16:55:00 -07003211
Jon Hallab611372018-02-21 15:26:05 -08003212 port = locations[0].get( 'port' )
3213 assert port, "port field could not be found for this host location object"
Devin Lim58046fa2017-07-05 16:55:00 -07003214
Jon Hallab611372018-02-21 15:26:05 -08003215 # Now check if this matches where they should be
3216 if mac and device and port:
3217 if str( port ) != "1":
3218 main.log.error( "The attachment port is incorrect for " +
3219 "host " + str( mac ) +
3220 ". Expected: 1 Actual: " + str( port ) )
3221 hostAttachment = False
3222 if device != main.topoMappings[ str( mac ) ]:
3223 main.log.error( "The attachment device is incorrect for " +
3224 "host " + str( mac ) +
3225 ". Expected: " + main.topoMppings[ str( mac ) ] +
3226 " Actual: " + device )
3227 hostAttachment = False
3228 else:
Devin Lim58046fa2017-07-05 16:55:00 -07003229 hostAttachment = False
Jon Hallab611372018-02-21 15:26:05 -08003230 except ( AssertionError, TypeError ):
3231 main.log.exception( "Json object not as expected" )
3232 main.log.error( repr( host ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003233 hostAttachment = False
Jon Hallab611372018-02-21 15:26:05 -08003234 else:
3235 main.log.error( "No hosts json output or \"Error\"" +
3236 " in output. hosts = " +
3237 repr( hosts[ controller ] ) )
3238 if zeroHosts is False:
3239 # TODO: Find a way to know if there should be hosts in a
3240 # given point of the test
3241 hostAttachment = True
Devin Lim58046fa2017-07-05 16:55:00 -07003242
Jon Hallab611372018-02-21 15:26:05 -08003243 # END CHECKING HOST ATTACHMENT POINTS
Devin Lim58046fa2017-07-05 16:55:00 -07003244 devicesResults = devicesResults and currentDevicesResult
3245 linksResults = linksResults and currentLinksResult
3246 hostsResults = hostsResults and currentHostsResult
3247 hostAttachmentResults = hostAttachmentResults and\
3248 hostAttachment
3249 topoResult = ( devicesResults and linksResults
3250 and hostsResults and ipResult and
3251 hostAttachmentResults )
3252 utilities.assert_equals( expect=True,
3253 actual=topoResult,
3254 onpass="ONOS topology matches Mininet",
3255 onfail=topoFailMsg )
3256 # End of While loop to pull ONOS state
3257
3258 # Compare json objects for hosts and dataplane clusters
3259
3260 # hosts
3261 main.step( "Hosts view is consistent across all ONOS nodes" )
3262 consistentHostsResult = main.TRUE
3263 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003264 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003265 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3266 if hosts[ controller ] == hosts[ 0 ]:
3267 continue
3268 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003269 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003270 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003271 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003272 consistentHostsResult = main.FALSE
3273
3274 else:
Jon Hallca319892017-06-15 15:25:22 -07003275 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003276 controllerStr )
3277 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003278 main.log.debug( controllerStr +
3279 " hosts response: " +
3280 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003281 utilities.assert_equals(
3282 expect=main.TRUE,
3283 actual=consistentHostsResult,
3284 onpass="Hosts view is consistent across all ONOS nodes",
3285 onfail="ONOS nodes have different views of hosts" )
3286
3287 main.step( "Hosts information is correct" )
3288 hostsResults = hostsResults and ipResult
3289 utilities.assert_equals(
3290 expect=main.TRUE,
3291 actual=hostsResults,
3292 onpass="Host information is correct",
3293 onfail="Host information is incorrect" )
3294
3295 main.step( "Host attachment points to the network" )
3296 utilities.assert_equals(
3297 expect=True,
3298 actual=hostAttachmentResults,
3299 onpass="Hosts are correctly attached to the network",
3300 onfail="ONOS did not correctly attach hosts to the network" )
3301
3302 # Strongly connected clusters of devices
3303 main.step( "Clusters view is consistent across all ONOS nodes" )
3304 consistentClustersResult = main.TRUE
3305 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003306 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003307 if "Error" not in clusters[ controller ]:
3308 if clusters[ controller ] == clusters[ 0 ]:
3309 continue
3310 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003311 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003312 controllerStr +
3313 " is inconsistent with ONOS1" )
3314 consistentClustersResult = main.FALSE
3315 else:
3316 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003317 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003318 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003319 main.log.debug( controllerStr +
3320 " clusters response: " +
3321 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003322 utilities.assert_equals(
3323 expect=main.TRUE,
3324 actual=consistentClustersResult,
3325 onpass="Clusters view is consistent across all ONOS nodes",
3326 onfail="ONOS nodes have different views of clusters" )
3327 if not consistentClustersResult:
3328 main.log.debug( clusters )
3329 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003330 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003331
3332 main.step( "There is only one SCC" )
3333 # there should always only be one cluster
3334 try:
3335 numClusters = len( json.loads( clusters[ 0 ] ) )
3336 except ( ValueError, TypeError ):
3337 main.log.exception( "Error parsing clusters[0]: " +
3338 repr( clusters[ 0 ] ) )
3339 numClusters = "ERROR"
3340 clusterResults = main.FALSE
3341 if numClusters == 1:
3342 clusterResults = main.TRUE
3343 utilities.assert_equals(
3344 expect=1,
3345 actual=numClusters,
3346 onpass="ONOS shows 1 SCC",
3347 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3348
3349 topoResult = ( devicesResults and linksResults
3350 and hostsResults and consistentHostsResult
3351 and consistentClustersResult and clusterResults
3352 and ipResult and hostAttachmentResults )
3353
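# Also require that the comparison converged within two polling attempts.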
3354 topoResult = topoResult and int( count <= 2 )
3355 note = "note it takes about " + str( int( cliTime ) ) + \
3356 " seconds for the test to make all the cli calls to fetch " +\
3357 "the topology from each ONOS instance"
3358 main.log.info(
3359 "Very crass estimate for topology discovery/convergence( " +
3360 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3361 str( count ) + " tries" )
3362
3363 main.step( "Device information is correct" )
3364 utilities.assert_equals(
3365 expect=main.TRUE,
3366 actual=devicesResults,
3367 onpass="Device information is correct",
3368 onfail="Device information is incorrect" )
3369
3370 main.step( "Links are correct" )
3371 utilities.assert_equals(
3372 expect=main.TRUE,
3373 actual=linksResults,
3374 onpass="Link are correct",
3375 onfail="Links are incorrect" )
3376
3377 main.step( "Hosts are correct" )
3378 utilities.assert_equals(
3379 expect=main.TRUE,
3380 actual=hostsResults,
3381 onpass="Hosts are correct",
3382 onfail="Hosts are incorrect" )
3383
3384 # FIXME: move this to an ONOS state case
3385 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08003386 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07003387 False,
Devin Lim58046fa2017-07-05 16:55:00 -07003388 attempts=5 )
3389 utilities.assert_equals( expect=True, actual=nodeResults,
3390 onpass="Nodes check successful",
3391 onfail="Nodes check NOT successful" )
3392 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003393 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003394 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003395 ctrl.name,
3396 ctrl.CLI.sendline( "scr:list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003397
3398 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003399 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003400
Jon Hallab611372018-02-21 15:26:05 -08003401 def linkDown( self, main, src="s3", dst="s28" ):
Devin Lim58046fa2017-07-05 16:55:00 -07003402 """
Jon Hallab611372018-02-21 15:26:05 -08003403 Link src-dst down
Devin Lim58046fa2017-07-05 16:55:00 -07003404 """
3405 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003406 assert main, "main not defined"
3407 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003408 # NOTE: You should probably run a topology check after this
3409
3410 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3411
3412 description = "Turn off a link to ensure that Link Discovery " +\
3413 "is working properly"
3414 main.case( description )
3415
Jon Hallab611372018-02-21 15:26:05 -08003416 main.step( "Kill Link between " + src + " and " + dst )
3417 LinkDown = main.Mininet1.link( END1=src, END2=dst, OPTION="down" )
Devin Lim58046fa2017-07-05 16:55:00 -07003418 main.log.info( "Waiting " + str( linkSleep ) +
3419 " seconds for link down to be discovered" )
3420 time.sleep( linkSleep )
3421 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3422 onpass="Link down successful",
3423 onfail="Failed to bring link down" )
3424 # TODO do some sort of check here
3425
Jon Hallab611372018-02-21 15:26:05 -08003426 def linkUp( self, main, src="s3", dst="s28" ):
Devin Lim58046fa2017-07-05 16:55:00 -07003427 """
Jon Hallab611372018-02-21 15:26:05 -08003428 Link src-dst up
Devin Lim58046fa2017-07-05 16:55:00 -07003429 """
3430 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003431 assert main, "main not defined"
3432 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003433 # NOTE: You should probably run a topology check after this
3434
3435 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3436
3437 description = "Restore a link to ensure that Link Discovery is " + \
3438 "working properly"
3439 main.case( description )
3440
Jon Hallab611372018-02-21 15:26:05 -08003441 main.step( "Bring link between " + src + " and " + dst + " back up" )
3442 LinkUp = main.Mininet1.link( END1=src, END2=dst, OPTION="up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003443 main.log.info( "Waiting " + str( linkSleep ) +
3444 " seconds for link up to be discovered" )
3445 time.sleep( linkSleep )
3446 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3447 onpass="Link up successful",
3448 onfail="Failed to bring link up" )
3449
3450 def switchDown( self, main ):
3451 """
3452 Switch Down
3453 """
3454 # NOTE: You should probably run a topology check after this
3455 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003456 assert main, "main not defined"
3457 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003458
3459 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3460
3461 description = "Killing a switch to ensure it is discovered correctly"
Devin Lim58046fa2017-07-05 16:55:00 -07003462 main.case( description )
3463 switch = main.params[ 'kill' ][ 'switch' ]
3464 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3465
3466 # TODO: Make this switch parameterizable
3467 main.step( "Kill " + switch )
3468 main.log.info( "Deleting " + switch )
3469 main.Mininet1.delSwitch( switch )
3470 main.log.info( "Waiting " + str( switchSleep ) +
3471 " seconds for switch down to be discovered" )
3472 time.sleep( switchSleep )
Jon Hall0e240372018-05-02 11:21:57 -07003473 device = main.Cluster.next().getDevice( dpid=switchDPID )
Devin Lim58046fa2017-07-05 16:55:00 -07003474 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003475 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003476 result = main.FALSE
3477 if device and device[ 'available' ] is False:
3478 result = main.TRUE
3479 utilities.assert_equals( expect=main.TRUE, actual=result,
3480 onpass="Kill switch successful",
3481 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003482
Devin Lim58046fa2017-07-05 16:55:00 -07003483 def switchUp( self, main ):
3484 """
3485 Switch Up
3486 """
3487 # NOTE: You should probably run a topology check after this
3488 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003489 assert main, "main not defined"
3490 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003491
3492 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3493 switch = main.params[ 'kill' ][ 'switch' ]
3494 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3495 links = main.params[ 'kill' ][ 'links' ].split()
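# 'links' lists the peer switches the killed switch was attached to, so the
# links can be re-added along with the switch below.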
Devin Lim58046fa2017-07-05 16:55:00 -07003496 description = "Adding a switch to ensure it is discovered correctly"
3497 main.case( description )
3498
3499 main.step( "Add back " + switch )
3500 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3501 for peer in links:
3502 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003503 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003504 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3505 main.log.info( "Waiting " + str( switchSleep ) +
3506 " seconds for switch up to be discovered" )
3507 time.sleep( switchSleep )
Jon Hall0e240372018-05-02 11:21:57 -07003508 device = main.Cluster.next().getDevice( dpid=switchDPID )
Devin Lim58046fa2017-07-05 16:55:00 -07003509 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003510 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003511 result = main.FALSE
3512 if device and device[ 'available' ]:
3513 result = main.TRUE
3514 utilities.assert_equals( expect=main.TRUE, actual=result,
3515 onpass="add switch successful",
3516 onfail="Failed to add switch?" )
3517
3518 def startElectionApp( self, main ):
3519 """
3520 start election app on all onos nodes
3521 """
Devin Lim58046fa2017-07-05 16:55:00 -07003522 assert main, "main not defined"
3523 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003524
3525 main.case( "Start Leadership Election app" )
3526 main.step( "Install leadership election app" )
Jon Hall0e240372018-05-02 11:21:57 -07003527 appResult = main.Cluster.next().CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003528 utilities.assert_equals(
3529 expect=main.TRUE,
3530 actual=appResult,
3531 onpass="Election app installed",
3532 onfail="Something went wrong with installing Leadership election" )
3533
3534 main.step( "Run for election on each node" )
Jon Hall0e240372018-05-02 11:21:57 -07003535 main.Cluster.next().electionTestRun()
Jon Hallca319892017-06-15 15:25:22 -07003536 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003537 time.sleep( 5 )
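# consistentLeaderboards() appears to return ( allMatch, leaderboards ), where
# leaderboards[ 0 ][ 0 ] is the leader's IP when all nodes agree.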
Jon Hallca319892017-06-15 15:25:22 -07003538 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003539 utilities.assert_equals(
3540 expect=True,
3541 actual=sameResult,
3542 onpass="All nodes see the same leaderboards",
3543 onfail="Inconsistent leaderboards" )
3544
3545 if sameResult:
Jon Hall5d5876e2017-11-30 09:33:16 -08003546 # Check that the leader is one of the active nodes
3547 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003548 leader = leaders[ 0 ][ 0 ]
Jon Hall5d5876e2017-11-30 09:33:16 -08003549 if leader in ips:
3550 legitimate = True
Devin Lim58046fa2017-07-05 16:55:00 -07003551 else:
Jon Hall5d5876e2017-11-30 09:33:16 -08003552 legitimate = False
3553 main.log.debug( leaders )
3554 main.step( "Active node was elected leader?" )
Devin Lim58046fa2017-07-05 16:55:00 -07003555 utilities.assert_equals(
3556 expect=True,
Jon Hall5d5876e2017-11-30 09:33:16 -08003557 actual=legitimate,
Devin Lim58046fa2017-07-05 16:55:00 -07003558 onpass="Correct leader was elected",
3559 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003560 main.Cluster.testLeader = leader
3561
Devin Lim58046fa2017-07-05 16:55:00 -07003562 def isElectionFunctional( self, main ):
3563 """
3564 Check that Leadership Election is still functional
3565 15.1 Run election on each node
3566 15.2 Check that each node has the same leaders and candidates
3567 15.3 Find current leader and withdraw
3568 15.4 Check that a new node was elected leader
3569 15.5 Check that that new leader was the candidate of old leader
3570 15.6 Run for election on old leader
3571 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3572 15.8 Make sure that the old leader was added to the candidate list
3573
3574 old and new variable prefixes refer to data from before vs after
3575 withdrawal, and later to before withdrawal vs after re-election
3576 """
3577 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003578 assert main, "main not defined"
3579 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003580
3581 description = "Check that Leadership Election is still functional"
3582 main.case( description )
3583 # NOTE: Need to re-run after restarts since being a candidate is not persistent
3584
3585 oldLeaders = [] # list of lists of each nodes' candidates before
3586 newLeaders = [] # list of lists of each nodes' candidates after
3587 oldLeader = '' # the old leader from oldLeaders, None if not same
3588 newLeader = '' # the new leader from newLeaders, None if not same
3589 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3590 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003591 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003592 expectNoLeader = True
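# With a single-node cluster, withdrawing the only candidate leaves the
# election topic with no leader, so 'none' is the expected result below.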
3593
3594 main.step( "Run for election on each node" )
Devin Lim142b5342017-07-20 15:22:39 -07003595 electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003596 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003597 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003598 actual=electionResult,
3599 onpass="All nodes successfully ran for leadership",
3600 onfail="At least one node failed to run for leadership" )
3601
3602 if electionResult == main.FALSE:
3603 main.log.error(
3604 "Skipping Test Case because Election Test App isn't loaded" )
3605 main.skipCase()
3606
3607 main.step( "Check that each node shows the same leader and candidates" )
3608 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003609 activeCLIs = main.Cluster.active()
3610 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
        if sameResult:
            oldLeader = oldLeaders[ 0 ][ 0 ]
            main.log.info( "Old leader: " + oldLeader )
        else:
            oldLeader = None
        utilities.assert_equals(
            expect=True,
            actual=sameResult,
            onpass="Leaderboards are consistent for the election topic",
            onfail=failMessage )

        main.step( "Find current leader and withdraw" )
        withdrawResult = main.TRUE
        # do some sanity checking on the leader before using it
        if oldLeader is None:
            main.log.error( "Leadership isn't consistent." )
            withdrawResult = main.FALSE
        # Get the CLI of the oldLeader
        for ctrl in main.Cluster.active():
            if oldLeader == ctrl.ipAddress:
                oldLeaderCLI = ctrl
                break
        else:  # FOR/ELSE: only runs if the loop finished without breaking, i.e. no matching node was found
            main.log.error( "Leader election, could not find current leader" )
        if oldLeader:
            withdrawResult = oldLeaderCLI.electionTestWithdraw()
        utilities.assert_equals(
            expect=main.TRUE,
            actual=withdrawResult,
            onpass="Node was withdrawn from election",
            onfail="Node was not withdrawn from election" )

        main.step( "Check that a new node was elected leader" )
        failMessage = "Nodes have different leaders"
        # Get new leaders and candidates
        newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
        newLeader = None
        if newLeaderResult:
            if newLeaders[ 0 ][ 0 ] == 'none':
                main.log.error( "No leader was elected on at least 1 node" )
                if not expectNoLeader:
                    newLeaderResult = False
            newLeader = newLeaders[ 0 ][ 0 ]

        # Check that the new leader is not the old leader, which was withdrawn
        if newLeader == oldLeader:
            newLeaderResult = False
            main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
                            " as the current leader" )
        utilities.assert_equals(
            expect=True,
            actual=newLeaderResult,
            onpass="Leadership election passed",
            onfail="Something went wrong with Leadership election" )

        main.step( "Check that the new leader was a candidate of the old leader" )
        # candidates[ 2 ] should become the top candidate after withdrawal
        correctCandidateResult = main.TRUE
        if expectNoLeader:
            if newLeader == 'none':
                main.log.info( "No leader expected. None found. Pass" )
                correctCandidateResult = main.TRUE
            else:
                main.log.info( "Expected no leader, got: " + str( newLeader ) )
                correctCandidateResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=correctCandidateResult,
            onpass="Correct Candidate Elected",
            onfail="Incorrect Candidate Elected" )

        main.step( "Run for election on old leader ( just so everyone " +
                   "is in the hat )" )
        if oldLeaderCLI is not None:
            runResult = oldLeaderCLI.electionTestRun()
        else:
            main.log.error( "No old leader to re-elect" )
            runResult = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=runResult,
            onpass="App re-ran for election",
            onfail="App failed to run for election" )

        main.step(
            "Check that oldLeader is a candidate, and leader if only 1 node" )
        # verify leader didn't just change
        # Get new leaders and candidates
        reRunLeaders = []
        time.sleep( 5 )  # TODO: Parameterize this sleep
        positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
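        # NOTE: no assertion was emitted for this final step above; a minimal
        #       check ( a sketch, assuming the re-elected old leader should show
        #       up at the end of the candidate list ):
        if not reRunLeaders[ 0 ]:
            positionResult = False
        elif oldLeader != reRunLeaders[ 0 ][ -1 ]:
            main.log.error( "Old leader " + str( oldLeader ) + " not in expected position: " +
                            str( reRunLeaders[ 0 ] ) )
            positionResult = False
        utilities.assert_equals(
            expect=True,
            actual=positionResult,
            onpass="Old leader successfully re-ran for election",
            onfail="Something went wrong with Leadership election after " +
                   "the old leader re-ran for election" )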

    def installDistributedPrimitiveApp( self, main ):
        '''
        Install Distributed Primitives app
        '''
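        # The org.onosproject.distributedprimitives app provides the test CLI
        # commands ( e.g. distributed counter and set commands ) that the
        # distributed-primitives test cases rely on.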
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        # Variables for the distributed primitives tests
        main.pCounterName = "TestON-Partitions"
        main.pCounterValue = 0
        main.onosSet = set( [] )
        main.onosSetName = "TestON-set"

        description = "Install Primitives app"
        main.case( description )
        main.step( "Install Primitives app" )
        appName = "org.onosproject.distributedprimitives"
        appResults = main.Cluster.next().CLI.activateApp( appName )
        utilities.assert_equals( expect=main.TRUE,
                                 actual=appResults,
                                 onpass="Primitives app activated",
                                 onfail="Primitives app not activated" )
        # TODO check on all nodes instead of sleeping
        time.sleep( 5 )  # To allow all nodes to activate

    def upgradeInit( self, main ):
        '''
        Initiates an ISSU ( In-Service Software Upgrade )
        '''
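        # ISSU: In-Service Software Upgrade. issuInit() and issu() below appear to
        # wrap the ONOS CLI "issu init" and "issu" ( status ) commands; the later
        # phases of an upgrade ( upgrading nodes, then commit or rollback ) are
        # assumed to be driven by the calling upgrade test case.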
        main.step( "Send the command to initialize the upgrade" )
        ctrl = main.Cluster.next().CLI
        initialized = ctrl.issuInit()
        utilities.assert_equals( expect=main.TRUE, actual=initialized,
                                 onpass="ISSU initialized",
                                 onfail="Error initializing the upgrade" )

        main.step( "Check the status of the upgrade" )
        ctrl = main.Cluster.next().CLI
        status = ctrl.issu()
        main.log.debug( status )
        # TODO: check things here?

        main.step( "Checking ONOS nodes" )
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       sleep=15,
                                       attempts=5 )
        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

    def backupData( self, main, location ):
        """
        Backs up ONOS data and logs to a given location on each active node in a cluster
        """
        result = True
        for ctrl in main.Cluster.active():
            try:
                ctrl.server.handle.sendline( "rm " + location )
                ctrl.server.handle.expect( ctrl.server.prompt )
                main.log.debug( ctrl.server.handle.before + ctrl.server.handle.after )
            except pexpect.ExceptionPexpect as e:
                main.log.error( e )
                main.cleanAndExit()
            ctrl.CLI.log( "'Starting backup of onos data'", level="INFO" )
            result = result and ( ctrl.server.backupData( location ) is main.TRUE )
            ctrl.CLI.log( "'End of backup of onos data'", level="INFO" )
        return result

    def restoreData( self, main, location ):
        """
        Restores ONOS data and logs from a given location on each node in a cluster
        """
        result = True
        for ctrl in main.Cluster.controllers:
            result = result and ( ctrl.server.restoreData( location ) is main.TRUE )
        return result

    def startTopology( self, main ):
        """
        Starts Mininet using a topology file after pushing a network config file to ONOS.
        """
        import json
        import time
        main.case( "Starting Mininet Topology" )

        main.step( "Pushing Network config" )
        ctrl = main.Cluster.next()
        cfgPath = main.testsRoot + main.params[ 'topology' ][ 'configPath' ]
        cfgResult = ctrl.onosNetCfg( ctrl.ipAddress,
                                     path=cfgPath,
                                     fileName=main.params[ 'topology' ][ 'configName' ] )
        utilities.assert_equals( expect=main.TRUE, actual=cfgResult,
                                 onpass="Pushed Network Configuration to ONOS",
                                 onfail="Failed to push Network Configuration to ONOS" )

        main.step( "Check Network config" )
        try:
            cfgFile = cfgPath + main.params[ 'topology' ][ 'configName' ]
            with open( cfgFile, 'r' ) as contents:
                pushedNetCfg = json.load( contents )
                pushedNetCfg = json.loads( json.dumps( pushedNetCfg ).lower() )
        except IOError:
            main.log.exception( "Net Cfg file not found." )
            main.cleanAndExit()
        netCfgSleep = int( main.params[ 'timers' ][ 'NetCfg' ] )
        time.sleep( netCfgSleep )
        rawONOSNetCfg = utilities.retry( f=main.Cluster.next().REST.getNetCfg,
                                         retValue=False,
                                         attempts=5,
                                         sleep=netCfgSleep )
        # Fix differences between ONOS printing and Pushed Cfg
        onosNetCfg = json.loads( rawONOSNetCfg.lower() )

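        # The comparisons below assume the netcfg JSON has top-level 'devices' and
        # 'ports' sections and, optionally, a 'hosts' section; both copies were
        # lower-cased above so the comparison is case-insensitive.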
        # Compare pushed device config
        cfgResult = True
        for did, pushedDevice in pushedNetCfg[ 'devices' ].items():
            onosDevice = onosNetCfg[ 'devices' ].get( did )
            if pushedDevice != onosDevice:
                cfgResult = False
                main.log.error( "Pushed Network configuration does not match what is in " +
                                "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedDevice ),
                                                                      ctrl.pprint( onosDevice ) ) )

        # Compare pushed port config
        for portURI, pushedInterface in pushedNetCfg[ 'ports' ].items():
            onosInterface = onosNetCfg[ 'ports' ].get( portURI )
            # NOTE: pushed Cfg doesn't have macs
            for i in xrange( 0, len( pushedInterface[ 'interfaces' ] ) ):
                keys = pushedInterface[ 'interfaces' ][ i ].keys()
                portCompare = True
                for key in keys:
                    if pushedInterface[ 'interfaces' ][ i ].get( key ) != onosInterface[ 'interfaces' ][ i ].get( key ):
                        main.log.debug( "{} mismatch for port {}".format( key, portURI ) )
                        portCompare = False
                if not portCompare:
                    cfgResult = False
                    main.log.error( "Pushed Network configuration does not match what is in " +
                                    "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedInterface ),
                                                                          ctrl.pprint( onosInterface ) ) )

        if pushedNetCfg.get( 'hosts' ) is not None:
            # Compare pushed host config
            for hid, pushedHost in pushedNetCfg[ 'hosts' ].items():
                onosHost = onosNetCfg[ 'hosts' ].get( hid.lower() )
                if pushedHost != onosHost:
                    cfgResult = False
                    main.log.error( "Pushed Network configuration does not match what is in " +
                                    "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedHost ),
                                                                          ctrl.pprint( onosHost ) ) )
        utilities.assert_equals( expect=True,
                                 actual=cfgResult,
                                 onpass="Net Cfg set",
                                 onfail="Net Cfg not correctly set" )
        if not cfgResult:
            main.log.debug( "Pushed Network Config:" + ctrl.pprint( pushedNetCfg ) )
            main.log.debug( "ONOS Network Config:" + ctrl.pprint( onosNetCfg ) )

        main.step( "Start Mininet topology" )
        for f in main.params[ 'topology' ][ 'files' ].values():
            main.ONOSbench.scp( main.Mininet1,
                                f,
                                main.Mininet1.home,
                                direction="to" )
        topoName = main.params[ 'topology' ][ 'topoFile' ]
        topo = main.Mininet1.home + topoName
        ctrlList = ''
        for ctrl in main.Cluster.controllers:
            ctrlList += str( ctrl.ipAddress ) + ","
        args = main.params[ 'topology' ][ 'args' ]
        startResult = main.Mininet1.startNet( topoFile=topo,
                                              args=" --onos-ip=" + ctrlList + " " + args )
        utilities.assert_equals( expect=main.TRUE, actual=startResult,
                                 onpass="Mininet Started",
                                 onfail="Failed to start Mininet" )
        # Give SR app time to configure the network
        time.sleep( int( main.params[ 'timers' ][ 'SRSetup' ] ) )