blob: 4342f96e20c7c8129e9dabba0c98b84cec140617 [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002Copyright 2015 Open Networking Foundation ( ONF )
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -070011 ( at your option ) any later version.
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -070012
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
Jon Halla440e872016-03-31 15:15:50 -070021import json
Jon Hall41d39f12016-04-11 22:54:35 -070022import time
Jon Halla478b852017-12-04 15:00:15 -080023import pexpect
24import re
Jon Halle1a3b752015-07-22 13:02:46 -070025
Jon Hallf37d44d2017-05-24 10:37:30 -070026
Jon Hall41d39f12016-04-11 22:54:35 -070027class HA():
Jon Hall57b50432015-10-22 10:20:10 -070028
    def __init__( self ):
        """
        Initialize the HA test helper.

        Also resets the global host-mac -> device-id map used by the HA
        topology checks ( repopulated later by assignDevices ).
        """
        # Placeholder default value expected by the TestON framework
        self.default = ''
        # Stored on the global `main` object so test cases outside this
        # class can read the mappings
        main.topoMappings = {}
Jon Hall57b50432015-10-22 10:20:10 -070032
Devin Lim58046fa2017-07-05 16:55:00 -070033 def customizeOnosGenPartitions( self ):
Devin Lim58046fa2017-07-05 16:55:00 -070034 # copy gen-partions file to ONOS
35 # NOTE: this assumes TestON and ONOS are on the same machine
Jon Hallab611372018-02-21 15:26:05 -080036 srcFile = main.testsRoot + "/HA/dependencies/onos-gen-partitions"
Devin Lim58046fa2017-07-05 16:55:00 -070037 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
38 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
39 main.ONOSbench.ip_address,
40 srcFile,
41 dstDir,
42 pwd=main.ONOSbench.pwd,
43 direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070044
Devin Lim58046fa2017-07-05 16:55:00 -070045 def cleanUpGenPartition( self ):
46 # clean up gen-partitions file
47 try:
48 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
49 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
50 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
51 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
Jon Hall0e240372018-05-02 11:21:57 -070052 main.log.info( "Cleaning custom gen partitions file, response was: \n" +
Devin Lim58046fa2017-07-05 16:55:00 -070053 str( main.ONOSbench.handle.before ) )
54 except ( pexpect.TIMEOUT, pexpect.EOF ):
55 main.log.exception( "ONOSbench: pexpect exception found:" +
56 main.ONOSbench.handle.before )
Devin Lim44075962017-08-11 10:56:37 -070057 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070058
Devin Lim58046fa2017-07-05 16:55:00 -070059 def startingMininet( self ):
60 main.step( "Starting Mininet" )
61 # scp topo file to mininet
62 # TODO: move to params?
63 topoName = "obelisk.py"
64 filePath = main.ONOSbench.home + "/tools/test/topos/"
65 main.ONOSbench.scp( main.Mininet1,
66 filePath + topoName,
67 main.Mininet1.home,
68 direction="to" )
69 mnResult = main.Mininet1.startNet()
70 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
71 onpass="Mininet Started",
72 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070073
Devin Lim58046fa2017-07-05 16:55:00 -070074 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070075 if main.Cluster.numCtrls >= 5:
76 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070077 else:
78 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim58046fa2017-07-05 16:55:00 -070079
Jon Hall4f360bc2017-09-07 10:19:52 -070080 def copyBackupConfig( self ):
Devin Lim58046fa2017-07-05 16:55:00 -070081 main.step( "Copying backup config files" )
82 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
83 cp = main.ONOSbench.scp( main.ONOSbench,
84 main.onosServicepath,
85 main.onosServicepath + ".backup",
86 direction="to" )
87
88 utilities.assert_equals( expect=main.TRUE,
89 actual=cp,
90 onpass="Copy backup config file succeeded",
91 onfail="Copy backup config file failed" )
Jon Hall4f360bc2017-09-07 10:19:52 -070092
93 def setMetadataUrl( self ):
94 # NOTE: You should probably backup the config before and reset the config after the test
Devin Lim58046fa2017-07-05 16:55:00 -070095 # we need to modify the onos-service file to use remote metadata file
96 # url for cluster metadata file
97 iface = main.params[ 'server' ].get( 'interface' )
98 ip = main.ONOSbench.getIpAddr( iface=iface )
99 metaFile = "cluster.json"
100 javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
Devin Lim58046fa2017-07-05 16:55:00 -0700101 main.log.warn( repr( javaArgs ) )
102 handle = main.ONOSbench.handle
Jon Hall4173b242017-09-12 17:04:38 -0700103 sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
104 main.onosServicepath )
Devin Lim58046fa2017-07-05 16:55:00 -0700105 main.log.warn( repr( sed ) )
106 handle.sendline( sed )
107 handle.expect( metaFile )
108 output = handle.before
109 handle.expect( "\$" )
110 output += handle.before
111 main.log.debug( repr( output ) )
112
113 def cleanUpOnosService( self ):
114 # Cleanup custom onos-service file
115 main.ONOSbench.scp( main.ONOSbench,
116 main.onosServicepath + ".backup",
117 main.onosServicepath,
118 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700119
    def consistentCheck( self ):
        """
        Checks that TestON counters are consistent across all nodes.

        Returns the tuple ( onosCounters, consistent )
        - onosCounters is the parsed json output of the counters command on
          all nodes
        - consistent is main.TRUE if all "TestON" counters are consistent
          across all nodes or main.FALSE
        """
        try:
            # Query the counters on every active node in parallel, with retries
            onosCountersRaw = []
            threads = []
            for ctrl in main.Cluster.active():
                t = main.Thread( target=utilities.retry,
                                 name="counters-" + str( ctrl ),
                                 args=[ ctrl.counters, [ None ] ],
                                 kwargs={ 'sleep': 5, 'attempts': 5,
                                          'randomTime': True } )
                threads.append( t )
                t.start()
            # Collect results in thread-creation order so indices line up
            # with main.Cluster.active()
            for t in threads:
                t.join()
                onosCountersRaw.append( t.result )
            # Parse each raw response; a node that returned unparseable output
            # contributes an empty dict instead of aborting the whole check
            onosCounters = []
            for i in range( len( onosCountersRaw ) ):
                try:
                    value = json.loads( onosCountersRaw[ i ] )
                    onosCounters.append( value )
                except ( ValueError, TypeError ):
                    main.log.error( "Could not parse counters response from " +
                                    str( main.Cluster.active( i ) ) )
                    main.log.warn( repr( onosCountersRaw[ i ] ) )
                    onosCounters.append( {} )

            testCounters = {}
            # make a list of all the "TestON-*" counters in ONOS
            # looks like a dict whose keys are the name of the ONOS node and
            # values are a list of the counters. I.E.
            # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
            # }
            # NOTE: There is an assumption that all nodes are active
            #       based on the above for loops
            for controller in enumerate( onosCounters ):
                for key, value in controller[ 1 ].iteritems():
                    if 'TestON' in key:
                        node = main.Cluster.active( controller[ 0 ] )
                        try:
                            testCounters[ node ].append( { key: value } )
                        except KeyError:
                            # first TestON counter seen for this node
                            testCounters[ node ] = [ { key: value } ]
            # compare the counters on each node: every node's counter list must
            # equal the first node's list
            # NOTE(review): raises IndexError if no node had any TestON counter;
            # that case falls into the broad except below and exits the test
            firstV = testCounters.values()[ 0 ]
            tmp = [ v == firstV for k, v in testCounters.iteritems() ]
            if all( tmp ):
                consistent = main.TRUE
            else:
                consistent = main.FALSE
                main.log.error( "ONOS nodes have different values for counters: %s",
                                testCounters )
            return ( onosCounters, consistent )
        except Exception:
            main.log.exception( "" )
            main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700185
186 def counterCheck( self, counterName, counterValue ):
187 """
188 Checks that TestON counters are consistent across all nodes and that
189 specified counter is in ONOS with the given value
190 """
191 try:
192 correctResults = main.TRUE
193 # Get onos counters results and consistentCheck
194 onosCounters, consistent = self.consistentCheck()
195 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700196 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -0700197 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700198 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700199 onosValue = None
200 try:
201 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700202 except AttributeError:
Jon Hallca319892017-06-15 15:25:22 -0700203 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700204 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700205 correctResults = main.FALSE
206 if onosValue == counterValue:
Jon Hall0e240372018-05-02 11:21:57 -0700207 main.log.info( "{}: {} counter value is correct".format( node, counterName ) )
Jon Halla440e872016-03-31 15:15:50 -0700208 else:
Jon Hall0e240372018-05-02 11:21:57 -0700209 main.log.error( node + ": " + counterName +
Jon Hall41d39f12016-04-11 22:54:35 -0700210 " counter value is incorrect," +
211 " expected value: " + str( counterValue ) +
212 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700213 correctResults = main.FALSE
214 return consistent and correctResults
215 except Exception:
216 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700217 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700218
219 def consistentLeaderboards( self, nodes ):
220 TOPIC = 'org.onosproject.election'
221 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700222 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700223 for n in range( 5 ): # Retry in case election is still happening
224 leaderList = []
225 # Get all leaderboards
226 for cli in nodes:
227 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
228 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700229 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700230 leaderList is not None
Jon Hall41d39f12016-04-11 22:54:35 -0700231 if result:
232 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700233 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700234 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
235 return ( result, leaderList )
236
    def initialSetUp( self, serviceClean=False ):
        """
        Remaining initial setup steps shared by the HA test cases:
        optionally start a packet capture, optionally revert onos-service
        config changes, verify all ONOS nodes are up, activate the apps
        listed in the params file, push ONOS configuration settings from
        the params file, and verify app ids.

        Arguments:
        - serviceClean: when True, `git checkout` the onos.conf /
          onos.service init files on the bench node before checking nodes
        """
        # Optionally capture traffic on the Mininet side for debugging
        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            # Revert any modified init files; "\$" matches the shell prompt
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.step( "Checking ONOS nodes" )
        # Retry for a long time since a fresh cluster can take a while to settle
        nodeResults = utilities.retry( main.Cluster.nodesCheck,
                                       False,
                                       attempts=90 )

        utilities.assert_equals( expect=True, actual=nodeResults,
                                 onpass="Nodes check successful",
                                 onfail="Nodes check NOT successful" )

        if not nodeResults:
            # Dump the non-ACTIVE components of each node before aborting
            for ctrl in main.Cluster.active():
                main.log.debug( "{} components not ACTIVE: \n{}".format(
                    ctrl.name,
                    ctrl.CLI.sendline( "onos:scr-list | grep -v ACTIVE" ) ) )
            main.log.error( "Failed to start ONOS, stopping test" )
            main.cleanAndExit()

        main.step( "Activate apps defined in the params file" )
        # get data from the params; expected to be a comma separated string
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            # Verify activation after the grace period
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This should be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            # Apply every <component><setting>value</setting></component> entry
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700322
Jon Hallca319892017-06-15 15:25:22 -0700323 def commonChecks( self ):
324 # TODO: make this assertable or assert in here?
325 self.topicsCheck()
326 self.partitionsCheck()
327 self.pendingMapCheck()
328 self.appCheck()
329
330 def topicsCheck( self, extraTopics=[] ):
331 """
332 Check for work partition topics in leaders output
333 """
334 leaders = main.Cluster.next().leaders()
335 missing = False
336 try:
337 if leaders:
338 parsedLeaders = json.loads( leaders )
339 output = json.dumps( parsedLeaders,
340 sort_keys=True,
341 indent=4,
342 separators=( ',', ': ' ) )
Jon Hallca319892017-06-15 15:25:22 -0700343 # check for all intent partitions
344 topics = []
345 for i in range( 14 ):
346 topics.append( "work-partition-" + str( i ) )
347 topics += extraTopics
Jon Hallca319892017-06-15 15:25:22 -0700348 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
349 for topic in topics:
350 if topic not in ONOStopics:
351 main.log.error( "Error: " + topic +
352 " not in leaders" )
353 missing = True
354 else:
355 main.log.error( "leaders() returned None" )
356 except ( ValueError, TypeError ):
357 main.log.exception( "Error parsing leaders" )
358 main.log.error( repr( leaders ) )
359 if missing:
Jon Hall4173b242017-09-12 17:04:38 -0700360 # NOTE Can we refactor this into the Cluster class?
361 # Maybe an option to print the output of a command from each node?
Jon Hallca319892017-06-15 15:25:22 -0700362 for ctrl in main.Cluster.active():
363 response = ctrl.CLI.leaders( jsonFormat=False )
364 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
365 str( response ) )
366 return missing
367
368 def partitionsCheck( self ):
369 # TODO: return something assertable
370 partitions = main.Cluster.next().partitions()
371 try:
372 if partitions:
373 parsedPartitions = json.loads( partitions )
374 output = json.dumps( parsedPartitions,
375 sort_keys=True,
376 indent=4,
377 separators=( ',', ': ' ) )
378 main.log.debug( "Partitions: " + output )
379 # TODO check for a leader in all paritions
380 # TODO check for consistency among nodes
381 else:
382 main.log.error( "partitions() returned None" )
383 except ( ValueError, TypeError ):
384 main.log.exception( "Error parsing partitions" )
385 main.log.error( repr( partitions ) )
386
387 def pendingMapCheck( self ):
388 pendingMap = main.Cluster.next().pendingMap()
389 try:
390 if pendingMap:
391 parsedPending = json.loads( pendingMap )
392 output = json.dumps( parsedPending,
393 sort_keys=True,
394 indent=4,
395 separators=( ',', ': ' ) )
396 main.log.debug( "Pending map: " + output )
397 # TODO check something here?
398 else:
399 main.log.error( "pendingMap() returned None" )
400 except ( ValueError, TypeError ):
401 main.log.exception( "Error parsing pending map" )
402 main.log.error( repr( pendingMap ) )
403
404 def appCheck( self ):
405 """
406 Check App IDs on all nodes
407 """
408 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
Jon Hallb9d381e2018-02-05 12:02:10 -0800409 for i in range( 15 ):
410 # TODO modify retry or add a new version that accepts looking for
411 # a value in a return list instead of needing to match the entire
412 # return value to retry
413 appResults = main.Cluster.command( "appToIDCheck" )
414 appCheck = all( i == main.TRUE for i in appResults )
415 if appCheck:
416 break
417 else:
418 time.sleep( 5 )
419
Jon Hallca319892017-06-15 15:25:22 -0700420 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700421 ctrl = main.Cluster.active( 0 )
Jon Hallb9d381e2018-02-05 12:02:10 -0800422 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.pprint( ctrl.apps() ) ) )
423 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.pprint( ctrl.appIDs() ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700424 return appCheck
425
Jon Halle0f0b342017-04-18 11:43:47 -0700426 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
427 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700428 completedValues = main.Cluster.command( "workQueueTotalCompleted",
429 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700430 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700431 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700432 completedResult = all( completedResults )
433 if not completedResult:
434 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
435 workQueueName, completed, completedValues ) )
436
437 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700438 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
439 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700440 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700441 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700442 inProgressResult = all( inProgressResults )
443 if not inProgressResult:
444 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
445 workQueueName, inProgress, inProgressValues ) )
446
447 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700448 pendingValues = main.Cluster.command( "workQueueTotalPending",
449 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700450 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700451 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700452 pendingResult = all( pendingResults )
453 if not pendingResult:
454 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
455 workQueueName, pending, pendingValues ) )
456 return completedResult and inProgressResult and pendingResult
457
    def assignDevices( self, main ):
        """
        Assign devices to controllers.

        Points every Mininet switch at all cluster controller IPs via
        ovs-vsctl, verifies each switch lists every running controller,
        and rebuilds main.topoMappings ( host mac -> device id ) for the
        obelisk topology.

        Arguments:
        - main: the global TestON main object ( passed explicitly here )
        """
        import re
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
                               "and check that an ONOS node becomes the " + \
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = main.Cluster.getIps()
        swList = main.Mininet1.getSwitches().keys()
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        mastershipCheck = main.TRUE
        # Every switch must list every running controller in its
        # get-controller output
        for switch in swList:
            response = main.Mininet1.getSwController( switch )
            try:
                main.log.info( str( response ) )
                for ctrl in main.Cluster.runningNodes:
                    if re.search( "tcp:" + ctrl.ipAddress, response ):
                        mastershipCheck = mastershipCheck and main.TRUE
                    else:
                        main.log.error( "Error, node " + repr( ctrl ) + " is " +
                                        "not in the list of controllers " +
                                        switch + " is connecting to." )
                        mastershipCheck = main.FALSE
            except Exception:
                # e.g. response was not a string re.search can handle
                main.log.warn( "Error parsing get-controller response" )
                mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

        # Mappings for attachmentPoints from host mac to deviceID
        # TODO: make the key a dict with deviceIds and port #'s
        # FIXME: topo-HA/obelisk specific mappings:
        # key is mac and value is dpid
        main.topoMappings = {}
        for i in range( 1, 29 ):  # hosts 1 through 28
            # set up correct variables:
            # mac looks like 00:00:00:00:00:0N for host N
            macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
            # dpid values below encode the obelisk topology's switch numbering
            if i == 1:
                deviceId = "1000".zfill( 16 )
            elif i == 2:
                deviceId = "2000".zfill( 16 )
            elif i == 3:
                deviceId = "3000".zfill( 16 )
            elif i == 4:
                deviceId = "3004".zfill( 16 )
            elif i == 5:
                deviceId = "5000".zfill( 16 )
            elif i == 6:
                deviceId = "6000".zfill( 16 )
            elif i == 7:
                deviceId = "6007".zfill( 16 )
            elif i >= 8 and i <= 17:
                dpid = '3' + str( i ).zfill( 3 )
                deviceId = dpid.zfill( 16 )
            elif i >= 18 and i <= 27:
                dpid = '6' + str( i ).zfill( 3 )
                deviceId = dpid.zfill( 16 )
            elif i == 28:
                deviceId = "2800".zfill( 16 )
            main.topoMappings[ macId ] = deviceId
529
Devin Lim58046fa2017-07-05 16:55:00 -0700530 def assignIntents( self, main ):
531 """
532 Assign intents
533 """
534 import time
535 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700536 assert main, "main not defined"
537 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700538 try:
539 main.HAlabels
540 except ( NameError, AttributeError ):
541 main.log.error( "main.HAlabels not defined, setting to []" )
542 main.HAlabels = []
543 try:
544 main.HAdata
545 except ( NameError, AttributeError ):
546 main.log.error( "data not defined, setting to []" )
547 main.HAdata = []
548 main.case( "Adding host Intents" )
549 main.caseExplanation = "Discover hosts by using pingall then " +\
550 "assign predetermined host-to-host intents." +\
551 " After installation, check that the intent" +\
552 " is distributed to all nodes and the state" +\
553 " is INSTALLED"
554
555 # install onos-app-fwd
556 main.step( "Install reactive forwarding app" )
Jon Hall0e240372018-05-02 11:21:57 -0700557 installResults = main.Cluster.next().CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700558 utilities.assert_equals( expect=main.TRUE, actual=installResults,
559 onpass="Install fwd successful",
560 onfail="Install fwd failed" )
561
562 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700563 appCheck = self.appCheck()
564 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700565 onpass="App Ids seem to be correct",
566 onfail="Something is wrong with app Ids" )
567
568 main.step( "Discovering Hosts( Via pingall for now )" )
569 # FIXME: Once we have a host discovery mechanism, use that instead
570 # REACTIVE FWD test
571 pingResult = main.FALSE
572 passMsg = "Reactive Pingall test passed"
573 time1 = time.time()
574 pingResult = main.Mininet1.pingall()
575 time2 = time.time()
576 if not pingResult:
577 main.log.warn( "First pingall failed. Trying again..." )
578 pingResult = main.Mininet1.pingall()
579 passMsg += " on the second try"
580 utilities.assert_equals(
581 expect=main.TRUE,
582 actual=pingResult,
583 onpass=passMsg,
584 onfail="Reactive Pingall failed, " +
585 "one or more ping pairs failed" )
586 main.log.info( "Time for pingall: %2f seconds" %
587 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700588 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700589 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700590 # timeout for fwd flows
591 time.sleep( 11 )
592 # uninstall onos-app-fwd
593 main.step( "Uninstall reactive forwarding app" )
Jon Hall0e240372018-05-02 11:21:57 -0700594 uninstallResult = main.Cluster.next().CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700595 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
596 onpass="Uninstall fwd successful",
597 onfail="Uninstall fwd failed" )
598
599 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700600 appCheck2 = self.appCheck()
601 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700602 onpass="App Ids seem to be correct",
603 onfail="Something is wrong with app Ids" )
604
605 main.step( "Add host intents via cli" )
606 intentIds = []
607 # TODO: move the host numbers to params
608 # Maybe look at all the paths we ping?
609 intentAddResult = True
610 hostResult = main.TRUE
611 for i in range( 8, 18 ):
612 main.log.info( "Adding host intent between h" + str( i ) +
613 " and h" + str( i + 10 ) )
614 host1 = "00:00:00:00:00:" + \
615 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
616 host2 = "00:00:00:00:00:" + \
617 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
618 # NOTE: getHost can return None
Jon Hall0e240372018-05-02 11:21:57 -0700619 host1Dict = main.Cluster.next().CLI.getHost( host1 )
620 host2Dict = main.Cluster.next().CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700621 host1Id = None
622 host2Id = None
623 if host1Dict and host2Dict:
624 host1Id = host1Dict.get( 'id', None )
625 host2Id = host2Dict.get( 'id', None )
626 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700627 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700628 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700629 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700630 if tmpId:
631 main.log.info( "Added intent with id: " + tmpId )
632 intentIds.append( tmpId )
633 else:
634 main.log.error( "addHostIntent returned: " +
635 repr( tmpId ) )
636 else:
637 main.log.error( "Error, getHost() failed for h" + str( i ) +
638 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700639 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700640 try:
Jon Hallca319892017-06-15 15:25:22 -0700641 output = json.dumps( json.loads( hosts ),
642 sort_keys=True,
643 indent=4,
644 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700645 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700646 output = repr( hosts )
647 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700648 hostResult = main.FALSE
649 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
650 onpass="Found a host id for each host",
651 onfail="Error looking up host ids" )
652
653 intentStart = time.time()
Jon Hall0e240372018-05-02 11:21:57 -0700654 onosIds = main.Cluster.next().getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700655 main.log.info( "Submitted intents: " + str( intentIds ) )
656 main.log.info( "Intents in ONOS: " + str( onosIds ) )
657 for intent in intentIds:
658 if intent in onosIds:
659 pass # intent submitted is in onos
660 else:
661 intentAddResult = False
662 if intentAddResult:
663 intentStop = time.time()
664 else:
665 intentStop = None
666 # Print the intent states
Jon Hall0e240372018-05-02 11:21:57 -0700667 intents = main.Cluster.next().CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700668 intentStates = []
669 installedCheck = True
670 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
671 count = 0
672 try:
673 for intent in json.loads( intents ):
674 state = intent.get( 'state', None )
675 if "INSTALLED" not in state:
676 installedCheck = False
677 intentId = intent.get( 'id', None )
678 intentStates.append( ( intentId, state ) )
679 except ( ValueError, TypeError ):
680 main.log.exception( "Error parsing intents" )
681 # add submitted intents not in the store
682 tmplist = [ i for i, s in intentStates ]
683 missingIntents = False
684 for i in intentIds:
685 if i not in tmplist:
686 intentStates.append( ( i, " - " ) )
687 missingIntents = True
688 intentStates.sort()
689 for i, s in intentStates:
690 count += 1
691 main.log.info( "%-6s%-15s%-15s" %
692 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700693 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700694
695 intentAddResult = bool( intentAddResult and not missingIntents and
696 installedCheck )
697 if not intentAddResult:
698 main.log.error( "Error in pushing host intents to ONOS" )
699
700 main.step( "Intent Anti-Entropy dispersion" )
701 for j in range( 100 ):
702 correct = True
703 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700704 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700705 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700706 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700707 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700708 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700709 str( sorted( onosIds ) ) )
710 if sorted( ids ) != sorted( intentIds ):
711 main.log.warn( "Set of intent IDs doesn't match" )
712 correct = False
713 break
714 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700715 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700716 for intent in intents:
717 if intent[ 'state' ] != "INSTALLED":
718 main.log.warn( "Intent " + intent[ 'id' ] +
719 " is " + intent[ 'state' ] )
720 correct = False
721 break
722 if correct:
723 break
724 else:
725 time.sleep( 1 )
726 if not intentStop:
727 intentStop = time.time()
728 global gossipTime
729 gossipTime = intentStop - intentStart
730 main.log.info( "It took about " + str( gossipTime ) +
731 " seconds for all intents to appear in each node" )
732 append = False
733 title = "Gossip Intents"
734 count = 1
735 while append is False:
736 curTitle = title + str( count )
737 if curTitle not in main.HAlabels:
738 main.HAlabels.append( curTitle )
739 main.HAdata.append( str( gossipTime ) )
740 append = True
741 else:
742 count += 1
743 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700744 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700745 utilities.assert_greater_equals(
746 expect=maxGossipTime, actual=gossipTime,
747 onpass="ECM anti-entropy for intents worked within " +
748 "expected time",
749 onfail="Intent ECM anti-entropy took too long. " +
750 "Expected time:{}, Actual time:{}".format( maxGossipTime,
751 gossipTime ) )
752 if gossipTime <= maxGossipTime:
753 intentAddResult = True
754
Jon Hallca319892017-06-15 15:25:22 -0700755 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700756 if not intentAddResult or "key" in pendingMap:
Devin Lim58046fa2017-07-05 16:55:00 -0700757 installedCheck = True
758 main.log.info( "Sleeping 60 seconds to see if intents are found" )
759 time.sleep( 60 )
Jon Hall0e240372018-05-02 11:21:57 -0700760 onosIds = main.Cluster.next().getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700761 main.log.info( "Submitted intents: " + str( intentIds ) )
762 main.log.info( "Intents in ONOS: " + str( onosIds ) )
763 # Print the intent states
Jon Hall0e240372018-05-02 11:21:57 -0700764 intents = main.Cluster.next().CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700765 intentStates = []
766 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
767 count = 0
768 try:
769 for intent in json.loads( intents ):
770 # Iter through intents of a node
771 state = intent.get( 'state', None )
772 if "INSTALLED" not in state:
773 installedCheck = False
774 intentId = intent.get( 'id', None )
775 intentStates.append( ( intentId, state ) )
776 except ( ValueError, TypeError ):
777 main.log.exception( "Error parsing intents" )
778 # add submitted intents not in the store
779 tmplist = [ i for i, s in intentStates ]
780 for i in intentIds:
781 if i not in tmplist:
782 intentStates.append( ( i, " - " ) )
783 intentStates.sort()
784 for i, s in intentStates:
785 count += 1
786 main.log.info( "%-6s%-15s%-15s" %
787 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700788 self.topicsCheck( [ "org.onosproject.election" ] )
789 self.partitionsCheck()
790 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700791
Jon Hallca319892017-06-15 15:25:22 -0700792 def pingAcrossHostIntent( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -0700793 """
794 Ping across added host intents
795 """
796 import json
797 import time
Devin Lim58046fa2017-07-05 16:55:00 -0700798 assert main, "main not defined"
799 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700800 main.case( "Verify connectivity by sending traffic across Intents" )
801 main.caseExplanation = "Ping across added host intents to check " +\
802 "functionality and check the state of " +\
803 "the intent"
804
Jon Hallca319892017-06-15 15:25:22 -0700805 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700806 main.step( "Check Intent state" )
807 installedCheck = False
808 loopCount = 0
Jon Hall5d5876e2017-11-30 09:33:16 -0800809 while not installedCheck and loopCount < 90:
Devin Lim58046fa2017-07-05 16:55:00 -0700810 installedCheck = True
811 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700812 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700813 intentStates = []
814 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
815 count = 0
816 # Iter through intents of a node
817 try:
818 for intent in json.loads( intents ):
819 state = intent.get( 'state', None )
820 if "INSTALLED" not in state:
821 installedCheck = False
Jon Hall8bafdc02017-09-05 11:36:26 -0700822 main.log.debug( "Failed intent: " + str( intent ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700823 intentId = intent.get( 'id', None )
824 intentStates.append( ( intentId, state ) )
825 except ( ValueError, TypeError ):
826 main.log.exception( "Error parsing intents." )
827 # Print states
828 intentStates.sort()
829 for i, s in intentStates:
830 count += 1
831 main.log.info( "%-6s%-15s%-15s" %
832 ( str( count ), str( i ), str( s ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700833 if not installedCheck:
834 time.sleep( 1 )
835 loopCount += 1
836 utilities.assert_equals( expect=True, actual=installedCheck,
837 onpass="Intents are all INSTALLED",
838 onfail="Intents are not all in " +
839 "INSTALLED state" )
840
841 main.step( "Ping across added host intents" )
842 PingResult = main.TRUE
843 for i in range( 8, 18 ):
844 ping = main.Mininet1.pingHost( src="h" + str( i ),
845 target="h" + str( i + 10 ) )
846 PingResult = PingResult and ping
847 if ping == main.FALSE:
848 main.log.warn( "Ping failed between h" + str( i ) +
849 " and h" + str( i + 10 ) )
850 elif ping == main.TRUE:
851 main.log.info( "Ping test passed!" )
852 # Don't set PingResult or you'd override failures
853 if PingResult == main.FALSE:
854 main.log.error(
855 "Intents have not been installed correctly, pings failed." )
856 # TODO: pretty print
Devin Lim58046fa2017-07-05 16:55:00 -0700857 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700858 tmpIntents = onosCli.CLI.intents()
Jon Hallca319892017-06-15 15:25:22 -0700859 output = json.dumps( json.loads( tmpIntents ),
860 sort_keys=True,
861 indent=4,
862 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700863 except ( ValueError, TypeError ):
Jon Hall4173b242017-09-12 17:04:38 -0700864 output = repr( tmpIntents )
Jon Hallca319892017-06-15 15:25:22 -0700865 main.log.debug( "ONOS1 intents: " + output )
Devin Lim58046fa2017-07-05 16:55:00 -0700866 utilities.assert_equals(
867 expect=main.TRUE,
868 actual=PingResult,
869 onpass="Intents have been installed correctly and pings work",
870 onfail="Intents have not been installed correctly, pings failed." )
871
872 main.step( "Check leadership of topics" )
Jon Hallca319892017-06-15 15:25:22 -0700873 topicsCheck = self.topicsCheck()
874 utilities.assert_equals( expect=False, actual=topicsCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700875 onpass="intent Partitions is in leaders",
Jon Hallca319892017-06-15 15:25:22 -0700876 onfail="Some topics were lost" )
877 self.partitionsCheck()
878 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700879
880 if not installedCheck:
881 main.log.info( "Waiting 60 seconds to see if the state of " +
882 "intents change" )
883 time.sleep( 60 )
884 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700885 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700886 intentStates = []
887 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
888 count = 0
889 # Iter through intents of a node
890 try:
891 for intent in json.loads( intents ):
892 state = intent.get( 'state', None )
893 if "INSTALLED" not in state:
894 installedCheck = False
895 intentId = intent.get( 'id', None )
896 intentStates.append( ( intentId, state ) )
897 except ( ValueError, TypeError ):
898 main.log.exception( "Error parsing intents." )
899 intentStates.sort()
900 for i, s in intentStates:
901 count += 1
902 main.log.info( "%-6s%-15s%-15s" %
903 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700904 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700905
Devin Lim58046fa2017-07-05 16:55:00 -0700906 main.step( "Wait a minute then ping again" )
907 # the wait is above
908 PingResult = main.TRUE
909 for i in range( 8, 18 ):
910 ping = main.Mininet1.pingHost( src="h" + str( i ),
911 target="h" + str( i + 10 ) )
912 PingResult = PingResult and ping
913 if ping == main.FALSE:
914 main.log.warn( "Ping failed between h" + str( i ) +
915 " and h" + str( i + 10 ) )
916 elif ping == main.TRUE:
917 main.log.info( "Ping test passed!" )
918 # Don't set PingResult or you'd override failures
919 if PingResult == main.FALSE:
920 main.log.error(
921 "Intents have not been installed correctly, pings failed." )
922 # TODO: pretty print
Jon Hallca319892017-06-15 15:25:22 -0700923 main.log.warn( str( onosCli.name ) + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -0700924 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700925 tmpIntents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700926 main.log.warn( json.dumps( json.loads( tmpIntents ),
927 sort_keys=True,
928 indent=4,
929 separators=( ',', ': ' ) ) )
930 except ( ValueError, TypeError ):
931 main.log.warn( repr( tmpIntents ) )
932 utilities.assert_equals(
933 expect=main.TRUE,
934 actual=PingResult,
935 onpass="Intents have been installed correctly and pings work",
936 onfail="Intents have not been installed correctly, pings failed." )
937
Devin Lim142b5342017-07-20 15:22:39 -0700938 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700939 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700940 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700941 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700942 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700943 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -0700944 actual=rolesNotNull,
945 onpass="Each device has a master",
946 onfail="Some devices don't have a master assigned" )
947
Devin Lim142b5342017-07-20 15:22:39 -0700948 def checkTheRole( self ):
949 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -0700950 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -0700951 consistentMastership = True
952 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -0700953 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -0700954 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700955 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -0700956 main.log.error( "Error in getting " + node + " roles" )
957 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -0700958 repr( ONOSMastership[ i ] ) )
959 rolesResults = False
960 utilities.assert_equals(
961 expect=True,
962 actual=rolesResults,
963 onpass="No error in reading roles output",
964 onfail="Error in reading roles from ONOS" )
965
966 main.step( "Check for consistency in roles from each controller" )
967 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
968 main.log.info(
969 "Switch roles are consistent across all ONOS nodes" )
970 else:
971 consistentMastership = False
972 utilities.assert_equals(
973 expect=True,
974 actual=consistentMastership,
975 onpass="Switch roles are consistent across all ONOS nodes",
976 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -0700977 return ONOSMastership, rolesResults, consistentMastership
978
979 def checkingIntents( self ):
980 main.step( "Get the intents from each controller" )
981 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
982 intentsResults = True
983 for i in range( len( ONOSIntents ) ):
984 node = str( main.Cluster.active( i ) )
985 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
986 main.log.error( "Error in getting " + node + " intents" )
987 main.log.warn( node + " intents response: " +
988 repr( ONOSIntents[ i ] ) )
989 intentsResults = False
990 utilities.assert_equals(
991 expect=True,
992 actual=intentsResults,
993 onpass="No error in reading intents output",
994 onfail="Error in reading intents from ONOS" )
995 return ONOSIntents, intentsResults
996
997 def readingState( self, main ):
998 """
999 Reading state of ONOS
1000 """
1001 import json
Devin Lim142b5342017-07-20 15:22:39 -07001002 assert main, "main not defined"
1003 assert utilities.assert_equals, "utilities.assert_equals not defined"
1004 try:
1005 from tests.dependencies.topology import Topology
1006 except ImportError:
1007 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001008 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001009 try:
1010 main.topoRelated
1011 except ( NameError, AttributeError ):
1012 main.topoRelated = Topology()
1013 main.case( "Setting up and gathering data for current state" )
1014 # The general idea for this test case is to pull the state of
1015 # ( intents,flows, topology,... ) from each ONOS node
1016 # We can then compare them with each other and also with past states
1017
1018 global mastershipState
1019 mastershipState = '[]'
1020
1021 self.checkRoleNotNull()
1022
1023 main.step( "Get the Mastership of each switch from each controller" )
1024 mastershipCheck = main.FALSE
1025
1026 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001027
1028 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001029 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001030 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001031 try:
1032 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001033 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001034 json.dumps(
1035 json.loads( ONOSMastership[ i ] ),
1036 sort_keys=True,
1037 indent=4,
1038 separators=( ',', ': ' ) ) )
1039 except ( ValueError, TypeError ):
1040 main.log.warn( repr( ONOSMastership[ i ] ) )
1041 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001042 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001043 mastershipState = ONOSMastership[ 0 ]
1044
Devin Lim58046fa2017-07-05 16:55:00 -07001045 global intentState
1046 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001047 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001048 intentCheck = main.FALSE
1049 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001050
Devin Lim58046fa2017-07-05 16:55:00 -07001051 main.step( "Check for consistency in Intents from each controller" )
1052 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1053 main.log.info( "Intents are consistent across all ONOS " +
1054 "nodes" )
1055 else:
1056 consistentIntents = False
1057 main.log.error( "Intents not consistent" )
1058 utilities.assert_equals(
1059 expect=True,
1060 actual=consistentIntents,
1061 onpass="Intents are consistent across all ONOS nodes",
1062 onfail="ONOS nodes have different views of intents" )
1063
1064 if intentsResults:
1065 # Try to make it easy to figure out what is happening
1066 #
1067 # Intent ONOS1 ONOS2 ...
1068 # 0x01 INSTALLED INSTALLING
1069 # ... ... ...
1070 # ... ... ...
1071 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001072 for ctrl in main.Cluster.active():
1073 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001074 main.log.warn( title )
1075 # get all intent keys in the cluster
1076 keys = []
1077 try:
1078 # Get the set of all intent keys
1079 for nodeStr in ONOSIntents:
1080 node = json.loads( nodeStr )
1081 for intent in node:
1082 keys.append( intent.get( 'id' ) )
1083 keys = set( keys )
1084 # For each intent key, print the state on each node
1085 for key in keys:
1086 row = "%-13s" % key
1087 for nodeStr in ONOSIntents:
1088 node = json.loads( nodeStr )
1089 for intent in node:
1090 if intent.get( 'id', "Error" ) == key:
1091 row += "%-15s" % intent.get( 'state' )
1092 main.log.warn( row )
1093 # End of intent state table
1094 except ValueError as e:
1095 main.log.exception( e )
1096 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1097
1098 if intentsResults and not consistentIntents:
1099 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001100 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001101 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1102 sort_keys=True,
1103 indent=4,
1104 separators=( ',', ': ' ) ) )
1105 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001106 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001107 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001108 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001109 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1110 sort_keys=True,
1111 indent=4,
1112 separators=( ',', ': ' ) ) )
1113 else:
Jon Hallca319892017-06-15 15:25:22 -07001114 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001115 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001116 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001117 intentState = ONOSIntents[ 0 ]
1118
1119 main.step( "Get the flows from each controller" )
1120 global flowState
1121 flowState = []
Jon Hall4173b242017-09-12 17:04:38 -07001122 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001123 ONOSFlowsJson = []
1124 flowCheck = main.FALSE
1125 consistentFlows = True
1126 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001127 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001128 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001129 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001130 main.log.error( "Error in getting " + node + " flows" )
1131 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001132 repr( ONOSFlows[ i ] ) )
1133 flowsResults = False
1134 ONOSFlowsJson.append( None )
1135 else:
1136 try:
1137 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1138 except ( ValueError, TypeError ):
1139 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001140 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001141 " response as json." )
1142 main.log.error( repr( ONOSFlows[ i ] ) )
1143 ONOSFlowsJson.append( None )
1144 flowsResults = False
1145 utilities.assert_equals(
1146 expect=True,
1147 actual=flowsResults,
1148 onpass="No error in reading flows output",
1149 onfail="Error in reading flows from ONOS" )
1150
1151 main.step( "Check for consistency in Flows from each controller" )
1152 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1153 if all( tmp ):
1154 main.log.info( "Flow count is consistent across all ONOS nodes" )
1155 else:
1156 consistentFlows = False
1157 utilities.assert_equals(
1158 expect=True,
1159 actual=consistentFlows,
1160 onpass="The flow count is consistent across all ONOS nodes",
1161 onfail="ONOS nodes have different flow counts" )
1162
1163 if flowsResults and not consistentFlows:
1164 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001165 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001166 try:
1167 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001168 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001169 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1170 indent=4, separators=( ',', ': ' ) ) )
1171 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001172 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001173 repr( ONOSFlows[ i ] ) )
1174 elif flowsResults and consistentFlows:
1175 flowCheck = main.TRUE
1176 flowState = ONOSFlows[ 0 ]
1177
1178 main.step( "Get the OF Table entries" )
1179 global flows
Jon Hallab611372018-02-21 15:26:05 -08001180 flows = {}
1181 for swName, swDetails in main.Mininet1.getSwitches().items():
1182 main.log.debug( repr( swName ) + repr( swDetails ) )
1183 flows[ swName ] = main.Mininet1.getFlowTable( swName, version="1.3", debug=False )
Devin Lim58046fa2017-07-05 16:55:00 -07001184 if flowCheck == main.FALSE:
1185 for table in flows:
1186 main.log.warn( table )
1187 # TODO: Compare switch flow tables with ONOS flow tables
1188
1189 main.step( "Start continuous pings" )
Jon Hallab611372018-02-21 15:26:05 -08001190 if main.params.get( 'PING', False ):
1191 # TODO: Make this more dynamic and less hardcoded, ie, # or ping pairs
1192 main.Mininet2.pingLong(
1193 src=main.params[ 'PING' ][ 'source1' ],
1194 target=main.params[ 'PING' ][ 'target1' ],
1195 pingTime=500 )
1196 main.Mininet2.pingLong(
1197 src=main.params[ 'PING' ][ 'source2' ],
1198 target=main.params[ 'PING' ][ 'target2' ],
1199 pingTime=500 )
1200 main.Mininet2.pingLong(
1201 src=main.params[ 'PING' ][ 'source3' ],
1202 target=main.params[ 'PING' ][ 'target3' ],
1203 pingTime=500 )
1204 main.Mininet2.pingLong(
1205 src=main.params[ 'PING' ][ 'source4' ],
1206 target=main.params[ 'PING' ][ 'target4' ],
1207 pingTime=500 )
1208 main.Mininet2.pingLong(
1209 src=main.params[ 'PING' ][ 'source5' ],
1210 target=main.params[ 'PING' ][ 'target5' ],
1211 pingTime=500 )
1212 main.Mininet2.pingLong(
1213 src=main.params[ 'PING' ][ 'source6' ],
1214 target=main.params[ 'PING' ][ 'target6' ],
1215 pingTime=500 )
1216 main.Mininet2.pingLong(
1217 src=main.params[ 'PING' ][ 'source7' ],
1218 target=main.params[ 'PING' ][ 'target7' ],
1219 pingTime=500 )
1220 main.Mininet2.pingLong(
1221 src=main.params[ 'PING' ][ 'source8' ],
1222 target=main.params[ 'PING' ][ 'target8' ],
1223 pingTime=500 )
1224 main.Mininet2.pingLong(
1225 src=main.params[ 'PING' ][ 'source9' ],
1226 target=main.params[ 'PING' ][ 'target9' ],
1227 pingTime=500 )
1228 main.Mininet2.pingLong(
1229 src=main.params[ 'PING' ][ 'source10' ],
1230 target=main.params[ 'PING' ][ 'target10' ],
1231 pingTime=500 )
Devin Lim58046fa2017-07-05 16:55:00 -07001232
1233 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001234 devices = main.topoRelated.getAll( "devices" )
1235 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1236 ports = main.topoRelated.getAll( "ports" )
1237 links = main.topoRelated.getAll( "links" )
1238 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001239 # Compare json objects for hosts and dataplane clusters
1240
1241 # hosts
1242 main.step( "Host view is consistent across ONOS nodes" )
1243 consistentHostsResult = main.TRUE
1244 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001245 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001246 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1247 if hosts[ controller ] == hosts[ 0 ]:
1248 continue
1249 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001250 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001251 controllerStr +
1252 " is inconsistent with ONOS1" )
1253 main.log.warn( repr( hosts[ controller ] ) )
1254 consistentHostsResult = main.FALSE
1255
1256 else:
Jon Hallca319892017-06-15 15:25:22 -07001257 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001258 controllerStr )
1259 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001260 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001261 " hosts response: " +
1262 repr( hosts[ controller ] ) )
1263 utilities.assert_equals(
1264 expect=main.TRUE,
1265 actual=consistentHostsResult,
1266 onpass="Hosts view is consistent across all ONOS nodes",
1267 onfail="ONOS nodes have different views of hosts" )
1268
1269 main.step( "Each host has an IP address" )
1270 ipResult = main.TRUE
1271 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001272 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001273 if hosts[ controller ]:
1274 for host in hosts[ controller ]:
1275 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001276 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001277 controllerStr + ": " + str( host ) )
1278 ipResult = main.FALSE
1279 utilities.assert_equals(
1280 expect=main.TRUE,
1281 actual=ipResult,
1282 onpass="The ips of the hosts aren't empty",
1283 onfail="The ip of at least one host is missing" )
1284
1285 # Strongly connected clusters of devices
1286 main.step( "Cluster view is consistent across ONOS nodes" )
1287 consistentClustersResult = main.TRUE
1288 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001289 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001290 if "Error" not in clusters[ controller ]:
1291 if clusters[ controller ] == clusters[ 0 ]:
1292 continue
1293 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001294 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001295 " is inconsistent with ONOS1" )
1296 consistentClustersResult = main.FALSE
1297
1298 else:
1299 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001300 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001301 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001302 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001303 " clusters response: " +
1304 repr( clusters[ controller ] ) )
1305 utilities.assert_equals(
1306 expect=main.TRUE,
1307 actual=consistentClustersResult,
1308 onpass="Clusters view is consistent across all ONOS nodes",
1309 onfail="ONOS nodes have different views of clusters" )
1310 if not consistentClustersResult:
1311 main.log.debug( clusters )
1312
1313 # there should always only be one cluster
1314 main.step( "Cluster view correct across ONOS nodes" )
1315 try:
1316 numClusters = len( json.loads( clusters[ 0 ] ) )
1317 except ( ValueError, TypeError ):
1318 main.log.exception( "Error parsing clusters[0]: " +
1319 repr( clusters[ 0 ] ) )
1320 numClusters = "ERROR"
1321 utilities.assert_equals(
1322 expect=1,
1323 actual=numClusters,
1324 onpass="ONOS shows 1 SCC",
1325 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1326
1327 main.step( "Comparing ONOS topology to MN" )
1328 devicesResults = main.TRUE
1329 linksResults = main.TRUE
1330 hostsResults = main.TRUE
1331 mnSwitches = main.Mininet1.getSwitches()
1332 mnLinks = main.Mininet1.getLinks()
1333 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001334 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001335 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001336 currentDevicesResult = main.topoRelated.compareDevicePort(
1337 main.Mininet1, controller,
1338 mnSwitches, devices, ports )
1339 utilities.assert_equals( expect=main.TRUE,
1340 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001341 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001342 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001343 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001344 " Switches view is incorrect" )
1345
1346 currentLinksResult = main.topoRelated.compareBase( links, controller,
1347 main.Mininet1.compareLinks,
1348 [ mnSwitches, mnLinks ] )
1349 utilities.assert_equals( expect=main.TRUE,
1350 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001351 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001352 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001353 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001354 " links view is incorrect" )
1355
1356 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1357 currentHostsResult = main.Mininet1.compareHosts(
1358 mnHosts,
1359 hosts[ controller ] )
1360 else:
1361 currentHostsResult = main.FALSE
1362 utilities.assert_equals( expect=main.TRUE,
1363 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001364 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001365 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001366 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001367 " hosts don't match Mininet" )
1368
1369 devicesResults = devicesResults and currentDevicesResult
1370 linksResults = linksResults and currentLinksResult
1371 hostsResults = hostsResults and currentHostsResult
1372
1373 main.step( "Device information is correct" )
1374 utilities.assert_equals(
1375 expect=main.TRUE,
1376 actual=devicesResults,
1377 onpass="Device information is correct",
1378 onfail="Device information is incorrect" )
1379
1380 main.step( "Links are correct" )
1381 utilities.assert_equals(
1382 expect=main.TRUE,
1383 actual=linksResults,
1384 onpass="Link are correct",
1385 onfail="Links are incorrect" )
1386
1387 main.step( "Hosts are correct" )
1388 utilities.assert_equals(
1389 expect=main.TRUE,
1390 actual=hostsResults,
1391 onpass="Hosts are correct",
1392 onfail="Hosts are incorrect" )
1393
1394 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001395 """
1396 Check for basic functionality with distributed primitives
1397 """
Jon Halle0f0b342017-04-18 11:43:47 -07001398 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001399 try:
1400 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001401 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001402 assert main.pCounterName, "main.pCounterName not defined"
1403 assert main.onosSetName, "main.onosSetName not defined"
1404 # NOTE: assert fails if value is 0/None/Empty/False
1405 try:
1406 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001407 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001408 main.log.error( "main.pCounterValue not defined, setting to 0" )
1409 main.pCounterValue = 0
1410 try:
1411 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001412 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001413 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001414 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001415 # Variables for the distributed primitives tests. These are local only
1416 addValue = "a"
1417 addAllValue = "a b c d e f"
1418 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001419 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001420 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001421 workQueueName = "TestON-Queue"
1422 workQueueCompleted = 0
1423 workQueueInProgress = 0
1424 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001425
1426 description = "Check for basic functionality with distributed " +\
1427 "primitives"
1428 main.case( description )
1429 main.caseExplanation = "Test the methods of the distributed " +\
1430 "primitives (counters and sets) throught the cli"
1431 # DISTRIBUTED ATOMIC COUNTERS
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001432 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001433 pCounters = main.Cluster.command( "counterTestAddAndGet",
1434 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001435 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001436 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001437 main.pCounterValue += 1
1438 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001439 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001440 pCounterResults = True
1441 for i in addedPValues:
1442 tmpResult = i in pCounters
1443 pCounterResults = pCounterResults and tmpResult
1444 if not tmpResult:
1445 main.log.error( str( i ) + " is not in partitioned "
1446 "counter incremented results" )
1447 utilities.assert_equals( expect=True,
1448 actual=pCounterResults,
1449 onpass="Default counter incremented",
1450 onfail="Error incrementing default" +
1451 " counter" )
1452
1453 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001454 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1455 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001456 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001457 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001458 addedPValues.append( main.pCounterValue )
1459 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001460 # Check that counter incremented numController times
1461 pCounterResults = True
1462 for i in addedPValues:
1463 tmpResult = i in pCounters
1464 pCounterResults = pCounterResults and tmpResult
1465 if not tmpResult:
1466 main.log.error( str( i ) + " is not in partitioned "
1467 "counter incremented results" )
1468 utilities.assert_equals( expect=True,
1469 actual=pCounterResults,
1470 onpass="Default counter incremented",
1471 onfail="Error incrementing default" +
1472 " counter" )
1473
1474 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001475 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001476 utilities.assert_equals( expect=main.TRUE,
1477 actual=incrementCheck,
1478 onpass="Added counters are correct",
1479 onfail="Added counters are incorrect" )
1480
1481 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001482 pCounters = main.Cluster.command( "counterTestAddAndGet",
1483 args=[ main.pCounterName ],
1484 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001485 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001486 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001487 main.pCounterValue += -8
1488 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001489 # Check that counter incremented numController times
1490 pCounterResults = True
1491 for i in addedPValues:
1492 tmpResult = i in pCounters
1493 pCounterResults = pCounterResults and tmpResult
1494 if not tmpResult:
1495 main.log.error( str( i ) + " is not in partitioned "
1496 "counter incremented results" )
1497 utilities.assert_equals( expect=True,
1498 actual=pCounterResults,
1499 onpass="Default counter incremented",
1500 onfail="Error incrementing default" +
1501 " counter" )
1502
1503 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001504 pCounters = main.Cluster.command( "counterTestAddAndGet",
1505 args=[ main.pCounterName ],
1506 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001507 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001508 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001509 main.pCounterValue += 5
1510 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001511
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001512 # Check that counter incremented numController times
1513 pCounterResults = True
1514 for i in addedPValues:
1515 tmpResult = i in pCounters
1516 pCounterResults = pCounterResults and tmpResult
1517 if not tmpResult:
1518 main.log.error( str( i ) + " is not in partitioned "
1519 "counter incremented results" )
1520 utilities.assert_equals( expect=True,
1521 actual=pCounterResults,
1522 onpass="Default counter incremented",
1523 onfail="Error incrementing default" +
1524 " counter" )
1525
1526 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001527 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1528 args=[ main.pCounterName ],
1529 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001530 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001531 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001532 addedPValues.append( main.pCounterValue )
1533 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001534 # Check that counter incremented numController times
1535 pCounterResults = True
1536 for i in addedPValues:
1537 tmpResult = i in pCounters
1538 pCounterResults = pCounterResults and tmpResult
1539 if not tmpResult:
1540 main.log.error( str( i ) + " is not in partitioned "
1541 "counter incremented results" )
1542 utilities.assert_equals( expect=True,
1543 actual=pCounterResults,
1544 onpass="Default counter incremented",
1545 onfail="Error incrementing default" +
1546 " counter" )
1547
1548 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001549 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001550 utilities.assert_equals( expect=main.TRUE,
1551 actual=incrementCheck,
1552 onpass="Added counters are correct",
1553 onfail="Added counters are incorrect" )
1554
1555 # DISTRIBUTED SETS
1556 main.step( "Distributed Set get" )
1557 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001558 getResponses = main.Cluster.command( "setTestGet",
1559 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001560 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001561 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001562 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001563 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001564 current = set( getResponses[ i ] )
1565 if len( current ) == len( getResponses[ i ] ):
1566 # no repeats
1567 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001568 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001569 " has incorrect view" +
1570 " of set " + main.onosSetName + ":\n" +
1571 str( getResponses[ i ] ) )
1572 main.log.debug( "Expected: " + str( main.onosSet ) )
1573 main.log.debug( "Actual: " + str( current ) )
1574 getResults = main.FALSE
1575 else:
1576 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001577 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001578 " has repeat elements in" +
1579 " set " + main.onosSetName + ":\n" +
1580 str( getResponses[ i ] ) )
1581 getResults = main.FALSE
1582 elif getResponses[ i ] == main.ERROR:
1583 getResults = main.FALSE
1584 utilities.assert_equals( expect=main.TRUE,
1585 actual=getResults,
1586 onpass="Set elements are correct",
1587 onfail="Set elements are incorrect" )
1588
1589 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001590 sizeResponses = main.Cluster.command( "setTestSize",
1591 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001592 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001593 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001594 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001595 if size != sizeResponses[ i ]:
1596 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001597 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001598 " expected a size of " + str( size ) +
1599 " for set " + main.onosSetName +
1600 " but got " + str( sizeResponses[ i ] ) )
1601 utilities.assert_equals( expect=main.TRUE,
1602 actual=sizeResults,
1603 onpass="Set sizes are correct",
1604 onfail="Set sizes are incorrect" )
1605
1606 main.step( "Distributed Set add()" )
1607 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001608 addResponses = main.Cluster.command( "setTestAdd",
1609 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001610 # main.TRUE = successfully changed the set
1611 # main.FALSE = action resulted in no change in set
1612 # main.ERROR - Some error in executing the function
1613 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001614 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001615 if addResponses[ i ] == main.TRUE:
1616 # All is well
1617 pass
1618 elif addResponses[ i ] == main.FALSE:
1619 # Already in set, probably fine
1620 pass
1621 elif addResponses[ i ] == main.ERROR:
1622 # Error in execution
1623 addResults = main.FALSE
1624 else:
1625 # unexpected result
1626 addResults = main.FALSE
1627 if addResults != main.TRUE:
1628 main.log.error( "Error executing set add" )
1629
1630 # Check if set is still correct
1631 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001632 getResponses = main.Cluster.command( "setTestGet",
1633 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001634 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001635 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001636 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001637 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001638 current = set( getResponses[ i ] )
1639 if len( current ) == len( getResponses[ i ] ):
1640 # no repeats
1641 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001642 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001643 " of set " + main.onosSetName + ":\n" +
1644 str( getResponses[ i ] ) )
1645 main.log.debug( "Expected: " + str( main.onosSet ) )
1646 main.log.debug( "Actual: " + str( current ) )
1647 getResults = main.FALSE
1648 else:
1649 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001650 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001651 " set " + main.onosSetName + ":\n" +
1652 str( getResponses[ i ] ) )
1653 getResults = main.FALSE
1654 elif getResponses[ i ] == main.ERROR:
1655 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001656 sizeResponses = main.Cluster.command( "setTestSize",
1657 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001658 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001659 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001660 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001661 if size != sizeResponses[ i ]:
1662 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001663 main.log.error( node + " expected a size of " +
1664 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001665 " but got " + str( sizeResponses[ i ] ) )
1666 addResults = addResults and getResults and sizeResults
1667 utilities.assert_equals( expect=main.TRUE,
1668 actual=addResults,
1669 onpass="Set add correct",
1670 onfail="Set add was incorrect" )
1671
1672 main.step( "Distributed Set addAll()" )
1673 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001674 addResponses = main.Cluster.command( "setTestAdd",
1675 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001676 # main.TRUE = successfully changed the set
1677 # main.FALSE = action resulted in no change in set
1678 # main.ERROR - Some error in executing the function
1679 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001680 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001681 if addResponses[ i ] == main.TRUE:
1682 # All is well
1683 pass
1684 elif addResponses[ i ] == main.FALSE:
1685 # Already in set, probably fine
1686 pass
1687 elif addResponses[ i ] == main.ERROR:
1688 # Error in execution
1689 addAllResults = main.FALSE
1690 else:
1691 # unexpected result
1692 addAllResults = main.FALSE
1693 if addAllResults != main.TRUE:
1694 main.log.error( "Error executing set addAll" )
1695
1696 # Check if set is still correct
1697 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001698 getResponses = main.Cluster.command( "setTestGet",
1699 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001700 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001701 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001702 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001703 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001704 current = set( getResponses[ i ] )
1705 if len( current ) == len( getResponses[ i ] ):
1706 # no repeats
1707 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001708 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001709 " of set " + main.onosSetName + ":\n" +
1710 str( getResponses[ i ] ) )
1711 main.log.debug( "Expected: " + str( main.onosSet ) )
1712 main.log.debug( "Actual: " + str( current ) )
1713 getResults = main.FALSE
1714 else:
1715 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001716 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001717 " set " + main.onosSetName + ":\n" +
1718 str( getResponses[ i ] ) )
1719 getResults = main.FALSE
1720 elif getResponses[ i ] == main.ERROR:
1721 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001722 sizeResponses = main.Cluster.command( "setTestSize",
1723 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001724 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001725 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001726 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001727 if size != sizeResponses[ i ]:
1728 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001729 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001730 " for set " + main.onosSetName +
1731 " but got " + str( sizeResponses[ i ] ) )
1732 addAllResults = addAllResults and getResults and sizeResults
1733 utilities.assert_equals( expect=main.TRUE,
1734 actual=addAllResults,
1735 onpass="Set addAll correct",
1736 onfail="Set addAll was incorrect" )
1737
1738 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001739 containsResponses = main.Cluster.command( "setTestGet",
1740 args=[ main.onosSetName ],
1741 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001742 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001743 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001744 if containsResponses[ i ] == main.ERROR:
1745 containsResults = main.FALSE
1746 else:
1747 containsResults = containsResults and\
1748 containsResponses[ i ][ 1 ]
1749 utilities.assert_equals( expect=main.TRUE,
1750 actual=containsResults,
1751 onpass="Set contains is functional",
1752 onfail="Set contains failed" )
1753
1754 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001755 containsAllResponses = main.Cluster.command( "setTestGet",
1756 args=[ main.onosSetName ],
1757 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001758 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001759 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001760 if containsResponses[ i ] == main.ERROR:
1761 containsResults = main.FALSE
1762 else:
1763 containsResults = containsResults and\
1764 containsResponses[ i ][ 1 ]
1765 utilities.assert_equals( expect=main.TRUE,
1766 actual=containsAllResults,
1767 onpass="Set containsAll is functional",
1768 onfail="Set containsAll failed" )
1769
1770 main.step( "Distributed Set remove()" )
1771 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001772 removeResponses = main.Cluster.command( "setTestRemove",
1773 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001774 # main.TRUE = successfully changed the set
1775 # main.FALSE = action resulted in no change in set
1776 # main.ERROR - Some error in executing the function
1777 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001778 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001779 if removeResponses[ i ] == main.TRUE:
1780 # All is well
1781 pass
1782 elif removeResponses[ i ] == main.FALSE:
1783 # not in set, probably fine
1784 pass
1785 elif removeResponses[ i ] == main.ERROR:
1786 # Error in execution
1787 removeResults = main.FALSE
1788 else:
1789 # unexpected result
1790 removeResults = main.FALSE
1791 if removeResults != main.TRUE:
1792 main.log.error( "Error executing set remove" )
1793
1794 # Check if set is still correct
1795 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001796 getResponses = main.Cluster.command( "setTestGet",
1797 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001798 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001799 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001800 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001801 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001802 current = set( getResponses[ i ] )
1803 if len( current ) == len( getResponses[ i ] ):
1804 # no repeats
1805 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001806 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001807 " of set " + main.onosSetName + ":\n" +
1808 str( getResponses[ i ] ) )
1809 main.log.debug( "Expected: " + str( main.onosSet ) )
1810 main.log.debug( "Actual: " + str( current ) )
1811 getResults = main.FALSE
1812 else:
1813 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001814 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001815 " set " + main.onosSetName + ":\n" +
1816 str( getResponses[ i ] ) )
1817 getResults = main.FALSE
1818 elif getResponses[ i ] == main.ERROR:
1819 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001820 sizeResponses = main.Cluster.command( "setTestSize",
1821 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001822 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001823 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001824 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001825 if size != sizeResponses[ i ]:
1826 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001827 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001828 " for set " + main.onosSetName +
1829 " but got " + str( sizeResponses[ i ] ) )
1830 removeResults = removeResults and getResults and sizeResults
1831 utilities.assert_equals( expect=main.TRUE,
1832 actual=removeResults,
1833 onpass="Set remove correct",
1834 onfail="Set remove was incorrect" )
1835
1836 main.step( "Distributed Set removeAll()" )
1837 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001838 removeAllResponses = main.Cluster.command( "setTestRemove",
1839 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001840 # main.TRUE = successfully changed the set
1841 # main.FALSE = action resulted in no change in set
1842 # main.ERROR - Some error in executing the function
1843 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001844 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001845 if removeAllResponses[ i ] == main.TRUE:
1846 # All is well
1847 pass
1848 elif removeAllResponses[ i ] == main.FALSE:
1849 # not in set, probably fine
1850 pass
1851 elif removeAllResponses[ i ] == main.ERROR:
1852 # Error in execution
1853 removeAllResults = main.FALSE
1854 else:
1855 # unexpected result
1856 removeAllResults = main.FALSE
1857 if removeAllResults != main.TRUE:
1858 main.log.error( "Error executing set removeAll" )
1859
1860 # Check if set is still correct
1861 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001862 getResponses = main.Cluster.command( "setTestGet",
1863 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001864 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001865 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001866 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001867 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001868 current = set( getResponses[ i ] )
1869 if len( current ) == len( getResponses[ i ] ):
1870 # no repeats
1871 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001872 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001873 " of set " + main.onosSetName + ":\n" +
1874 str( getResponses[ i ] ) )
1875 main.log.debug( "Expected: " + str( main.onosSet ) )
1876 main.log.debug( "Actual: " + str( current ) )
1877 getResults = main.FALSE
1878 else:
1879 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001880 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001881 " set " + main.onosSetName + ":\n" +
1882 str( getResponses[ i ] ) )
1883 getResults = main.FALSE
1884 elif getResponses[ i ] == main.ERROR:
1885 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001886 sizeResponses = main.Cluster.command( "setTestSize",
1887 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001888 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001889 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001890 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001891 if size != sizeResponses[ i ]:
1892 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001893 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001894 " for set " + main.onosSetName +
1895 " but got " + str( sizeResponses[ i ] ) )
1896 removeAllResults = removeAllResults and getResults and sizeResults
1897 utilities.assert_equals( expect=main.TRUE,
1898 actual=removeAllResults,
1899 onpass="Set removeAll correct",
1900 onfail="Set removeAll was incorrect" )
1901
1902 main.step( "Distributed Set addAll()" )
1903 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001904 addResponses = main.Cluster.command( "setTestAdd",
1905 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001906 # main.TRUE = successfully changed the set
1907 # main.FALSE = action resulted in no change in set
1908 # main.ERROR - Some error in executing the function
1909 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001910 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001911 if addResponses[ i ] == main.TRUE:
1912 # All is well
1913 pass
1914 elif addResponses[ i ] == main.FALSE:
1915 # Already in set, probably fine
1916 pass
1917 elif addResponses[ i ] == main.ERROR:
1918 # Error in execution
1919 addAllResults = main.FALSE
1920 else:
1921 # unexpected result
1922 addAllResults = main.FALSE
1923 if addAllResults != main.TRUE:
1924 main.log.error( "Error executing set addAll" )
1925
1926 # Check if set is still correct
1927 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001928 getResponses = main.Cluster.command( "setTestGet",
1929 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001930 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001931 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001932 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001933 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001934 current = set( getResponses[ i ] )
1935 if len( current ) == len( getResponses[ i ] ):
1936 # no repeats
1937 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001938 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001939 " of set " + main.onosSetName + ":\n" +
1940 str( getResponses[ i ] ) )
1941 main.log.debug( "Expected: " + str( main.onosSet ) )
1942 main.log.debug( "Actual: " + str( current ) )
1943 getResults = main.FALSE
1944 else:
1945 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001946 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001947 " set " + main.onosSetName + ":\n" +
1948 str( getResponses[ i ] ) )
1949 getResults = main.FALSE
1950 elif getResponses[ i ] == main.ERROR:
1951 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001952 sizeResponses = main.Cluster.command( "setTestSize",
1953 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001954 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001955 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001956 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001957 if size != sizeResponses[ i ]:
1958 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001959 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001960 " for set " + main.onosSetName +
1961 " but got " + str( sizeResponses[ i ] ) )
1962 addAllResults = addAllResults and getResults and sizeResults
1963 utilities.assert_equals( expect=main.TRUE,
1964 actual=addAllResults,
1965 onpass="Set addAll correct",
1966 onfail="Set addAll was incorrect" )
1967
1968 main.step( "Distributed Set clear()" )
1969 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07001970 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07001971 args=[ main.onosSetName, " " ], # Values doesn't matter
1972 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001973 # main.TRUE = successfully changed the set
1974 # main.FALSE = action resulted in no change in set
1975 # main.ERROR - Some error in executing the function
1976 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001977 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001978 if clearResponses[ i ] == main.TRUE:
1979 # All is well
1980 pass
1981 elif clearResponses[ i ] == main.FALSE:
1982 # Nothing set, probably fine
1983 pass
1984 elif clearResponses[ i ] == main.ERROR:
1985 # Error in execution
1986 clearResults = main.FALSE
1987 else:
1988 # unexpected result
1989 clearResults = main.FALSE
1990 if clearResults != main.TRUE:
1991 main.log.error( "Error executing set clear" )
1992
1993 # Check if set is still correct
1994 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001995 getResponses = main.Cluster.command( "setTestGet",
1996 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001997 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001998 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001999 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002000 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002001 current = set( getResponses[ i ] )
2002 if len( current ) == len( getResponses[ i ] ):
2003 # no repeats
2004 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002005 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002006 " of set " + main.onosSetName + ":\n" +
2007 str( getResponses[ i ] ) )
2008 main.log.debug( "Expected: " + str( main.onosSet ) )
2009 main.log.debug( "Actual: " + str( current ) )
2010 getResults = main.FALSE
2011 else:
2012 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002013 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002014 " set " + main.onosSetName + ":\n" +
2015 str( getResponses[ i ] ) )
2016 getResults = main.FALSE
2017 elif getResponses[ i ] == main.ERROR:
2018 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002019 sizeResponses = main.Cluster.command( "setTestSize",
2020 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002021 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002022 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002023 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002024 if size != sizeResponses[ i ]:
2025 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002026 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002027 " for set " + main.onosSetName +
2028 " but got " + str( sizeResponses[ i ] ) )
2029 clearResults = clearResults and getResults and sizeResults
2030 utilities.assert_equals( expect=main.TRUE,
2031 actual=clearResults,
2032 onpass="Set clear correct",
2033 onfail="Set clear was incorrect" )
2034
2035 main.step( "Distributed Set addAll()" )
2036 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002037 addResponses = main.Cluster.command( "setTestAdd",
2038 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002039 # main.TRUE = successfully changed the set
2040 # main.FALSE = action resulted in no change in set
2041 # main.ERROR - Some error in executing the function
2042 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002043 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002044 if addResponses[ i ] == main.TRUE:
2045 # All is well
2046 pass
2047 elif addResponses[ i ] == main.FALSE:
2048 # Already in set, probably fine
2049 pass
2050 elif addResponses[ i ] == main.ERROR:
2051 # Error in execution
2052 addAllResults = main.FALSE
2053 else:
2054 # unexpected result
2055 addAllResults = main.FALSE
2056 if addAllResults != main.TRUE:
2057 main.log.error( "Error executing set addAll" )
2058
2059 # Check if set is still correct
2060 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002061 getResponses = main.Cluster.command( "setTestGet",
2062 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002063 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002064 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002065 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002066 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002067 current = set( getResponses[ i ] )
2068 if len( current ) == len( getResponses[ i ] ):
2069 # no repeats
2070 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002071 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002072 " of set " + main.onosSetName + ":\n" +
2073 str( getResponses[ i ] ) )
2074 main.log.debug( "Expected: " + str( main.onosSet ) )
2075 main.log.debug( "Actual: " + str( current ) )
2076 getResults = main.FALSE
2077 else:
2078 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002079 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002080 " set " + main.onosSetName + ":\n" +
2081 str( getResponses[ i ] ) )
2082 getResults = main.FALSE
2083 elif getResponses[ i ] == main.ERROR:
2084 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002085 sizeResponses = main.Cluster.command( "setTestSize",
2086 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002087 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002088 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002089 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002090 if size != sizeResponses[ i ]:
2091 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002092 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002093 " for set " + main.onosSetName +
2094 " but got " + str( sizeResponses[ i ] ) )
2095 addAllResults = addAllResults and getResults and sizeResults
2096 utilities.assert_equals( expect=main.TRUE,
2097 actual=addAllResults,
2098 onpass="Set addAll correct",
2099 onfail="Set addAll was incorrect" )
2100
2101 main.step( "Distributed Set retain()" )
2102 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002103 retainResponses = main.Cluster.command( "setTestRemove",
2104 args=[ main.onosSetName, retainValue ],
2105 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002106 # main.TRUE = successfully changed the set
2107 # main.FALSE = action resulted in no change in set
2108 # main.ERROR - Some error in executing the function
2109 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002110 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002111 if retainResponses[ i ] == main.TRUE:
2112 # All is well
2113 pass
2114 elif retainResponses[ i ] == main.FALSE:
2115 # Already in set, probably fine
2116 pass
2117 elif retainResponses[ i ] == main.ERROR:
2118 # Error in execution
2119 retainResults = main.FALSE
2120 else:
2121 # unexpected result
2122 retainResults = main.FALSE
2123 if retainResults != main.TRUE:
2124 main.log.error( "Error executing set retain" )
2125
2126 # Check if set is still correct
2127 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002128 getResponses = main.Cluster.command( "setTestGet",
2129 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002130 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002131 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002132 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002133 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002134 current = set( getResponses[ i ] )
2135 if len( current ) == len( getResponses[ i ] ):
2136 # no repeats
2137 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002138 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002139 " of set " + main.onosSetName + ":\n" +
2140 str( getResponses[ i ] ) )
2141 main.log.debug( "Expected: " + str( main.onosSet ) )
2142 main.log.debug( "Actual: " + str( current ) )
2143 getResults = main.FALSE
2144 else:
2145 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002146 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002147 " set " + main.onosSetName + ":\n" +
2148 str( getResponses[ i ] ) )
2149 getResults = main.FALSE
2150 elif getResponses[ i ] == main.ERROR:
2151 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002152 sizeResponses = main.Cluster.command( "setTestSize",
2153 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002154 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002155 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002156 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002157 if size != sizeResponses[ i ]:
2158 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002159 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002160 str( size ) + " for set " + main.onosSetName +
2161 " but got " + str( sizeResponses[ i ] ) )
2162 retainResults = retainResults and getResults and sizeResults
2163 utilities.assert_equals( expect=main.TRUE,
2164 actual=retainResults,
2165 onpass="Set retain correct",
2166 onfail="Set retain was incorrect" )
2167
2168 # Transactional maps
2169 main.step( "Partitioned Transactional maps put" )
2170 tMapValue = "Testing"
2171 numKeys = 100
2172 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002173 ctrl = main.Cluster.next()
2174 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002175 if putResponses and len( putResponses ) == 100:
2176 for i in putResponses:
2177 if putResponses[ i ][ 'value' ] != tMapValue:
2178 putResult = False
2179 else:
2180 putResult = False
2181 if not putResult:
2182 main.log.debug( "Put response values: " + str( putResponses ) )
2183 utilities.assert_equals( expect=True,
2184 actual=putResult,
2185 onpass="Partitioned Transactional Map put successful",
2186 onfail="Partitioned Transactional Map put values are incorrect" )
2187
2188 main.step( "Partitioned Transactional maps get" )
2189 # FIXME: is this sleep needed?
2190 time.sleep( 5 )
2191
2192 getCheck = True
2193 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002194 getResponses = main.Cluster.command( "transactionalMapGet",
2195 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002196 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002197 for node in getResponses:
2198 if node != tMapValue:
2199 valueCheck = False
2200 if not valueCheck:
Jon Hall0e240372018-05-02 11:21:57 -07002201 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002202 main.log.warn( getResponses )
2203 getCheck = getCheck and valueCheck
2204 utilities.assert_equals( expect=True,
2205 actual=getCheck,
2206 onpass="Partitioned Transactional Map get values were correct",
2207 onfail="Partitioned Transactional Map values incorrect" )
2208
2209 # DISTRIBUTED ATOMIC VALUE
2210 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002211 getValues = main.Cluster.command( "valueTestGet",
2212 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002213 main.log.debug( getValues )
2214 # Check the results
2215 atomicValueGetResult = True
2216 expected = valueValue if valueValue is not None else "null"
2217 main.log.debug( "Checking for value of " + expected )
2218 for i in getValues:
2219 if i != expected:
2220 atomicValueGetResult = False
2221 utilities.assert_equals( expect=True,
2222 actual=atomicValueGetResult,
2223 onpass="Atomic Value get successful",
2224 onfail="Error getting atomic Value " +
2225 str( valueValue ) + ", found: " +
2226 str( getValues ) )
2227
2228 main.step( "Atomic Value set()" )
2229 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002230 setValues = main.Cluster.command( "valueTestSet",
2231 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002232 main.log.debug( setValues )
2233 # Check the results
2234 atomicValueSetResults = True
2235 for i in setValues:
2236 if i != main.TRUE:
2237 atomicValueSetResults = False
2238 utilities.assert_equals( expect=True,
2239 actual=atomicValueSetResults,
2240 onpass="Atomic Value set successful",
2241 onfail="Error setting atomic Value" +
2242 str( setValues ) )
2243
2244 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002245 getValues = main.Cluster.command( "valueTestGet",
2246 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002247 main.log.debug( getValues )
2248 # Check the results
2249 atomicValueGetResult = True
2250 expected = valueValue if valueValue is not None else "null"
2251 main.log.debug( "Checking for value of " + expected )
2252 for i in getValues:
2253 if i != expected:
2254 atomicValueGetResult = False
2255 utilities.assert_equals( expect=True,
2256 actual=atomicValueGetResult,
2257 onpass="Atomic Value get successful",
2258 onfail="Error getting atomic Value " +
2259 str( valueValue ) + ", found: " +
2260 str( getValues ) )
2261
2262 main.step( "Atomic Value compareAndSet()" )
2263 oldValue = valueValue
2264 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002265 ctrl = main.Cluster.next()
2266 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002267 main.log.debug( CASValue )
2268 utilities.assert_equals( expect=main.TRUE,
2269 actual=CASValue,
2270 onpass="Atomic Value comapreAndSet successful",
2271 onfail="Error setting atomic Value:" +
2272 str( CASValue ) )
2273
2274 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002275 getValues = main.Cluster.command( "valueTestGet",
2276 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002277 main.log.debug( getValues )
2278 # Check the results
2279 atomicValueGetResult = True
2280 expected = valueValue if valueValue is not None else "null"
2281 main.log.debug( "Checking for value of " + expected )
2282 for i in getValues:
2283 if i != expected:
2284 atomicValueGetResult = False
2285 utilities.assert_equals( expect=True,
2286 actual=atomicValueGetResult,
2287 onpass="Atomic Value get successful",
2288 onfail="Error getting atomic Value " +
2289 str( valueValue ) + ", found: " +
2290 str( getValues ) )
2291
2292 main.step( "Atomic Value getAndSet()" )
2293 oldValue = valueValue
2294 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002295 ctrl = main.Cluster.next()
2296 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002297 main.log.debug( GASValue )
2298 expected = oldValue if oldValue is not None else "null"
2299 utilities.assert_equals( expect=expected,
2300 actual=GASValue,
2301 onpass="Atomic Value GAS successful",
2302 onfail="Error with GetAndSet atomic Value: expected " +
2303 str( expected ) + ", found: " +
2304 str( GASValue ) )
2305
2306 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002307 getValues = main.Cluster.command( "valueTestGet",
2308 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002309 main.log.debug( getValues )
2310 # Check the results
2311 atomicValueGetResult = True
2312 expected = valueValue if valueValue is not None else "null"
2313 main.log.debug( "Checking for value of " + expected )
2314 for i in getValues:
2315 if i != expected:
2316 atomicValueGetResult = False
2317 utilities.assert_equals( expect=True,
2318 actual=atomicValueGetResult,
2319 onpass="Atomic Value get successful",
2320 onfail="Error getting atomic Value: expected " +
2321 str( valueValue ) + ", found: " +
2322 str( getValues ) )
2323
2324 main.step( "Atomic Value destory()" )
2325 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002326 ctrl = main.Cluster.next()
2327 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002328 main.log.debug( destroyResult )
2329 # Check the results
2330 utilities.assert_equals( expect=main.TRUE,
2331 actual=destroyResult,
2332 onpass="Atomic Value destroy successful",
2333 onfail="Error destroying atomic Value" )
2334
2335 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002336 getValues = main.Cluster.command( "valueTestGet",
2337 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002338 main.log.debug( getValues )
2339 # Check the results
2340 atomicValueGetResult = True
2341 expected = valueValue if valueValue is not None else "null"
2342 main.log.debug( "Checking for value of " + expected )
2343 for i in getValues:
2344 if i != expected:
2345 atomicValueGetResult = False
2346 utilities.assert_equals( expect=True,
2347 actual=atomicValueGetResult,
2348 onpass="Atomic Value get successful",
2349 onfail="Error getting atomic Value " +
2350 str( valueValue ) + ", found: " +
2351 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002352
2353 # WORK QUEUES
2354 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002355 ctrl = main.Cluster.next()
2356 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002357 workQueuePending += 1
2358 main.log.debug( addResult )
2359 # Check the results
2360 utilities.assert_equals( expect=main.TRUE,
2361 actual=addResult,
2362 onpass="Work Queue add successful",
2363 onfail="Error adding to Work Queue" )
2364
2365 main.step( "Check the work queue stats" )
2366 statsResults = self.workQueueStatsCheck( workQueueName,
2367 workQueueCompleted,
2368 workQueueInProgress,
2369 workQueuePending )
2370 utilities.assert_equals( expect=True,
2371 actual=statsResults,
2372 onpass="Work Queue stats correct",
2373 onfail="Work Queue stats incorrect " )
2374
2375 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002376 ctrl = main.Cluster.next()
2377 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002378 workQueuePending += 2
2379 main.log.debug( addMultipleResult )
2380 # Check the results
2381 utilities.assert_equals( expect=main.TRUE,
2382 actual=addMultipleResult,
2383 onpass="Work Queue add multiple successful",
2384 onfail="Error adding multiple items to Work Queue" )
2385
2386 main.step( "Check the work queue stats" )
2387 statsResults = self.workQueueStatsCheck( workQueueName,
2388 workQueueCompleted,
2389 workQueueInProgress,
2390 workQueuePending )
2391 utilities.assert_equals( expect=True,
2392 actual=statsResults,
2393 onpass="Work Queue stats correct",
2394 onfail="Work Queue stats incorrect " )
2395
2396 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002397 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002398 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002399 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002400 workQueuePending -= number
2401 workQueueCompleted += number
2402 main.log.debug( take1Result )
2403 # Check the results
2404 utilities.assert_equals( expect=main.TRUE,
2405 actual=take1Result,
2406 onpass="Work Queue takeAndComplete 1 successful",
2407 onfail="Error taking 1 from Work Queue" )
2408
2409 main.step( "Check the work queue stats" )
2410 statsResults = self.workQueueStatsCheck( workQueueName,
2411 workQueueCompleted,
2412 workQueueInProgress,
2413 workQueuePending )
2414 utilities.assert_equals( expect=True,
2415 actual=statsResults,
2416 onpass="Work Queue stats correct",
2417 onfail="Work Queue stats incorrect " )
2418
2419 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002420 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002421 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002422 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002423 workQueuePending -= number
2424 workQueueCompleted += number
2425 main.log.debug( take2Result )
2426 # Check the results
2427 utilities.assert_equals( expect=main.TRUE,
2428 actual=take2Result,
2429 onpass="Work Queue takeAndComplete 2 successful",
2430 onfail="Error taking 2 from Work Queue" )
2431
2432 main.step( "Check the work queue stats" )
2433 statsResults = self.workQueueStatsCheck( workQueueName,
2434 workQueueCompleted,
2435 workQueueInProgress,
2436 workQueuePending )
2437 utilities.assert_equals( expect=True,
2438 actual=statsResults,
2439 onpass="Work Queue stats correct",
2440 onfail="Work Queue stats incorrect " )
2441
2442 main.step( "Work Queue destroy()" )
2443 valueValue = None
2444 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002445 ctrl = main.Cluster.next()
2446 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002447 workQueueCompleted = 0
2448 workQueueInProgress = 0
2449 workQueuePending = 0
2450 main.log.debug( destroyResult )
2451 # Check the results
2452 utilities.assert_equals( expect=main.TRUE,
2453 actual=destroyResult,
2454 onpass="Work Queue destroy successful",
2455 onfail="Error destroying Work Queue" )
2456
2457 main.step( "Check the work queue stats" )
2458 statsResults = self.workQueueStatsCheck( workQueueName,
2459 workQueueCompleted,
2460 workQueueInProgress,
2461 workQueuePending )
2462 utilities.assert_equals( expect=True,
2463 actual=statsResults,
2464 onpass="Work Queue stats correct",
2465 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002466 except Exception as e:
2467 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002468
2469 def cleanUp( self, main ):
2470 """
2471 Clean up
2472 """
Devin Lim58046fa2017-07-05 16:55:00 -07002473 assert main, "main not defined"
2474 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002475
2476 # printing colors to terminal
2477 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2478 'blue': '\033[94m', 'green': '\033[92m',
2479 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
Jon Hall4173b242017-09-12 17:04:38 -07002480
Devin Lim58046fa2017-07-05 16:55:00 -07002481 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002482
2483 main.step( "Checking raft log size" )
2484 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2485 # get compacted periodically
Jon Hall3e6edb32018-08-21 16:20:30 -07002486
2487 # FIXME: We need to look at the raft servers, which might not be on the ONOS machine
Jon Hall4173b242017-09-12 17:04:38 -07002488 logCheck = main.Cluster.checkPartitionSize()
2489 utilities.assert_equals( expect=True, actual=logCheck,
2490 onpass="Raft log size is not too big",
2491 onfail="Raft logs grew too big" )
2492
Devin Lim58046fa2017-07-05 16:55:00 -07002493 main.step( "Killing tcpdumps" )
2494 main.Mininet2.stopTcpdump()
2495
2496 testname = main.TEST
2497 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2498 main.step( "Copying MN pcap and ONOS log files to test station" )
2499 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2500 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2501 # NOTE: MN Pcap file is being saved to logdir.
2502 # We scp this file as MN and TestON aren't necessarily the same vm
2503
2504 # FIXME: To be replaced with a Jenkin's post script
2505 # TODO: Load these from params
2506 # NOTE: must end in /
2507 logFolder = "/opt/onos/log/"
2508 logFiles = [ "karaf.log", "karaf.log.1" ]
2509 # NOTE: must end in /
2510 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002511 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002512 dstName = main.logdir + "/" + ctrl.name + "-" + f
2513 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002514 logFolder + f, dstName )
2515 # std*.log's
2516 # NOTE: must end in /
2517 logFolder = "/opt/onos/var/"
2518 logFiles = [ "stderr.log", "stdout.log" ]
2519 # NOTE: must end in /
2520 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002521 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002522 dstName = main.logdir + "/" + ctrl.name + "-" + f
2523 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002524 logFolder + f, dstName )
2525 else:
2526 main.log.debug( "skipping saving log files" )
2527
Jon Hall5d5876e2017-11-30 09:33:16 -08002528 main.step( "Checking ONOS Logs for errors" )
2529 for ctrl in main.Cluster.runningNodes:
2530 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2531 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
2532
Devin Lim58046fa2017-07-05 16:55:00 -07002533 main.step( "Stopping Mininet" )
2534 mnResult = main.Mininet1.stopNet()
2535 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2536 onpass="Mininet stopped",
2537 onfail="MN cleanup NOT successful" )
2538
Devin Lim58046fa2017-07-05 16:55:00 -07002539 try:
2540 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2541 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2542 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2543 timerLog.close()
2544 except NameError as e:
2545 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002546
Devin Lim58046fa2017-07-05 16:55:00 -07002547 def assignMastership( self, main ):
2548 """
2549 Assign mastership to controllers
2550 """
2551 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002552 assert main, "main not defined"
2553 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002554
2555 main.case( "Assigning Controller roles for switches" )
2556 main.caseExplanation = "Check that ONOS is connected to each " +\
2557 "device. Then manually assign" +\
2558 " mastership to specific ONOS nodes using" +\
2559 " 'device-role'"
2560 main.step( "Assign mastership of switches to specific controllers" )
2561 # Manually assign mastership to the controller we want
2562 roleCall = main.TRUE
2563
2564 ipList = []
2565 deviceList = []
Devin Lim58046fa2017-07-05 16:55:00 -07002566 try:
2567 # Assign mastership to specific controllers. This assignment was
2568 # determined for a 7 node cluser, but will work with any sized
2569 # cluster
2570 for i in range( 1, 29 ): # switches 1 through 28
2571 # set up correct variables:
2572 if i == 1:
2573 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002574 ip = main.Cluster.active( c ).ip_address # ONOS1
Jon Hall0e240372018-05-02 11:21:57 -07002575 deviceId = main.Cluster.next().getDevice( "1000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002576 elif i == 2:
Devin Lim142b5342017-07-20 15:22:39 -07002577 c = 1 % main.Cluster.numCtrls
2578 ip = main.Cluster.active( c ).ip_address # ONOS2
Jon Hall0e240372018-05-02 11:21:57 -07002579 deviceId = main.Cluster.next().getDevice( "2000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002580 elif i == 3:
Devin Lim142b5342017-07-20 15:22:39 -07002581 c = 1 % main.Cluster.numCtrls
2582 ip = main.Cluster.active( c ).ip_address # ONOS2
Jon Hall0e240372018-05-02 11:21:57 -07002583 deviceId = main.Cluster.next().getDevice( "3000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002584 elif i == 4:
Devin Lim142b5342017-07-20 15:22:39 -07002585 c = 3 % main.Cluster.numCtrls
2586 ip = main.Cluster.active( c ).ip_address # ONOS4
Jon Hall0e240372018-05-02 11:21:57 -07002587 deviceId = main.Cluster.next().getDevice( "3004" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002588 elif i == 5:
Devin Lim142b5342017-07-20 15:22:39 -07002589 c = 2 % main.Cluster.numCtrls
2590 ip = main.Cluster.active( c ).ip_address # ONOS3
Jon Hall0e240372018-05-02 11:21:57 -07002591 deviceId = main.Cluster.next().getDevice( "5000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002592 elif i == 6:
Devin Lim142b5342017-07-20 15:22:39 -07002593 c = 2 % main.Cluster.numCtrls
2594 ip = main.Cluster.active( c ).ip_address # ONOS3
Jon Hall0e240372018-05-02 11:21:57 -07002595 deviceId = main.Cluster.next().getDevice( "6000" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002596 elif i == 7:
Devin Lim142b5342017-07-20 15:22:39 -07002597 c = 5 % main.Cluster.numCtrls
2598 ip = main.Cluster.active( c ).ip_address # ONOS6
Jon Hall0e240372018-05-02 11:21:57 -07002599 deviceId = main.Cluster.next().getDevice( "6007" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002600 elif i >= 8 and i <= 17:
Devin Lim142b5342017-07-20 15:22:39 -07002601 c = 4 % main.Cluster.numCtrls
2602 ip = main.Cluster.active( c ).ip_address # ONOS5
Devin Lim58046fa2017-07-05 16:55:00 -07002603 dpid = '3' + str( i ).zfill( 3 )
Jon Hall0e240372018-05-02 11:21:57 -07002604 deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002605 elif i >= 18 and i <= 27:
Devin Lim142b5342017-07-20 15:22:39 -07002606 c = 6 % main.Cluster.numCtrls
2607 ip = main.Cluster.active( c ).ip_address # ONOS7
Devin Lim58046fa2017-07-05 16:55:00 -07002608 dpid = '6' + str( i ).zfill( 3 )
Jon Hall0e240372018-05-02 11:21:57 -07002609 deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002610 elif i == 28:
2611 c = 0
Devin Lim142b5342017-07-20 15:22:39 -07002612 ip = main.Cluster.active( c ).ip_address # ONOS1
Jon Hall0e240372018-05-02 11:21:57 -07002613 deviceId = main.Cluster.next().getDevice( "2800" ).get( 'id' )
Devin Lim58046fa2017-07-05 16:55:00 -07002614 else:
2615 main.log.error( "You didn't write an else statement for " +
2616 "switch s" + str( i ) )
2617 roleCall = main.FALSE
2618 # Assign switch
2619 assert deviceId, "No device id for s" + str( i ) + " in ONOS"
2620 # TODO: make this controller dynamic
Jon Hall0e240372018-05-02 11:21:57 -07002621 roleCall = roleCall and main.Cluster.next().deviceRole( deviceId, ip )
Devin Lim58046fa2017-07-05 16:55:00 -07002622 ipList.append( ip )
2623 deviceList.append( deviceId )
2624 except ( AttributeError, AssertionError ):
2625 main.log.exception( "Something is wrong with ONOS device view" )
Jon Hall0e240372018-05-02 11:21:57 -07002626 main.log.info( main.Cluster.next().devices() )
Devin Lim58046fa2017-07-05 16:55:00 -07002627 utilities.assert_equals(
2628 expect=main.TRUE,
2629 actual=roleCall,
2630 onpass="Re-assigned switch mastership to designated controller",
2631 onfail="Something wrong with deviceRole calls" )
2632
2633 main.step( "Check mastership was correctly assigned" )
2634 roleCheck = main.TRUE
2635 # NOTE: This is due to the fact that device mastership change is not
2636 # atomic and is actually a multi step process
2637 time.sleep( 5 )
2638 for i in range( len( ipList ) ):
2639 ip = ipList[ i ]
2640 deviceId = deviceList[ i ]
2641 # Check assignment
Jon Hall0e240372018-05-02 11:21:57 -07002642 master = main.Cluster.next().getRole( deviceId ).get( 'master' )
Devin Lim58046fa2017-07-05 16:55:00 -07002643 if ip in master:
2644 roleCheck = roleCheck and main.TRUE
2645 else:
2646 roleCheck = roleCheck and main.FALSE
2647 main.log.error( "Error, controller " + ip + " is not" +
2648 " master " + "of device " +
2649 str( deviceId ) + ". Master is " +
2650 repr( master ) + "." )
2651 utilities.assert_equals(
2652 expect=main.TRUE,
2653 actual=roleCheck,
2654 onpass="Switches were successfully reassigned to designated " +
2655 "controller",
2656 onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002657
Jon Hall5d5876e2017-11-30 09:33:16 -08002658 def bringUpStoppedNodes( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -07002659 """
Jon Hall5d5876e2017-11-30 09:33:16 -08002660 The bring up stopped nodes.
Devin Lim58046fa2017-07-05 16:55:00 -07002661 """
2662 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002663 assert main, "main not defined"
2664 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002665 assert main.kill, "main.kill not defined"
2666 main.case( "Restart minority of ONOS nodes" )
2667
2668 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2669 startResults = main.TRUE
2670 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002671 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002672 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002673 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002674 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2675 onpass="ONOS nodes started successfully",
2676 onfail="ONOS nodes NOT successfully started" )
2677
2678 main.step( "Checking if ONOS is up yet" )
2679 count = 0
2680 onosIsupResult = main.FALSE
2681 while onosIsupResult == main.FALSE and count < 10:
2682 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002683 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002684 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002685 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002686 count = count + 1
2687 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2688 onpass="ONOS restarted successfully",
2689 onfail="ONOS restart NOT successful" )
2690
Jon Hall5d5876e2017-11-30 09:33:16 -08002691 main.step( "Restarting ONOS CLI" )
Devin Lim58046fa2017-07-05 16:55:00 -07002692 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002693 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002694 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002695 ctrl.startOnosCli( ctrl.ipAddress )
2696 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002697 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002698 onpass="ONOS node(s) restarted",
2699 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002700
Jon Hall5d5876e2017-11-30 09:33:16 -08002701 # Grab the time of restart so we can have some idea of average time
Devin Lim58046fa2017-07-05 16:55:00 -07002702 main.restartTime = time.time() - restartTime
2703 main.log.debug( "Restart time: " + str( main.restartTime ) )
2704 # TODO: MAke this configurable. Also, we are breaking the above timer
2705 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08002706 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07002707 False,
Devin Lim58046fa2017-07-05 16:55:00 -07002708 sleep=15,
2709 attempts=5 )
2710
2711 utilities.assert_equals( expect=True, actual=nodeResults,
2712 onpass="Nodes check successful",
2713 onfail="Nodes check NOT successful" )
2714
2715 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07002716 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07002717 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07002718 ctrl.name,
Jon Hall6c9e2da2018-11-06 12:01:23 -08002719 ctrl.CLI.sendline( "onos:scr-list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002720 main.log.error( "Failed to start ONOS, stopping test" )
Devin Lim44075962017-08-11 10:56:37 -07002721 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002722
Jon Hallca319892017-06-15 15:25:22 -07002723 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002724
2725 main.step( "Rerun for election on the node(s) that were killed" )
2726 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002727 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002728 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002729 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002730 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2731 onpass="ONOS nodes reran for election topic",
Jon Hall5d5876e2017-11-30 09:33:16 -08002732 onfail="Error rerunning for election" )
2733
2734 def upgradeNodes( self, main ):
2735 """
2736 Reinstall some nodes with an upgraded version.
2737
2738 This will reinstall nodes in main.kill with an upgraded version.
2739 """
2740 import time
2741 assert main, "main not defined"
2742 assert utilities.assert_equals, "utilities.assert_equals not defined"
2743 assert main.kill, "main.kill not defined"
2744 nodeNames = [ node.name for node in main.kill ]
2745 main.step( "Upgrading" + str( nodeNames ) + " ONOS nodes" )
2746
2747 stopResults = main.TRUE
2748 uninstallResults = main.TRUE
2749 startResults = main.TRUE
2750 sshResults = main.TRUE
2751 isup = main.TRUE
2752 restartTime = time.time()
2753 for ctrl in main.kill:
2754 stopResults = stopResults and\
2755 ctrl.onosStop( ctrl.ipAddress )
2756 uninstallResults = uninstallResults and\
2757 ctrl.onosUninstall( ctrl.ipAddress )
2758 # Install the new version of onos
2759 startResults = startResults and\
2760 ctrl.onosInstall( options="-fv", node=ctrl.ipAddress )
2761 sshResults = sshResults and\
2762 ctrl.onosSecureSSH( node=ctrl.ipAddress )
2763 isup = isup and ctrl.isup( ctrl.ipAddress )
2764 utilities.assert_equals( expect=main.TRUE, actual=stopResults,
2765 onpass="ONOS nodes stopped successfully",
2766 onfail="ONOS nodes NOT successfully stopped" )
2767 utilities.assert_equals( expect=main.TRUE, actual=uninstallResults,
2768 onpass="ONOS nodes uninstalled successfully",
2769 onfail="ONOS nodes NOT successfully uninstalled" )
2770 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2771 onpass="ONOS nodes started successfully",
2772 onfail="ONOS nodes NOT successfully started" )
2773 utilities.assert_equals( expect=main.TRUE, actual=sshResults,
2774 onpass="Successfully secured onos ssh",
2775 onfail="Failed to secure onos ssh" )
2776 utilities.assert_equals( expect=main.TRUE, actual=isup,
2777 onpass="ONOS nodes fully started",
2778 onfail="ONOS nodes NOT fully started" )
2779
2780 main.step( "Restarting ONOS CLI" )
2781 cliResults = main.TRUE
2782 for ctrl in main.kill:
2783 cliResults = cliResults and\
2784 ctrl.startOnosCli( ctrl.ipAddress )
2785 ctrl.active = True
2786 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
2787 onpass="ONOS node(s) restarted",
2788 onfail="ONOS node(s) did not restart" )
2789
2790 # Grab the time of restart so we can have some idea of average time
2791 main.restartTime = time.time() - restartTime
2792 main.log.debug( "Restart time: " + str( main.restartTime ) )
2793 # TODO: Make this configurable.
2794 main.step( "Checking ONOS nodes" )
2795 nodeResults = utilities.retry( main.Cluster.nodesCheck,
2796 False,
2797 sleep=15,
2798 attempts=5 )
2799
2800 utilities.assert_equals( expect=True, actual=nodeResults,
2801 onpass="Nodes check successful",
2802 onfail="Nodes check NOT successful" )
2803
2804 if not nodeResults:
2805 for ctrl in main.Cluster.active():
2806 main.log.debug( "{} components not ACTIVE: \n{}".format(
2807 ctrl.name,
Jon Hall6c9e2da2018-11-06 12:01:23 -08002808 ctrl.CLI.sendline( "onos:scr-list | grep -v ACTIVE" ) ) )
Jon Hall5d5876e2017-11-30 09:33:16 -08002809 main.log.error( "Failed to start ONOS, stopping test" )
2810 main.cleanAndExit()
2811
2812 self.commonChecks()
2813
2814 main.step( "Rerun for election on the node(s) that were killed" )
2815 runResults = main.TRUE
2816 for ctrl in main.kill:
2817 runResults = runResults and\
2818 ctrl.electionTestRun()
2819 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2820 onpass="ONOS nodes reran for election topic",
2821 onfail="Error rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002822
Devin Lim142b5342017-07-20 15:22:39 -07002823 def tempCell( self, cellName, ipList ):
2824 main.step( "Create cell file" )
2825 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002826
Devin Lim142b5342017-07-20 15:22:39 -07002827 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2828 main.Mininet1.ip_address,
Jon Hall3e6edb32018-08-21 16:20:30 -07002829 cellAppString, ipList, ipList,
2830 main.ONOScli1.karafUser )
Devin Lim142b5342017-07-20 15:22:39 -07002831 main.step( "Applying cell variable to environment" )
2832 cellResult = main.ONOSbench.setCell( cellName )
2833 verifyResult = main.ONOSbench.verifyCell()
2834
Devin Lim142b5342017-07-20 15:22:39 -07002835 def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
Devin Lim58046fa2017-07-05 16:55:00 -07002836 """
2837 afterWhich :
Jon Hallca319892017-06-15 15:25:22 -07002838 0: failure
Devin Lim58046fa2017-07-05 16:55:00 -07002839 1: scaling
2840 """
2841 """
2842 Check state after ONOS failure/scaling
2843 """
2844 import json
Devin Lim58046fa2017-07-05 16:55:00 -07002845 assert main, "main not defined"
2846 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002847 main.case( "Running ONOS Constant State Tests" )
2848
Jon Hall3e6edb32018-08-21 16:20:30 -07002849 OnosAfterWhich = [ "failure", "scaling" ]
Devin Lim58046fa2017-07-05 16:55:00 -07002850
Devin Lim58046fa2017-07-05 16:55:00 -07002851 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -07002852 self.checkRoleNotNull()
Devin Lim58046fa2017-07-05 16:55:00 -07002853
Devin Lim142b5342017-07-20 15:22:39 -07002854 ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
Jon Hallca319892017-06-15 15:25:22 -07002855 mastershipCheck = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07002856
2857 if rolesResults and not consistentMastership:
2858 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -07002859 node = str( main.Cluster.active( i ) )
Jon Hallca319892017-06-15 15:25:22 -07002860 main.log.warn( node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07002861 json.dumps( json.loads( ONOSMastership[ i ] ),
2862 sort_keys=True,
2863 indent=4,
2864 separators=( ',', ': ' ) ) )
2865
2866 if compareSwitch:
2867 description2 = "Compare switch roles from before failure"
2868 main.step( description2 )
2869 try:
2870 currentJson = json.loads( ONOSMastership[ 0 ] )
2871 oldJson = json.loads( mastershipState )
2872 except ( ValueError, TypeError ):
2873 main.log.exception( "Something is wrong with parsing " +
2874 "ONOSMastership[0] or mastershipState" )
Jon Hallca319892017-06-15 15:25:22 -07002875 main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
2876 main.log.debug( "mastershipState" + repr( mastershipState ) )
Devin Lim44075962017-08-11 10:56:37 -07002877 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07002878 mastershipCheck = main.TRUE
Jon Hallab611372018-02-21 15:26:05 -08002879 for swName, swDetails in main.Mininet1.getSwitches().items():
2880 switchDPID = swDetails[ 'dpid' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002881 current = [ switch[ 'master' ] for switch in currentJson
2882 if switchDPID in switch[ 'id' ] ]
2883 old = [ switch[ 'master' ] for switch in oldJson
2884 if switchDPID in switch[ 'id' ] ]
2885 if current == old:
2886 mastershipCheck = mastershipCheck and main.TRUE
2887 else:
2888 main.log.warn( "Mastership of switch %s changed" % switchDPID )
2889 mastershipCheck = main.FALSE
2890 utilities.assert_equals(
2891 expect=main.TRUE,
2892 actual=mastershipCheck,
2893 onpass="Mastership of Switches was not changed",
2894 onfail="Mastership of some switches changed" )
2895
2896 # NOTE: we expect mastership to change on controller failure/scaling down
Devin Lim142b5342017-07-20 15:22:39 -07002897 ONOSIntents, intentsResults = self.checkingIntents()
Devin Lim58046fa2017-07-05 16:55:00 -07002898 intentCheck = main.FALSE
2899 consistentIntents = True
Devin Lim58046fa2017-07-05 16:55:00 -07002900
2901 main.step( "Check for consistency in Intents from each controller" )
2902 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
2903 main.log.info( "Intents are consistent across all ONOS " +
2904 "nodes" )
2905 else:
2906 consistentIntents = False
2907
2908 # Try to make it easy to figure out what is happening
2909 #
2910 # Intent ONOS1 ONOS2 ...
2911 # 0x01 INSTALLED INSTALLING
2912 # ... ... ...
2913 # ... ... ...
2914 title = " ID"
Jon Hallca319892017-06-15 15:25:22 -07002915 for ctrl in main.Cluster.active():
2916 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07002917 main.log.warn( title )
2918 # get all intent keys in the cluster
2919 keys = []
2920 for nodeStr in ONOSIntents:
2921 node = json.loads( nodeStr )
2922 for intent in node:
2923 keys.append( intent.get( 'id' ) )
2924 keys = set( keys )
2925 for key in keys:
2926 row = "%-13s" % key
2927 for nodeStr in ONOSIntents:
2928 node = json.loads( nodeStr )
2929 for intent in node:
2930 if intent.get( 'id' ) == key:
2931 row += "%-15s" % intent.get( 'state' )
2932 main.log.warn( row )
2933 # End table view
2934
2935 utilities.assert_equals(
2936 expect=True,
2937 actual=consistentIntents,
2938 onpass="Intents are consistent across all ONOS nodes",
2939 onfail="ONOS nodes have different views of intents" )
2940 intentStates = []
2941 for node in ONOSIntents: # Iter through ONOS nodes
2942 nodeStates = []
2943 # Iter through intents of a node
2944 try:
2945 for intent in json.loads( node ):
2946 nodeStates.append( intent[ 'state' ] )
2947 except ( ValueError, TypeError ):
2948 main.log.exception( "Error in parsing intents" )
2949 main.log.error( repr( node ) )
2950 intentStates.append( nodeStates )
2951 out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
2952 main.log.info( dict( out ) )
2953
2954 if intentsResults and not consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07002955 for i in range( len( main.Cluster.active() ) ):
Jon Hall6040bcf2017-08-14 11:15:41 -07002956 ctrl = main.Cluster.controllers[ i ]
Jon Hallca319892017-06-15 15:25:22 -07002957 main.log.warn( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07002958 main.log.warn( json.dumps(
2959 json.loads( ONOSIntents[ i ] ),
2960 sort_keys=True,
2961 indent=4,
2962 separators=( ',', ': ' ) ) )
2963 elif intentsResults and consistentIntents:
2964 intentCheck = main.TRUE
2965
2966 # NOTE: Store has no durability, so intents are lost across system
2967 # restarts
2968 if not isRestart:
2969 main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
2970 # NOTE: this requires case 5 to pass for intentState to be set.
2971 # maybe we should stop the test if that fails?
2972 sameIntents = main.FALSE
2973 try:
2974 intentState
2975 except NameError:
2976 main.log.warn( "No previous intent state was saved" )
2977 else:
2978 if intentState and intentState == ONOSIntents[ 0 ]:
2979 sameIntents = main.TRUE
2980 main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
2981 # TODO: possibly the states have changed? we may need to figure out
2982 # what the acceptable states are
2983 elif len( intentState ) == len( ONOSIntents[ 0 ] ):
2984 sameIntents = main.TRUE
2985 try:
2986 before = json.loads( intentState )
2987 after = json.loads( ONOSIntents[ 0 ] )
2988 for intent in before:
2989 if intent not in after:
2990 sameIntents = main.FALSE
2991 main.log.debug( "Intent is not currently in ONOS " +
2992 "(at least in the same form):" )
2993 main.log.debug( json.dumps( intent ) )
2994 except ( ValueError, TypeError ):
2995 main.log.exception( "Exception printing intents" )
2996 main.log.debug( repr( ONOSIntents[ 0 ] ) )
2997 main.log.debug( repr( intentState ) )
2998 if sameIntents == main.FALSE:
2999 try:
3000 main.log.debug( "ONOS intents before: " )
3001 main.log.debug( json.dumps( json.loads( intentState ),
3002 sort_keys=True, indent=4,
3003 separators=( ',', ': ' ) ) )
3004 main.log.debug( "Current ONOS intents: " )
3005 main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
3006 sort_keys=True, indent=4,
3007 separators=( ',', ': ' ) ) )
3008 except ( ValueError, TypeError ):
3009 main.log.exception( "Exception printing intents" )
3010 main.log.debug( repr( ONOSIntents[ 0 ] ) )
3011 main.log.debug( repr( intentState ) )
3012 utilities.assert_equals(
3013 expect=main.TRUE,
3014 actual=sameIntents,
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003015 onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
Devin Lim58046fa2017-07-05 16:55:00 -07003016 onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
3017 intentCheck = intentCheck and sameIntents
3018
3019 main.step( "Get the OF Table entries and compare to before " +
3020 "component " + OnosAfterWhich[ afterWhich ] )
3021 FlowTables = main.TRUE
Jon Hallab611372018-02-21 15:26:05 -08003022 for switch in main.Mininet1.getSwitches().keys():
3023 main.log.info( "Checking flow table on " + switch )
3024 tmpFlows = main.Mininet1.getFlowTable( switch, version="1.3", debug=False )
3025 curSwitch = main.Mininet1.flowTableComp( flows[ switch ], tmpFlows )
Devin Lim58046fa2017-07-05 16:55:00 -07003026 FlowTables = FlowTables and curSwitch
3027 if curSwitch == main.FALSE:
Jon Hallab611372018-02-21 15:26:05 -08003028 main.log.warn( "Differences in flow table for switch: {}".format( switch ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003029 utilities.assert_equals(
3030 expect=main.TRUE,
3031 actual=FlowTables,
3032 onpass="No changes were found in the flow tables",
3033 onfail="Changes were found in the flow tables" )
3034
Jon Hallca319892017-06-15 15:25:22 -07003035 main.Mininet2.pingLongKill()
Devin Lim58046fa2017-07-05 16:55:00 -07003036 """
3037 main.step( "Check the continuous pings to ensure that no packets " +
3038 "were dropped during component failure" )
3039 main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
3040 main.params[ 'TESTONIP' ] )
3041 LossInPings = main.FALSE
3042 # NOTE: checkForLoss returns main.FALSE with 0% packet loss
3043 for i in range( 8, 18 ):
3044 main.log.info(
3045 "Checking for a loss in pings along flow from s" +
3046 str( i ) )
3047 LossInPings = main.Mininet2.checkForLoss(
3048 "/tmp/ping.h" +
3049 str( i ) ) or LossInPings
3050 if LossInPings == main.TRUE:
3051 main.log.info( "Loss in ping detected" )
3052 elif LossInPings == main.ERROR:
3053 main.log.info( "There are multiple mininet process running" )
3054 elif LossInPings == main.FALSE:
3055 main.log.info( "No Loss in the pings" )
3056 main.log.info( "No loss of dataplane connectivity" )
3057 utilities.assert_equals(
3058 expect=main.FALSE,
3059 actual=LossInPings,
3060 onpass="No Loss of connectivity",
3061 onfail="Loss of dataplane connectivity detected" )
3062 # NOTE: Since intents are not persisted with IntnentStore,
3063 # we expect loss in dataplane connectivity
3064 LossInPings = main.FALSE
3065 """
Devin Lim58046fa2017-07-05 16:55:00 -07003066 def compareTopo( self, main ):
3067 """
3068 Compare topo
3069 """
3070 import json
3071 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003072 assert main, "main not defined"
3073 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003074 try:
3075 from tests.dependencies.topology import Topology
3076 except ImportError:
3077 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003078 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003079 try:
3080 main.topoRelated
3081 except ( NameError, AttributeError ):
3082 main.topoRelated = Topology()
3083 main.case( "Compare ONOS Topology view to Mininet topology" )
3084 main.caseExplanation = "Compare topology objects between Mininet" +\
3085 " and ONOS"
3086 topoResult = main.FALSE
3087 topoFailMsg = "ONOS topology don't match Mininet"
3088 elapsed = 0
3089 count = 0
3090 main.step( "Comparing ONOS topology to MN topology" )
3091 startTime = time.time()
3092 # Give time for Gossip to work
3093 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3094 devicesResults = main.TRUE
3095 linksResults = main.TRUE
3096 hostsResults = main.TRUE
3097 hostAttachmentResults = True
3098 count += 1
3099 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003100 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003101 kwargs={ 'sleep': 5, 'attempts': 5,
3102 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003103 ipResult = main.TRUE
3104
Devin Lim142b5342017-07-20 15:22:39 -07003105 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003106 kwargs={ 'sleep': 5, 'attempts': 5,
3107 'randomTime': True },
3108 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003109
3110 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003111 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003112 if hosts[ controller ]:
3113 for host in hosts[ controller ]:
3114 if host is None or host.get( 'ipAddresses', [] ) == []:
3115 main.log.error(
3116 "Error with host ipAddresses on controller" +
3117 controllerStr + ": " + str( host ) )
3118 ipResult = main.FALSE
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003119 ports = main.topoRelated.getAll( "ports", True,
Jon Hallca319892017-06-15 15:25:22 -07003120 kwargs={ 'sleep': 5, 'attempts': 5,
3121 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003122 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003123 kwargs={ 'sleep': 5, 'attempts': 5,
3124 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003125 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003126 kwargs={ 'sleep': 5, 'attempts': 5,
3127 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003128
3129 elapsed = time.time() - startTime
3130 cliTime = time.time() - cliStart
Jon Hall5d5876e2017-11-30 09:33:16 -08003131 main.log.debug( "Elapsed time: " + str( elapsed ) )
3132 main.log.debug( "CLI time: " + str( cliTime ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003133
3134 if all( e is None for e in devices ) and\
3135 all( e is None for e in hosts ) and\
3136 all( e is None for e in ports ) and\
3137 all( e is None for e in links ) and\
3138 all( e is None for e in clusters ):
3139 topoFailMsg = "Could not get topology from ONOS"
3140 main.log.error( topoFailMsg )
3141 continue # Try again, No use trying to compare
3142
3143 mnSwitches = main.Mininet1.getSwitches()
3144 mnLinks = main.Mininet1.getLinks()
3145 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003146 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003147 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003148 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3149 controller,
3150 mnSwitches,
3151 devices,
3152 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003153 utilities.assert_equals( expect=main.TRUE,
3154 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003155 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003156 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003157 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003158 " Switches view is incorrect" )
3159
Devin Lim58046fa2017-07-05 16:55:00 -07003160 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003161 main.Mininet1.compareLinks,
3162 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003163 utilities.assert_equals( expect=main.TRUE,
3164 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003165 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003166 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003167 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003168 " links view is incorrect" )
3169 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3170 currentHostsResult = main.Mininet1.compareHosts(
3171 mnHosts,
3172 hosts[ controller ] )
3173 elif hosts[ controller ] == []:
3174 currentHostsResult = main.TRUE
3175 else:
3176 currentHostsResult = main.FALSE
3177 utilities.assert_equals( expect=main.TRUE,
3178 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003179 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003180 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003181 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003182 " hosts don't match Mininet" )
Devin Lim58046fa2017-07-05 16:55:00 -07003183 hostAttachment = True
Jon Hallab611372018-02-21 15:26:05 -08003184 if main.topoMappings:
3185 ctrl = main.Cluster.next()
3186 # CHECKING HOST ATTACHMENT POINTS
3187 zeroHosts = False
3188 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3189 if hosts[ controller ] == []:
3190 main.log.warn( "There are no hosts discovered" )
3191 zeroHosts = True
3192 else:
3193 for host in hosts[ controller ]:
3194 mac = None
3195 locations = []
3196 device = None
3197 port = None
3198 try:
3199 mac = host.get( 'mac' )
3200 assert mac, "mac field could not be found for this host object"
3201 if 'locations' in host:
3202 locations = host.get( 'locations' )
3203 elif 'location' in host:
3204 locations.append( host.get( 'location' ) )
3205 assert locations, "locations field could not be found for this host object"
Devin Lim58046fa2017-07-05 16:55:00 -07003206
Jon Hallab611372018-02-21 15:26:05 -08003207 # Trim the protocol identifier off deviceId
3208 device = str( locations[0].get( 'elementId' ) ).split( ':' )[ 1 ]
3209 assert device, "elementId field could not be found for this host location object"
Devin Lim58046fa2017-07-05 16:55:00 -07003210
Jon Hallab611372018-02-21 15:26:05 -08003211 port = locations[0].get( 'port' )
3212 assert port, "port field could not be found for this host location object"
Devin Lim58046fa2017-07-05 16:55:00 -07003213
Jon Hallab611372018-02-21 15:26:05 -08003214 # Now check if this matches where they should be
3215 if mac and device and port:
3216 if str( port ) != "1":
3217 main.log.error( "The attachment port is incorrect for " +
3218 "host " + str( mac ) +
3219 ". Expected: 1 Actual: " + str( port ) )
3220 hostAttachment = False
3221 if device != main.topoMappings[ str( mac ) ]:
3222 main.log.error( "The attachment device is incorrect for " +
3223 "host " + str( mac ) +
3224 ". Expected: " + main.topoMppings[ str( mac ) ] +
3225 " Actual: " + device )
3226 hostAttachment = False
3227 else:
Devin Lim58046fa2017-07-05 16:55:00 -07003228 hostAttachment = False
Jon Hallab611372018-02-21 15:26:05 -08003229 except ( AssertionError, TypeError ):
3230 main.log.exception( "Json object not as expected" )
3231 main.log.error( repr( host ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003232 hostAttachment = False
Jon Hallab611372018-02-21 15:26:05 -08003233 else:
3234 main.log.error( "No hosts json output or \"Error\"" +
3235 " in output. hosts = " +
3236 repr( hosts[ controller ] ) )
3237 if zeroHosts is False:
3238 # TODO: Find a way to know if there should be hosts in a
3239 # given point of the test
3240 hostAttachment = True
Devin Lim58046fa2017-07-05 16:55:00 -07003241
Jon Hallab611372018-02-21 15:26:05 -08003242 # END CHECKING HOST ATTACHMENT POINTS
Devin Lim58046fa2017-07-05 16:55:00 -07003243 devicesResults = devicesResults and currentDevicesResult
3244 linksResults = linksResults and currentLinksResult
3245 hostsResults = hostsResults and currentHostsResult
3246 hostAttachmentResults = hostAttachmentResults and\
3247 hostAttachment
3248 topoResult = ( devicesResults and linksResults
3249 and hostsResults and ipResult and
3250 hostAttachmentResults )
3251 utilities.assert_equals( expect=True,
3252 actual=topoResult,
3253 onpass="ONOS topology matches Mininet",
3254 onfail=topoFailMsg )
3255 # End of While loop to pull ONOS state
3256
3257 # Compare json objects for hosts and dataplane clusters
3258
3259 # hosts
3260 main.step( "Hosts view is consistent across all ONOS nodes" )
3261 consistentHostsResult = main.TRUE
3262 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003263 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003264 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3265 if hosts[ controller ] == hosts[ 0 ]:
3266 continue
3267 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003268 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003269 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003270 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003271 consistentHostsResult = main.FALSE
3272
3273 else:
Jon Hallca319892017-06-15 15:25:22 -07003274 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003275 controllerStr )
3276 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003277 main.log.debug( controllerStr +
3278 " hosts response: " +
3279 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003280 utilities.assert_equals(
3281 expect=main.TRUE,
3282 actual=consistentHostsResult,
3283 onpass="Hosts view is consistent across all ONOS nodes",
3284 onfail="ONOS nodes have different views of hosts" )
3285
3286 main.step( "Hosts information is correct" )
3287 hostsResults = hostsResults and ipResult
3288 utilities.assert_equals(
3289 expect=main.TRUE,
3290 actual=hostsResults,
3291 onpass="Host information is correct",
3292 onfail="Host information is incorrect" )
3293
3294 main.step( "Host attachment points to the network" )
3295 utilities.assert_equals(
3296 expect=True,
3297 actual=hostAttachmentResults,
3298 onpass="Hosts are correctly attached to the network",
3299 onfail="ONOS did not correctly attach hosts to the network" )
3300
3301 # Strongly connected clusters of devices
3302 main.step( "Clusters view is consistent across all ONOS nodes" )
3303 consistentClustersResult = main.TRUE
3304 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003305 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003306 if "Error" not in clusters[ controller ]:
3307 if clusters[ controller ] == clusters[ 0 ]:
3308 continue
3309 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003310 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003311 controllerStr +
3312 " is inconsistent with ONOS1" )
3313 consistentClustersResult = main.FALSE
3314 else:
3315 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003316 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003317 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003318 main.log.debug( controllerStr +
3319 " clusters response: " +
3320 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003321 utilities.assert_equals(
3322 expect=main.TRUE,
3323 actual=consistentClustersResult,
3324 onpass="Clusters view is consistent across all ONOS nodes",
3325 onfail="ONOS nodes have different views of clusters" )
3326 if not consistentClustersResult:
3327 main.log.debug( clusters )
3328 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003329 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003330
3331 main.step( "There is only one SCC" )
3332 # there should always only be one cluster
3333 try:
3334 numClusters = len( json.loads( clusters[ 0 ] ) )
3335 except ( ValueError, TypeError ):
3336 main.log.exception( "Error parsing clusters[0]: " +
3337 repr( clusters[ 0 ] ) )
3338 numClusters = "ERROR"
3339 clusterResults = main.FALSE
3340 if numClusters == 1:
3341 clusterResults = main.TRUE
3342 utilities.assert_equals(
3343 expect=1,
3344 actual=numClusters,
3345 onpass="ONOS shows 1 SCC",
3346 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3347
3348 topoResult = ( devicesResults and linksResults
3349 and hostsResults and consistentHostsResult
3350 and consistentClustersResult and clusterResults
3351 and ipResult and hostAttachmentResults )
3352
3353 topoResult = topoResult and int( count <= 2 )
3354 note = "note it takes about " + str( int( cliTime ) ) + \
3355 " seconds for the test to make all the cli calls to fetch " +\
3356 "the topology from each ONOS instance"
3357 main.log.info(
3358 "Very crass estimate for topology discovery/convergence( " +
3359 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3360 str( count ) + " tries" )
3361
3362 main.step( "Device information is correct" )
3363 utilities.assert_equals(
3364 expect=main.TRUE,
3365 actual=devicesResults,
3366 onpass="Device information is correct",
3367 onfail="Device information is incorrect" )
3368
3369 main.step( "Links are correct" )
3370 utilities.assert_equals(
3371 expect=main.TRUE,
3372 actual=linksResults,
3373 onpass="Link are correct",
3374 onfail="Links are incorrect" )
3375
3376 main.step( "Hosts are correct" )
3377 utilities.assert_equals(
3378 expect=main.TRUE,
3379 actual=hostsResults,
3380 onpass="Hosts are correct",
3381 onfail="Hosts are incorrect" )
3382
3383 # FIXME: move this to an ONOS state case
3384 main.step( "Checking ONOS nodes" )
Devin Lim3ebd5e72017-11-14 10:38:00 -08003385 nodeResults = utilities.retry( main.Cluster.nodesCheck,
Devin Lim58046fa2017-07-05 16:55:00 -07003386 False,
Devin Lim58046fa2017-07-05 16:55:00 -07003387 attempts=5 )
3388 utilities.assert_equals( expect=True, actual=nodeResults,
3389 onpass="Nodes check successful",
3390 onfail="Nodes check NOT successful" )
3391 if not nodeResults:
Jon Hallca319892017-06-15 15:25:22 -07003392 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -07003393 main.log.debug( "{} components not ACTIVE: \n{}".format(
Jon Hallca319892017-06-15 15:25:22 -07003394 ctrl.name,
Jon Hall6c9e2da2018-11-06 12:01:23 -08003395 ctrl.CLI.sendline( "onos:scr-list | grep -v ACTIVE" ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003396
3397 if not topoResult:
Devin Lim44075962017-08-11 10:56:37 -07003398 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -07003399
Jon Hallab611372018-02-21 15:26:05 -08003400 def linkDown( self, main, src="s3", dst="s28" ):
Devin Lim58046fa2017-07-05 16:55:00 -07003401 """
Jon Hallab611372018-02-21 15:26:05 -08003402 Link src-dst down
Devin Lim58046fa2017-07-05 16:55:00 -07003403 """
3404 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003405 assert main, "main not defined"
3406 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003407 # NOTE: You should probably run a topology check after this
3408
3409 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3410
3411 description = "Turn off a link to ensure that Link Discovery " +\
3412 "is working properly"
3413 main.case( description )
3414
Jon Hallab611372018-02-21 15:26:05 -08003415 main.step( "Kill Link between " + src + " and " + dst )
3416 LinkDown = main.Mininet1.link( END1=src, END2=dst, OPTION="down" )
Devin Lim58046fa2017-07-05 16:55:00 -07003417 main.log.info( "Waiting " + str( linkSleep ) +
3418 " seconds for link down to be discovered" )
3419 time.sleep( linkSleep )
3420 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3421 onpass="Link down successful",
3422 onfail="Failed to bring link down" )
3423 # TODO do some sort of check here
3424
Jon Hallab611372018-02-21 15:26:05 -08003425 def linkUp( self, main, src="s3", dst="s28" ):
Devin Lim58046fa2017-07-05 16:55:00 -07003426 """
Jon Hallab611372018-02-21 15:26:05 -08003427 Link src-dst up
Devin Lim58046fa2017-07-05 16:55:00 -07003428 """
3429 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003430 assert main, "main not defined"
3431 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003432 # NOTE: You should probably run a topology check after this
3433
3434 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3435
3436 description = "Restore a link to ensure that Link Discovery is " + \
3437 "working properly"
3438 main.case( description )
3439
Jon Hallab611372018-02-21 15:26:05 -08003440 main.step( "Bring link between " + src + " and " + dst + " back up" )
3441 LinkUp = main.Mininet1.link( END1=src, END2=dst, OPTION="up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003442 main.log.info( "Waiting " + str( linkSleep ) +
3443 " seconds for link up to be discovered" )
3444 time.sleep( linkSleep )
3445 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3446 onpass="Link up successful",
3447 onfail="Failed to bring link up" )
3448
3449 def switchDown( self, main ):
3450 """
3451 Switch Down
3452 """
3453 # NOTE: You should probably run a topology check after this
3454 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003455 assert main, "main not defined"
3456 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003457
3458 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3459
3460 description = "Killing a switch to ensure it is discovered correctly"
Devin Lim58046fa2017-07-05 16:55:00 -07003461 main.case( description )
3462 switch = main.params[ 'kill' ][ 'switch' ]
3463 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3464
3465 # TODO: Make this switch parameterizable
3466 main.step( "Kill " + switch )
3467 main.log.info( "Deleting " + switch )
3468 main.Mininet1.delSwitch( switch )
3469 main.log.info( "Waiting " + str( switchSleep ) +
3470 " seconds for switch down to be discovered" )
3471 time.sleep( switchSleep )
Jon Hall0e240372018-05-02 11:21:57 -07003472 device = main.Cluster.next().getDevice( dpid=switchDPID )
Devin Lim58046fa2017-07-05 16:55:00 -07003473 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003474 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003475 result = main.FALSE
3476 if device and device[ 'available' ] is False:
3477 result = main.TRUE
3478 utilities.assert_equals( expect=main.TRUE, actual=result,
3479 onpass="Kill switch successful",
3480 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003481
Devin Lim58046fa2017-07-05 16:55:00 -07003482 def switchUp( self, main ):
3483 """
3484 Switch Up
3485 """
3486 # NOTE: You should probably run a topology check after this
3487 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003488 assert main, "main not defined"
3489 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003490
3491 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3492 switch = main.params[ 'kill' ][ 'switch' ]
3493 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3494 links = main.params[ 'kill' ][ 'links' ].split()
Devin Lim58046fa2017-07-05 16:55:00 -07003495 description = "Adding a switch to ensure it is discovered correctly"
3496 main.case( description )
3497
3498 main.step( "Add back " + switch )
3499 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3500 for peer in links:
3501 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003502 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003503 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3504 main.log.info( "Waiting " + str( switchSleep ) +
3505 " seconds for switch up to be discovered" )
3506 time.sleep( switchSleep )
Jon Hall0e240372018-05-02 11:21:57 -07003507 device = main.Cluster.next().getDevice( dpid=switchDPID )
Devin Lim58046fa2017-07-05 16:55:00 -07003508 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003509 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003510 result = main.FALSE
3511 if device and device[ 'available' ]:
3512 result = main.TRUE
3513 utilities.assert_equals( expect=main.TRUE, actual=result,
3514 onpass="add switch successful",
3515 onfail="Failed to add switch?" )
3516
3517 def startElectionApp( self, main ):
3518 """
3519 start election app on all onos nodes
3520 """
Devin Lim58046fa2017-07-05 16:55:00 -07003521 assert main, "main not defined"
3522 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003523
3524 main.case( "Start Leadership Election app" )
3525 main.step( "Install leadership election app" )
Jon Hall0e240372018-05-02 11:21:57 -07003526 appResult = main.Cluster.next().CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003527 utilities.assert_equals(
3528 expect=main.TRUE,
3529 actual=appResult,
3530 onpass="Election app installed",
3531 onfail="Something went wrong with installing Leadership election" )
3532
3533 main.step( "Run for election on each node" )
Jon Hall0e240372018-05-02 11:21:57 -07003534 main.Cluster.next().electionTestRun()
Jon Hallca319892017-06-15 15:25:22 -07003535 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003536 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003537 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003538 utilities.assert_equals(
3539 expect=True,
3540 actual=sameResult,
3541 onpass="All nodes see the same leaderboards",
3542 onfail="Inconsistent leaderboards" )
3543
3544 if sameResult:
Jon Hall5d5876e2017-11-30 09:33:16 -08003545 # Check that the leader is one of the active nodes
3546 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003547 leader = leaders[ 0 ][ 0 ]
Jon Hall5d5876e2017-11-30 09:33:16 -08003548 if leader in ips:
3549 legitimate = True
Devin Lim58046fa2017-07-05 16:55:00 -07003550 else:
Jon Hall5d5876e2017-11-30 09:33:16 -08003551 legitimate = False
3552 main.log.debug( leaders )
3553 main.step( "Active node was elected leader?" )
Devin Lim58046fa2017-07-05 16:55:00 -07003554 utilities.assert_equals(
3555 expect=True,
Jon Hall5d5876e2017-11-30 09:33:16 -08003556 actual=legitimate,
Devin Lim58046fa2017-07-05 16:55:00 -07003557 onpass="Correct leader was elected",
3558 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003559 main.Cluster.testLeader = leader
3560
Devin Lim58046fa2017-07-05 16:55:00 -07003561 def isElectionFunctional( self, main ):
3562 """
3563 Check that Leadership Election is still functional
3564 15.1 Run election on each node
3565 15.2 Check that each node has the same leaders and candidates
3566 15.3 Find current leader and withdraw
3567 15.4 Check that a new node was elected leader
3568 15.5 Check that that new leader was the candidate of old leader
3569 15.6 Run for election on old leader
3570 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3571 15.8 Make sure that the old leader was added to the candidate list
3572
3573 old and new variable prefixes refer to data from before vs after
3574 withdrawl and later before withdrawl vs after re-election
3575 """
3576 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003577 assert main, "main not defined"
3578 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003579
3580 description = "Check that Leadership Election is still functional"
3581 main.case( description )
3582 # NOTE: Need to re-run after restarts since being a canidate is not persistant
3583
3584 oldLeaders = [] # list of lists of each nodes' candidates before
3585 newLeaders = [] # list of lists of each nodes' candidates after
3586 oldLeader = '' # the old leader from oldLeaders, None if not same
3587 newLeader = '' # the new leaders fron newLoeaders, None if not same
3588 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3589 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003590 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003591 expectNoLeader = True
3592
3593 main.step( "Run for election on each node" )
Devin Lim142b5342017-07-20 15:22:39 -07003594 electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003595 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003596 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003597 actual=electionResult,
3598 onpass="All nodes successfully ran for leadership",
3599 onfail="At least one node failed to run for leadership" )
3600
3601 if electionResult == main.FALSE:
3602 main.log.error(
3603 "Skipping Test Case because Election Test App isn't loaded" )
3604 main.skipCase()
3605
3606 main.step( "Check that each node shows the same leader and candidates" )
3607 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003608 activeCLIs = main.Cluster.active()
3609 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Devin Lim58046fa2017-07-05 16:55:00 -07003610 if sameResult:
3611 oldLeader = oldLeaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003612 main.log.info( "Old leader: " + oldLeader )
Devin Lim58046fa2017-07-05 16:55:00 -07003613 else:
3614 oldLeader = None
3615 utilities.assert_equals(
3616 expect=True,
3617 actual=sameResult,
3618 onpass="Leaderboards are consistent for the election topic",
3619 onfail=failMessage )
3620
3621 main.step( "Find current leader and withdraw" )
3622 withdrawResult = main.TRUE
3623 # do some sanity checking on leader before using it
3624 if oldLeader is None:
3625 main.log.error( "Leadership isn't consistent." )
3626 withdrawResult = main.FALSE
3627 # Get the CLI of the oldLeader
Jon Hallca319892017-06-15 15:25:22 -07003628 for ctrl in main.Cluster.active():
3629 if oldLeader == ctrl.ipAddress:
3630 oldLeaderCLI = ctrl
Devin Lim58046fa2017-07-05 16:55:00 -07003631 break
3632 else: # FOR/ELSE statement
Jon Hall701fea12018-10-08 11:09:22 -07003633 main.log.error( "Leader election, could not find current leader amongst active nodes" )
3634 for ctrl in main.Cluster.controllers:
3635 if oldLeader == ctrl.ipAddress:
3636 oldLeaderCLI = ctrl
3637 main.log.warn( "Old leader was found as node " + str( ctrl.ipAddress ) )
3638 # Should we skip the next if statement then? There should be a new leader elected?
Devin Lim58046fa2017-07-05 16:55:00 -07003639 if oldLeader:
3640 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3641 utilities.assert_equals(
3642 expect=main.TRUE,
3643 actual=withdrawResult,
3644 onpass="Node was withdrawn from election",
3645 onfail="Node was not withdrawn from election" )
3646
3647 main.step( "Check that a new node was elected leader" )
3648 failMessage = "Nodes have different leaders"
3649 # Get new leaders and candidates
3650 newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
3651 newLeader = None
3652 if newLeaderResult:
3653 if newLeaders[ 0 ][ 0 ] == 'none':
3654 main.log.error( "No leader was elected on at least 1 node" )
3655 if not expectNoLeader:
3656 newLeaderResult = False
3657 newLeader = newLeaders[ 0 ][ 0 ]
3658
3659 # Check that the new leader is not the older leader, which was withdrawn
3660 if newLeader == oldLeader:
3661 newLeaderResult = False
3662 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3663 " as the current leader" )
3664 utilities.assert_equals(
3665 expect=True,
3666 actual=newLeaderResult,
3667 onpass="Leadership election passed",
3668 onfail="Something went wrong with Leadership election" )
3669
3670 main.step( "Check that that new leader was the candidate of old leader" )
3671 # candidates[ 2 ] should become the top candidate after withdrawl
3672 correctCandidateResult = main.TRUE
3673 if expectNoLeader:
3674 if newLeader == 'none':
3675 main.log.info( "No leader expected. None found. Pass" )
3676 correctCandidateResult = main.TRUE
3677 else:
3678 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3679 correctCandidateResult = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07003680 utilities.assert_equals(
3681 expect=main.TRUE,
3682 actual=correctCandidateResult,
3683 onpass="Correct Candidate Elected",
3684 onfail="Incorrect Candidate Elected" )
3685
3686 main.step( "Run for election on old leader( just so everyone " +
3687 "is in the hat )" )
3688 if oldLeaderCLI is not None:
3689 runResult = oldLeaderCLI.electionTestRun()
3690 else:
3691 main.log.error( "No old leader to re-elect" )
3692 runResult = main.FALSE
3693 utilities.assert_equals(
3694 expect=main.TRUE,
3695 actual=runResult,
3696 onpass="App re-ran for election",
3697 onfail="App failed to run for election" )
3698
3699 main.step(
3700 "Check that oldLeader is a candidate, and leader if only 1 node" )
3701 # verify leader didn't just change
3702 # Get new leaders and candidates
3703 reRunLeaders = []
3704 time.sleep( 5 ) # Paremterize
3705 positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3706
Devin Lim58046fa2017-07-05 16:55:00 -07003707 def installDistributedPrimitiveApp( self, main ):
Jon Hall5d5876e2017-11-30 09:33:16 -08003708 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003709 Install Distributed Primitives app
Jon Hall5d5876e2017-11-30 09:33:16 -08003710 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003711 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003712 assert main, "main not defined"
3713 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003714
3715 # Variables for the distributed primitives tests
3716 main.pCounterName = "TestON-Partitions"
3717 main.pCounterValue = 0
3718 main.onosSet = set( [] )
3719 main.onosSetName = "TestON-set"
3720
3721 description = "Install Primitives app"
3722 main.case( description )
3723 main.step( "Install Primitives app" )
3724 appName = "org.onosproject.distributedprimitives"
Devin Lime9f0ccf2017-08-11 17:25:12 -07003725 appResults = main.Cluster.next().CLI.activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003726 utilities.assert_equals( expect=main.TRUE,
3727 actual=appResults,
3728 onpass="Primitives app activated",
3729 onfail="Primitives app not activated" )
3730 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003731 time.sleep( 5 ) # To allow all nodes to activate
Jon Halla478b852017-12-04 15:00:15 -08003732
3733 def upgradeInit( self, main ):
3734 '''
3735 Initiates an update
3736 '''
3737 main.step( "Send the command to initialize the upgrade" )
3738 ctrl = main.Cluster.next().CLI
3739 initialized = ctrl.issuInit()
3740 utilities.assert_equals( expect=main.TRUE, actual=initialized,
3741 onpass="ISSU initialized",
3742 onfail="Error initializing the upgrade" )
3743
3744 main.step( "Check the status of the upgrade" )
3745 ctrl = main.Cluster.next().CLI
3746 status = ctrl.issu()
3747 main.log.debug( status )
3748 # TODO: check things here?
3749
3750 main.step( "Checking ONOS nodes" )
3751 nodeResults = utilities.retry( main.Cluster.nodesCheck,
3752 False,
3753 sleep=15,
3754 attempts=5 )
3755 utilities.assert_equals( expect=True, actual=nodeResults,
3756 onpass="Nodes check successful",
3757 onfail="Nodes check NOT successful" )
Jon Hall7ce46ea2018-02-05 12:20:59 -08003758
3759 def backupData( self, main, location ):
3760 """
3761 Backs up ONOS data and logs to a given location on each active node in a cluster
3762 """
3763 result = True
3764 for ctrl in main.Cluster.active():
3765 try:
3766 ctrl.server.handle.sendline( "rm " + location )
3767 ctrl.server.handle.expect( ctrl.server.prompt )
3768 main.log.debug( ctrl.server.handle.before + ctrl.server.handle.after )
3769 except pexpect.ExceptionPexpect as e:
3770 main.log.error( e )
3771 main.cleanAndExit()
3772 ctrl.CLI.log( "'Starting backup of onos data'", level="INFO" )
3773 result = result and ( ctrl.server.backupData( location ) is main.TRUE )
3774 ctrl.CLI.log( "'End of backup of onos data'", level="INFO" )
3775 return result
3776
3777 def restoreData( self, main, location ):
3778 """
3779 Restores ONOS data and logs from a given location on each node in a cluster
3780 """
3781 result = True
3782 for ctrl in main.Cluster.controllers:
3783 result = result and ( ctrl.server.restoreData( location ) is main.TRUE )
3784 return result
Jon Hallab611372018-02-21 15:26:05 -08003785
    def startTopology( self, main ):
        """
        Starts Mininet using a topology file after pushing a network config file to ONOS.

        Steps:
        1. Push the netcfg JSON file to ONOS via REST.
        2. Read the same file back locally and compare the devices, ports,
           and (optionally) hosts sections against what ONOS reports.
        3. scp the topology files to the Mininet host and start Mininet,
           pointing it at every controller in the cluster.
        All keys/values are lower-cased on both sides before comparing to
        normalize formatting differences between ONOS output and the file.
        """
        import json
        import time
        main.case( "Starting Mininet Topology" )

        main.step( "Pushing Network config" )
        ctrl = main.Cluster.next()
        cfgPath = main.testsRoot + main.params[ 'topology' ][ 'configPath' ]
        cfgResult = ctrl.onosNetCfg( ctrl.ipAddress,
                                     path=cfgPath,
                                     fileName=main.params[ 'topology' ][ 'configName' ] )
        utilities.assert_equals( expect=main.TRUE, actual=cfgResult,
                                 onpass="Pushed Network Configuration to ONOS",
                                 onfail="Failed to push Network Configuration to ONOS" )

        main.step( "Check Network config" )
        try:
            cfgFile = cfgPath + main.params[ 'topology' ][ 'configName' ]
            with open( cfgFile, 'r' ) as contents:
                pushedNetCfg = json.load( contents )
                # round-trip through dumps/loads to lower-case all text
                pushedNetCfg = json.loads( json.dumps( pushedNetCfg ).lower() )
        except IOError:
            main.log.exception( "Net Cfg file not found." )
            main.cleanAndExit()
        netCfgSleep = int( main.params[ 'timers' ][ 'NetCfg' ] )
        time.sleep( netCfgSleep )
        rawONOSNetCfg = utilities.retry( f=main.Cluster.next().REST.getNetCfg,
                                         retValue=False,
                                         attempts=5,
                                         sleep=netCfgSleep )
        # Fix differences between ONOS printing and Pushed Cfg
        onosNetCfg = json.loads( rawONOSNetCfg.lower() )

        # Compare pushed device config
        cfgResult = True
        for did, pushedDevice in pushedNetCfg[ 'devices' ].items():
            onosDevice = onosNetCfg[ 'devices' ].get( did )
            if pushedDevice != onosDevice:
                cfgResult = False
                main.log.error( "Pushed Network configuration does not match what is in " +
                                "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedDevice ),
                                                                      ctrl.pprint( onosDevice ) ) )

        # Compare pushed port config, key by key since ONOS adds fields
        for portURI, pushedInterface in pushedNetCfg[ 'ports' ].items():
            onosInterface = onosNetCfg[ 'ports' ].get( portURI )
            # NOTE: pushed Cfg doesn't have macs
            for i in xrange( 0, len( pushedInterface[ 'interfaces' ] ) ):
                keys = pushedInterface[ 'interfaces' ][ i ].keys()
                portCompare = True
                for key in keys:
                    if pushedInterface[ 'interfaces' ][ i ].get( key ) != onosInterface[ 'interfaces' ][ i ].get( key ) :
                        main.log.debug( "{} mismatch for port {}".format( key, portURI ) )
                        portCompare = False
                if not portCompare:
                    cfgResult = False
                    main.log.error( "Pushed Network configuration does not match what is in " +
                                    "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedInterface ),
                                                                          ctrl.pprint( onosInterface ) ) )

        # hosts section is optional in the pushed config
        if pushedNetCfg.get( 'hosts' ) is not None:
            # Compare pushed host config
            for hid, pushedHost in pushedNetCfg[ 'hosts' ].items():
                onosHost = onosNetCfg[ 'hosts' ].get( hid.lower() )
                if pushedHost != onosHost:
                    cfgResult = False
                    main.log.error( "Pushed Network configuration does not match what is in " +
                                    "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedHost ),
                                                                          ctrl.pprint( onosHost ) ) )
        utilities.assert_equals( expect=True,
                                 actual=cfgResult,
                                 onpass="Net Cfg set",
                                 onfail="Net Cfg not correctly set" )
        if not cfgResult:
            # dump both sides for debugging the mismatch
            main.log.debug( "Pushed Network Config:" + ctrl.pprint( pushedNetCfg ) )
            main.log.debug( "ONOS Network Config:" + ctrl.pprint( onosNetCfg ) )

        main.step( "Start Mininet topology" )
        for f in main.params[ 'topology' ][ 'files' ].values():
            main.ONOSbench.scp( main.Mininet1,
                                f,
                                main.Mininet1.home,
                                direction="to" )
        topoName = main.params[ 'topology' ][ 'topoFile' ]
        topo = main.Mininet1.home + topoName
        ctrlList = ''
        # Build a comma-separated controller IP list for the topo script;
        # note this leaves a trailing comma, presumably tolerated by the
        # script's --onos-ip parsing -- TODO confirm
        for ctrl in main.Cluster.controllers:
            ctrlList += str( ctrl.ipAddress ) + ","
        args = main.params[ 'topology' ][ 'args' ]
        startResult = main.Mininet1.startNet( topoFile=topo,
                                              args=" --onos-ip=" + ctrlList + " " + args )
        utilities.assert_equals( expect=main.TRUE, actual=startResult,
                                 onpass="Mininet Started",
                                 onfail="Failed to start Mininet" )
        # Give SR app time to configure the network
        time.sleep( int( main.params[ 'timers' ][ 'SRSetup' ] ) )