blob: 9af5dfbcd697e53552835cde4cda79767ae7b2b6 [file] [log] [blame]
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07001"""
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07002Copyright 2015 Open Networking Foundation ( ONF )
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003
4Please refer questions to either the onos test mailing list at <onos-test@onosproject.org>,
5the System Testing Plans and Results wiki page at <https://wiki.onosproject.org/x/voMg>,
6or the System Testing Guide page at <https://wiki.onosproject.org/x/WYQg>
7
8 TestON is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 2 of the License, or
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -070011 ( at your option ) any later version.
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -070012
13 TestON is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with TestON. If not, see <http://www.gnu.org/licenses/>.
20"""
Jon Halla440e872016-03-31 15:15:50 -070021import json
Jon Hall41d39f12016-04-11 22:54:35 -070022import time
Jon Halla478b852017-12-04 15:00:15 -080023import pexpect
24import re
Jon Halle1a3b752015-07-22 13:02:46 -070025
Jon Hallf37d44d2017-05-24 10:37:30 -070026
Jon Hall41d39f12016-04-11 22:54:35 -070027class HA():
Jon Hall57b50432015-10-22 10:20:10 -070028
Jon Halla440e872016-03-31 15:15:50 -070029 def __init__( self ):
30 self.default = ''
Jon Hallab611372018-02-21 15:26:05 -080031 main.topoMappings = {}
Jon Hall30668ff2019-02-27 17:43:09 -080032 from tests.dependencies.ONOSSetup import ONOSSetup
33 main.testSetUp = ONOSSetup()
Jon Hall57b50432015-10-22 10:20:10 -070034
Jon Hall5a5c8432018-11-28 11:39:57 -080035 def removeKarafConsoleLogging( self ):
36 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
37 main.ONOSbench.handle.expect( main.ONOSbench.prompt )
38 main.ONOSbench.handle.sendline( "sed -i 's/-Dkaraf.log.console=INFO //g' tools/package/bin/onos-service" )
39 main.ONOSbench.handle.expect( main.ONOSbench.prompt )
40
Devin Lim58046fa2017-07-05 16:55:00 -070041 def customizeOnosGenPartitions( self ):
Devin Lim58046fa2017-07-05 16:55:00 -070042 # copy gen-partions file to ONOS
43 # NOTE: this assumes TestON and ONOS are on the same machine
Jon Hallab611372018-02-21 15:26:05 -080044 srcFile = main.testsRoot + "/HA/dependencies/onos-gen-partitions"
Devin Lim58046fa2017-07-05 16:55:00 -070045 dstDir = main.ONOSbench.home + "/tools/test/bin/onos-gen-partitions"
46 cpResult = main.ONOSbench.secureCopy( main.ONOSbench.user_name,
47 main.ONOSbench.ip_address,
48 srcFile,
49 dstDir,
50 pwd=main.ONOSbench.pwd,
51 direction="from" )
Jon Hallca319892017-06-15 15:25:22 -070052
Devin Lim58046fa2017-07-05 16:55:00 -070053 def cleanUpGenPartition( self ):
54 # clean up gen-partitions file
55 try:
56 main.ONOSbench.handle.sendline( "cd " + main.ONOSbench.home )
57 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
58 main.ONOSbench.handle.sendline( "git checkout -- tools/test/bin/onos-gen-partitions" )
59 main.ONOSbench.handle.expect( main.ONOSbench.home + "\$" )
Jon Hall0e240372018-05-02 11:21:57 -070060 main.log.info( "Cleaning custom gen partitions file, response was: \n" +
Devin Lim58046fa2017-07-05 16:55:00 -070061 str( main.ONOSbench.handle.before ) )
62 except ( pexpect.TIMEOUT, pexpect.EOF ):
63 main.log.exception( "ONOSbench: pexpect exception found:" +
64 main.ONOSbench.handle.before )
Devin Lim44075962017-08-11 10:56:37 -070065 main.cleanAndExit()
Jon Hallca319892017-06-15 15:25:22 -070066
Devin Lim58046fa2017-07-05 16:55:00 -070067 def startingMininet( self ):
68 main.step( "Starting Mininet" )
69 # scp topo file to mininet
70 # TODO: move to params?
71 topoName = "obelisk.py"
72 filePath = main.ONOSbench.home + "/tools/test/topos/"
73 main.ONOSbench.scp( main.Mininet1,
74 filePath + topoName,
75 main.Mininet1.home,
76 direction="to" )
77 mnResult = main.Mininet1.startNet()
78 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
79 onpass="Mininet Started",
80 onfail="Error starting Mininet" )
Jon Hallca319892017-06-15 15:25:22 -070081
Devin Lim58046fa2017-07-05 16:55:00 -070082 def swapNodeMetadata( self ):
Devin Lim142b5342017-07-20 15:22:39 -070083 if main.Cluster.numCtrls >= 5:
84 main.Cluster.setRunningNode( main.Cluster.numCtrls - 2 )
Devin Lim58046fa2017-07-05 16:55:00 -070085 else:
86 main.log.error( "Not enough ONOS nodes to run this test. Requires 5 or more" )
Devin Lim58046fa2017-07-05 16:55:00 -070087
Jon Hall4f360bc2017-09-07 10:19:52 -070088 def copyBackupConfig( self ):
Devin Lim58046fa2017-07-05 16:55:00 -070089 main.step( "Copying backup config files" )
90 main.onosServicepath = main.ONOSbench.home + "/tools/package/bin/onos-service"
91 cp = main.ONOSbench.scp( main.ONOSbench,
92 main.onosServicepath,
93 main.onosServicepath + ".backup",
94 direction="to" )
95
96 utilities.assert_equals( expect=main.TRUE,
97 actual=cp,
98 onpass="Copy backup config file succeeded",
99 onfail="Copy backup config file failed" )
Jon Hall4f360bc2017-09-07 10:19:52 -0700100
    def setMetadataUrl( self ):
        """
        Point the packaged onos-service script at a remote cluster
        metadata file by injecting a JAVA_OPTS export via sed.
        Assumes main.onosServicepath and main.serverPort are already set.
        """
        # NOTE: You should probably backup the config before and reset the config after the test
        # we need to modify the onos-service file to use remote metadata file
        # url for cluster metadata file
        iface = main.params[ 'server' ].get( 'interface' )
        ip = main.ONOSbench.getIpAddr( iface=iface )
        metaFile = "cluster.json"
        # Forward slashes are pre-escaped so the URI survives the sed substitution below
        javaArgs = r"-Donos.cluster.metadata.uri=http:\/\/{}:{}\/{}".format( ip, main.serverPort, metaFile )
        main.log.warn( repr( javaArgs ) )
        handle = main.ONOSbench.handle
        # Insert the JAVA_OPTS export after the first "bash" occurrence in the script
        sed = r"sed -i 's/bash/bash\nexport JAVA_OPTS=${{JAVA_OPTS:-{}}}\n/' {}".format( javaArgs,
                                                                                         main.onosServicepath )
        main.log.warn( repr( sed ) )
        handle.sendline( sed )
        # Consume the echoed command up to the metadata file name, then up to the prompt
        handle.expect( metaFile )
        output = handle.before
        handle.expect( "\$" )
        output += handle.before
        main.log.debug( repr( output ) )
120
121 def cleanUpOnosService( self ):
122 # Cleanup custom onos-service file
123 main.ONOSbench.scp( main.ONOSbench,
124 main.onosServicepath + ".backup",
125 main.onosServicepath,
126 direction="to" )
Jon Hallca319892017-06-15 15:25:22 -0700127
Jon Halla440e872016-03-31 15:15:50 -0700128 def consistentCheck( self ):
129 """
130 Checks that TestON counters are consistent across all nodes.
Jon Halle1a3b752015-07-22 13:02:46 -0700131
Jon Hallf37d44d2017-05-24 10:37:30 -0700132 Returns the tuple ( onosCounters, consistent )
Jon Hall41d39f12016-04-11 22:54:35 -0700133 - onosCounters is the parsed json output of the counters command on
134 all nodes
135 - consistent is main.TRUE if all "TestON" counters are consitent across
136 all nodes or main.FALSE
Jon Halla440e872016-03-31 15:15:50 -0700137 """
Jon Halle1a3b752015-07-22 13:02:46 -0700138 try:
Jon Halla440e872016-03-31 15:15:50 -0700139 # Get onos counters results
140 onosCountersRaw = []
141 threads = []
Jon Hallca319892017-06-15 15:25:22 -0700142 for ctrl in main.Cluster.active():
Jon Halla440e872016-03-31 15:15:50 -0700143 t = main.Thread( target=utilities.retry,
Jon Hallca319892017-06-15 15:25:22 -0700144 name="counters-" + str( ctrl ),
145 args=[ ctrl.counters, [ None ] ],
Jon Hallf37d44d2017-05-24 10:37:30 -0700146 kwargs={ 'sleep': 5, 'attempts': 5,
Jon Halla440e872016-03-31 15:15:50 -0700147 'randomTime': True } )
148 threads.append( t )
149 t.start()
150 for t in threads:
151 t.join()
152 onosCountersRaw.append( t.result )
153 onosCounters = []
Jon Hallca319892017-06-15 15:25:22 -0700154 for i in range( len( onosCountersRaw ) ):
Jon Halla440e872016-03-31 15:15:50 -0700155 try:
Jon Hall3e6edb32018-08-21 16:20:30 -0700156 value = json.loads( onosCountersRaw[ i ] )
157 onosCounters.append( value )
Jon Halla440e872016-03-31 15:15:50 -0700158 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700159 main.log.error( "Could not parse counters response from " +
Devin Lim142b5342017-07-20 15:22:39 -0700160 str( main.Cluster.active( i ) ) )
Jon Halla440e872016-03-31 15:15:50 -0700161 main.log.warn( repr( onosCountersRaw[ i ] ) )
Jon Hall0e240372018-05-02 11:21:57 -0700162 onosCounters.append( {} )
Jon Halla440e872016-03-31 15:15:50 -0700163
164 testCounters = {}
165 # make a list of all the "TestON-*" counters in ONOS
Jon Hall41d39f12016-04-11 22:54:35 -0700166 # lookes like a dict whose keys are the name of the ONOS node and
167 # values are a list of the counters. I.E.
Jon Hallf37d44d2017-05-24 10:37:30 -0700168 # { "ONOS1": [ { "name":"TestON-Partitions","value":56 } ]
Jon Halla440e872016-03-31 15:15:50 -0700169 # }
170 # NOTE: There is an assumtion that all nodes are active
171 # based on the above for loops
172 for controller in enumerate( onosCounters ):
Jon Hallf37d44d2017-05-24 10:37:30 -0700173 for key, value in controller[ 1 ].iteritems():
Jon Halla440e872016-03-31 15:15:50 -0700174 if 'TestON' in key:
Jon Hall0e240372018-05-02 11:21:57 -0700175 node = main.Cluster.active( controller[ 0 ] )
Jon Halla440e872016-03-31 15:15:50 -0700176 try:
Jon Hallf37d44d2017-05-24 10:37:30 -0700177 testCounters[ node ].append( { key: value } )
Jon Halla440e872016-03-31 15:15:50 -0700178 except KeyError:
Jon Hallf37d44d2017-05-24 10:37:30 -0700179 testCounters[ node ] = [ { key: value } ]
Jon Halla440e872016-03-31 15:15:50 -0700180 # compare the counters on each node
Jon Hallf37d44d2017-05-24 10:37:30 -0700181 firstV = testCounters.values()[ 0 ]
Jon Halla440e872016-03-31 15:15:50 -0700182 tmp = [ v == firstV for k, v in testCounters.iteritems() ]
183 if all( tmp ):
184 consistent = main.TRUE
185 else:
186 consistent = main.FALSE
Jon Hall0e240372018-05-02 11:21:57 -0700187 main.log.error( "ONOS nodes have different values for counters: %s",
Jon Halla440e872016-03-31 15:15:50 -0700188 testCounters )
189 return ( onosCounters, consistent )
190 except Exception:
191 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700192 main.cleanAndExit()
Jon Halla440e872016-03-31 15:15:50 -0700193
194 def counterCheck( self, counterName, counterValue ):
195 """
196 Checks that TestON counters are consistent across all nodes and that
197 specified counter is in ONOS with the given value
198 """
199 try:
200 correctResults = main.TRUE
201 # Get onos counters results and consistentCheck
202 onosCounters, consistent = self.consistentCheck()
203 # Check for correct values
Jon Hallca319892017-06-15 15:25:22 -0700204 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -0700205 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -0700206 current = onosCounters[ i ]
Jon Halla440e872016-03-31 15:15:50 -0700207 onosValue = None
208 try:
209 onosValue = current.get( counterName )
Jon Hall41d39f12016-04-11 22:54:35 -0700210 except AttributeError:
Jon Hallca319892017-06-15 15:25:22 -0700211 main.log.exception( node + " counters result " +
Jon Hall41d39f12016-04-11 22:54:35 -0700212 "is not as expected" )
Jon Halla440e872016-03-31 15:15:50 -0700213 correctResults = main.FALSE
214 if onosValue == counterValue:
Jon Hall0e240372018-05-02 11:21:57 -0700215 main.log.info( "{}: {} counter value is correct".format( node, counterName ) )
Jon Halla440e872016-03-31 15:15:50 -0700216 else:
Jon Hall0e240372018-05-02 11:21:57 -0700217 main.log.error( node + ": " + counterName +
Jon Hall41d39f12016-04-11 22:54:35 -0700218 " counter value is incorrect," +
219 " expected value: " + str( counterValue ) +
220 " current value: " + str( onosValue ) )
Jon Halla440e872016-03-31 15:15:50 -0700221 correctResults = main.FALSE
222 return consistent and correctResults
223 except Exception:
224 main.log.exception( "" )
Devin Lim44075962017-08-11 10:56:37 -0700225 main.cleanAndExit()
Jon Hall41d39f12016-04-11 22:54:35 -0700226
227 def consistentLeaderboards( self, nodes ):
228 TOPIC = 'org.onosproject.election'
229 # FIXME: use threads
Jon Halle0f0b342017-04-18 11:43:47 -0700230 # FIXME: should we retry outside the function?
Jon Hall41d39f12016-04-11 22:54:35 -0700231 for n in range( 5 ): # Retry in case election is still happening
232 leaderList = []
233 # Get all leaderboards
234 for cli in nodes:
235 leaderList.append( cli.specificLeaderCandidate( TOPIC ) )
236 # Compare leaderboards
Jon Hallf37d44d2017-05-24 10:37:30 -0700237 result = all( i == leaderList[ 0 ] for i in leaderList ) and\
Jon Hall41d39f12016-04-11 22:54:35 -0700238 leaderList is not None
Jon Hall41d39f12016-04-11 22:54:35 -0700239 if result:
240 return ( result, leaderList )
Jon Hallf37d44d2017-05-24 10:37:30 -0700241 time.sleep( 5 ) # TODO: paramerterize
Jon Hall41d39f12016-04-11 22:54:35 -0700242 main.log.error( "Inconsistent leaderboards:" + str( leaderList ) )
243 return ( result, leaderList )
244
    def initialSetUp( self, serviceClean=False ):
        """
        rest of initialSetup

        Optionally starts a Mininet packet capture, optionally reverts
        ONOS service-file changes, verifies the ONOS nodes, activates the
        apps listed in the params file, applies ONOS configuration
        settings from the params file, and checks app ids.

        serviceClean - when True, git-checkout the onos.conf/onos.service
                       init files to undo prior test modifications
        """
        # Start a packet capture on Mininet when requested in the params file
        if main.params[ 'tcpdump' ].lower() == "true":
            main.step( "Start Packet Capture MN" )
            main.Mininet2.startTcpdump(
                str( main.params[ 'MNtcpdump' ][ 'folder' ] ) + str( main.TEST )
                + "-MN.pcap",
                intf=main.params[ 'MNtcpdump' ][ 'intf' ],
                port=main.params[ 'MNtcpdump' ][ 'port' ] )

        if serviceClean:
            main.step( "Clean up ONOS service changes" )
            # Revert any edits made to the packaged init files
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.conf" )
            main.ONOSbench.handle.expect( "\$" )
            main.ONOSbench.handle.sendline( "git checkout -- tools/package/init/onos.service" )
            main.ONOSbench.handle.expect( "\$" )

        main.testSetUp.checkOnosNodes( main.Cluster )

        main.step( "Activate apps defined in the params file" )
        # get data from the params
        apps = main.params.get( 'apps' )
        if apps:
            apps = apps.split( ',' )
            main.log.debug( "Apps: " + str( apps ) )
            activateResult = True
            for app in apps:
                main.Cluster.active( 0 ).app( app, "Activate" )
            # TODO: check this worked
            time.sleep( 10 )  # wait for apps to activate
            # Verify each app reached the ACTIVE state
            for app in apps:
                state = main.Cluster.active( 0 ).appStatus( app )
                if state == "ACTIVE":
                    activateResult = activateResult and True
                else:
                    main.log.error( "{} is in {} state".format( app, state ) )
                    activateResult = False
            utilities.assert_equals( expect=True,
                                     actual=activateResult,
                                     onpass="Successfully activated apps",
                                     onfail="Failed to activate apps" )
        else:
            main.log.warn( "No apps were specified to be loaded after startup" )

        main.step( "Set ONOS configurations" )
        # FIXME: This shoudl be part of the general startup sequence
        config = main.params.get( 'ONOS_Configuration' )
        if config:
            main.log.debug( config )
            checkResult = main.TRUE
            # Apply every component/setting/value triple from the params file
            for component in config:
                for setting in config[ component ]:
                    value = config[ component ][ setting ]
                    check = main.Cluster.next().setCfg( component, setting, value )
                    main.log.info( "Value was changed? {}".format( main.TRUE == check ) )
                    checkResult = check and checkResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=checkResult,
                                     onpass="Successfully set config",
                                     onfail="Failed to set config" )
        else:
            main.log.warn( "No configurations were specified to be changed after startup" )

        main.step( "Check app ids" )
        appCheck = self.appCheck()
        utilities.assert_equals( expect=True, actual=appCheck,
                                 onpass="App Ids seem to be correct",
                                 onfail="Something is wrong with app Ids" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -0700315
Jon Hallca319892017-06-15 15:25:22 -0700316 def commonChecks( self ):
317 # TODO: make this assertable or assert in here?
318 self.topicsCheck()
319 self.partitionsCheck()
320 self.pendingMapCheck()
321 self.appCheck()
322
323 def topicsCheck( self, extraTopics=[] ):
324 """
325 Check for work partition topics in leaders output
326 """
327 leaders = main.Cluster.next().leaders()
328 missing = False
329 try:
330 if leaders:
331 parsedLeaders = json.loads( leaders )
332 output = json.dumps( parsedLeaders,
333 sort_keys=True,
334 indent=4,
335 separators=( ',', ': ' ) )
Jon Hallca319892017-06-15 15:25:22 -0700336 # check for all intent partitions
337 topics = []
338 for i in range( 14 ):
339 topics.append( "work-partition-" + str( i ) )
340 topics += extraTopics
Jon Hallca319892017-06-15 15:25:22 -0700341 ONOStopics = [ j[ 'topic' ] for j in parsedLeaders ]
342 for topic in topics:
343 if topic not in ONOStopics:
344 main.log.error( "Error: " + topic +
345 " not in leaders" )
346 missing = True
347 else:
348 main.log.error( "leaders() returned None" )
349 except ( ValueError, TypeError ):
350 main.log.exception( "Error parsing leaders" )
351 main.log.error( repr( leaders ) )
352 if missing:
Jon Hall4173b242017-09-12 17:04:38 -0700353 # NOTE Can we refactor this into the Cluster class?
354 # Maybe an option to print the output of a command from each node?
Jon Hallca319892017-06-15 15:25:22 -0700355 for ctrl in main.Cluster.active():
356 response = ctrl.CLI.leaders( jsonFormat=False )
357 main.log.debug( str( ctrl.name ) + " leaders output: \n" +
358 str( response ) )
359 return missing
360
361 def partitionsCheck( self ):
362 # TODO: return something assertable
363 partitions = main.Cluster.next().partitions()
364 try:
365 if partitions:
366 parsedPartitions = json.loads( partitions )
367 output = json.dumps( parsedPartitions,
368 sort_keys=True,
369 indent=4,
370 separators=( ',', ': ' ) )
371 main.log.debug( "Partitions: " + output )
372 # TODO check for a leader in all paritions
373 # TODO check for consistency among nodes
374 else:
375 main.log.error( "partitions() returned None" )
376 except ( ValueError, TypeError ):
377 main.log.exception( "Error parsing partitions" )
378 main.log.error( repr( partitions ) )
379
380 def pendingMapCheck( self ):
381 pendingMap = main.Cluster.next().pendingMap()
382 try:
383 if pendingMap:
384 parsedPending = json.loads( pendingMap )
385 output = json.dumps( parsedPending,
386 sort_keys=True,
387 indent=4,
388 separators=( ',', ': ' ) )
389 main.log.debug( "Pending map: " + output )
390 # TODO check something here?
391 else:
392 main.log.error( "pendingMap() returned None" )
393 except ( ValueError, TypeError ):
394 main.log.exception( "Error parsing pending map" )
395 main.log.error( repr( pendingMap ) )
396
397 def appCheck( self ):
398 """
399 Check App IDs on all nodes
400 """
401 # FIXME: Rename this to appIDCheck? or add a check for isntalled apps
Jon Hallb9d381e2018-02-05 12:02:10 -0800402 for i in range( 15 ):
403 # TODO modify retry or add a new version that accepts looking for
404 # a value in a return list instead of needing to match the entire
405 # return value to retry
406 appResults = main.Cluster.command( "appToIDCheck" )
407 appCheck = all( i == main.TRUE for i in appResults )
408 if appCheck:
409 break
410 else:
411 time.sleep( 5 )
412
Jon Hallca319892017-06-15 15:25:22 -0700413 if not appCheck:
Devin Lim142b5342017-07-20 15:22:39 -0700414 ctrl = main.Cluster.active( 0 )
Jon Hallb9d381e2018-02-05 12:02:10 -0800415 main.log.debug( "%s apps: %s" % ( ctrl.name, ctrl.pprint( ctrl.apps() ) ) )
416 main.log.debug( "%s appIDs: %s" % ( ctrl.name, ctrl.pprint( ctrl.appIDs() ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700417 return appCheck
418
Jon Halle0f0b342017-04-18 11:43:47 -0700419 def workQueueStatsCheck( self, workQueueName, completed, inProgress, pending ):
420 # Completed
Jon Hallca319892017-06-15 15:25:22 -0700421 completedValues = main.Cluster.command( "workQueueTotalCompleted",
422 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700423 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700424 completedResults = [ int( x ) == completed for x in completedValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700425 completedResult = all( completedResults )
426 if not completedResult:
427 main.log.warn( "Expected Work Queue {} to have {} completed, found {}".format(
428 workQueueName, completed, completedValues ) )
429
430 # In Progress
Jon Hallca319892017-06-15 15:25:22 -0700431 inProgressValues = main.Cluster.command( "workQueueTotalInProgress",
432 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700433 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700434 inProgressResults = [ int( x ) == inProgress for x in inProgressValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700435 inProgressResult = all( inProgressResults )
436 if not inProgressResult:
437 main.log.warn( "Expected Work Queue {} to have {} inProgress, found {}".format(
438 workQueueName, inProgress, inProgressValues ) )
439
440 # Pending
Jon Hallca319892017-06-15 15:25:22 -0700441 pendingValues = main.Cluster.command( "workQueueTotalPending",
442 args=[ workQueueName ] )
Jon Halle0f0b342017-04-18 11:43:47 -0700443 # Check the results
Jon Hallca319892017-06-15 15:25:22 -0700444 pendingResults = [ int( x ) == pending for x in pendingValues ]
Jon Halle0f0b342017-04-18 11:43:47 -0700445 pendingResult = all( pendingResults )
446 if not pendingResult:
447 main.log.warn( "Expected Work Queue {} to have {} pending, found {}".format(
448 workQueueName, pending, pendingValues ) )
449 return completedResult and inProgressResult and pendingResult
450
    def assignDevices( self, main ):
        """
        Assign devices to controllers

        Points every Mininet switch at the ONOS cluster with ovs-vsctl,
        verifies each switch lists every running controller, then builds
        main.topoMappings ( host MAC -> device id ) for the obelisk topo.
        """
        import re
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning devices to controllers" )
        main.caseExplanation = "Assign switches to ONOS using 'ovs-vsctl' " + \
                               "and check that an ONOS node becomes the " + \
                               "master of the device."
        main.step( "Assign switches to controllers" )

        ipList = main.Cluster.getIps()
        swList = main.Mininet1.getSwitches().keys()
        main.Mininet1.assignSwController( sw=swList, ip=ipList )

        # Every switch must list every running controller in its
        # get-controller output, otherwise the assignment failed
        mastershipCheck = main.TRUE
        for switch in swList:
            response = main.Mininet1.getSwController( switch )
            try:
                main.log.info( str( response ) )
                for ctrl in main.Cluster.runningNodes:
                    if re.search( "tcp:" + ctrl.ipAddress, response ):
                        mastershipCheck = mastershipCheck and main.TRUE
                    else:
                        main.log.error( "Error, node " + repr( ctrl ) + " is " +
                                        "not in the list of controllers " +
                                        switch + " is connecting to." )
                        mastershipCheck = main.FALSE
            except Exception:
                main.log.warn( "Error parsing get-controller response" )
                mastershipCheck = main.FALSE
        utilities.assert_equals(
            expect=main.TRUE,
            actual=mastershipCheck,
            onpass="Switch mastership assigned correctly",
            onfail="Switches not assigned correctly to controllers" )

        # Mappings for attachmentPoints from host mac to deviceID
        # TODO: make the key a dict with deviceIds and port #'s
        # FIXME: topo-HA/obelisk specific mappings:
        # key is mac and value is dpid
        main.topoMappings = {}
        for i in range( 1, 29 ):  # hosts 1 through 28
            # set up correct variables:
            # MAC looks like 00:00:00:00:00:NN with NN the host number in hex
            macId = "00:" * 5 + hex( i ).split( "0x" )[ 1 ].upper().zfill( 2 )
            if i == 1:
                deviceId = "1000".zfill( 16 )
            elif i == 2:
                deviceId = "2000".zfill( 16 )
            elif i == 3:
                deviceId = "3000".zfill( 16 )
            elif i == 4:
                deviceId = "3004".zfill( 16 )
            elif i == 5:
                deviceId = "5000".zfill( 16 )
            elif i == 6:
                deviceId = "6000".zfill( 16 )
            elif i == 7:
                deviceId = "6007".zfill( 16 )
            elif i >= 8 and i <= 17:
                # hosts 8-17 hang off the 3xxx spine switches
                dpid = '3' + str( i ).zfill( 3 )
                deviceId = dpid.zfill( 16 )
            elif i >= 18 and i <= 27:
                # hosts 18-27 hang off the 6xxx spine switches
                dpid = '6' + str( i ).zfill( 3 )
                deviceId = dpid.zfill( 16 )
            elif i == 28:
                deviceId = "2800".zfill( 16 )
            main.topoMappings[ macId ] = deviceId
522
Devin Lim58046fa2017-07-05 16:55:00 -0700523 def assignIntents( self, main ):
524 """
525 Assign intents
526 """
527 import time
528 import json
Devin Lim58046fa2017-07-05 16:55:00 -0700529 assert main, "main not defined"
530 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700531 try:
532 main.HAlabels
533 except ( NameError, AttributeError ):
534 main.log.error( "main.HAlabels not defined, setting to []" )
535 main.HAlabels = []
536 try:
537 main.HAdata
538 except ( NameError, AttributeError ):
539 main.log.error( "data not defined, setting to []" )
540 main.HAdata = []
541 main.case( "Adding host Intents" )
542 main.caseExplanation = "Discover hosts by using pingall then " +\
543 "assign predetermined host-to-host intents." +\
544 " After installation, check that the intent" +\
545 " is distributed to all nodes and the state" +\
546 " is INSTALLED"
547
548 # install onos-app-fwd
549 main.step( "Install reactive forwarding app" )
Jon Hall0e240372018-05-02 11:21:57 -0700550 installResults = main.Cluster.next().CLI.activateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700551 utilities.assert_equals( expect=main.TRUE, actual=installResults,
552 onpass="Install fwd successful",
553 onfail="Install fwd failed" )
554
555 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700556 appCheck = self.appCheck()
557 utilities.assert_equals( expect=True, actual=appCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700558 onpass="App Ids seem to be correct",
559 onfail="Something is wrong with app Ids" )
560
561 main.step( "Discovering Hosts( Via pingall for now )" )
562 # FIXME: Once we have a host discovery mechanism, use that instead
563 # REACTIVE FWD test
564 pingResult = main.FALSE
565 passMsg = "Reactive Pingall test passed"
566 time1 = time.time()
567 pingResult = main.Mininet1.pingall()
568 time2 = time.time()
569 if not pingResult:
570 main.log.warn( "First pingall failed. Trying again..." )
571 pingResult = main.Mininet1.pingall()
572 passMsg += " on the second try"
573 utilities.assert_equals(
574 expect=main.TRUE,
575 actual=pingResult,
576 onpass=passMsg,
577 onfail="Reactive Pingall failed, " +
578 "one or more ping pairs failed" )
579 main.log.info( "Time for pingall: %2f seconds" %
580 ( time2 - time1 ) )
Jon Hallca319892017-06-15 15:25:22 -0700581 if not pingResult:
Devin Lim44075962017-08-11 10:56:37 -0700582 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -0700583 # timeout for fwd flows
584 time.sleep( 11 )
585 # uninstall onos-app-fwd
586 main.step( "Uninstall reactive forwarding app" )
Jon Hall0e240372018-05-02 11:21:57 -0700587 uninstallResult = main.Cluster.next().CLI.deactivateApp( "org.onosproject.fwd" )
Devin Lim58046fa2017-07-05 16:55:00 -0700588 utilities.assert_equals( expect=main.TRUE, actual=uninstallResult,
589 onpass="Uninstall fwd successful",
590 onfail="Uninstall fwd failed" )
591
592 main.step( "Check app ids" )
Jon Hallca319892017-06-15 15:25:22 -0700593 appCheck2 = self.appCheck()
594 utilities.assert_equals( expect=True, actual=appCheck2,
Devin Lim58046fa2017-07-05 16:55:00 -0700595 onpass="App Ids seem to be correct",
596 onfail="Something is wrong with app Ids" )
597
598 main.step( "Add host intents via cli" )
599 intentIds = []
600 # TODO: move the host numbers to params
601 # Maybe look at all the paths we ping?
602 intentAddResult = True
603 hostResult = main.TRUE
604 for i in range( 8, 18 ):
605 main.log.info( "Adding host intent between h" + str( i ) +
606 " and h" + str( i + 10 ) )
607 host1 = "00:00:00:00:00:" + \
608 str( hex( i )[ 2: ] ).zfill( 2 ).upper()
609 host2 = "00:00:00:00:00:" + \
610 str( hex( i + 10 )[ 2: ] ).zfill( 2 ).upper()
611 # NOTE: getHost can return None
Jon Hall0e240372018-05-02 11:21:57 -0700612 host1Dict = main.Cluster.next().CLI.getHost( host1 )
613 host2Dict = main.Cluster.next().CLI.getHost( host2 )
Devin Lim58046fa2017-07-05 16:55:00 -0700614 host1Id = None
615 host2Id = None
616 if host1Dict and host2Dict:
617 host1Id = host1Dict.get( 'id', None )
618 host2Id = host2Dict.get( 'id', None )
619 if host1Id and host2Id:
Jon Hallca319892017-06-15 15:25:22 -0700620 nodeNum = len( main.Cluster.active() )
Devin Lim142b5342017-07-20 15:22:39 -0700621 ctrl = main.Cluster.active( i % nodeNum )
Jon Hallca319892017-06-15 15:25:22 -0700622 tmpId = ctrl.CLI.addHostIntent( host1Id, host2Id )
Devin Lim58046fa2017-07-05 16:55:00 -0700623 if tmpId:
624 main.log.info( "Added intent with id: " + tmpId )
625 intentIds.append( tmpId )
626 else:
627 main.log.error( "addHostIntent returned: " +
628 repr( tmpId ) )
629 else:
630 main.log.error( "Error, getHost() failed for h" + str( i ) +
631 " and/or h" + str( i + 10 ) )
Devin Lime9f0ccf2017-08-11 17:25:12 -0700632 hosts = main.Cluster.next().CLI.hosts()
Devin Lim58046fa2017-07-05 16:55:00 -0700633 try:
Jon Hallca319892017-06-15 15:25:22 -0700634 output = json.dumps( json.loads( hosts ),
635 sort_keys=True,
636 indent=4,
637 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700638 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -0700639 output = repr( hosts )
640 main.log.debug( "Hosts output: %s" % output )
Devin Lim58046fa2017-07-05 16:55:00 -0700641 hostResult = main.FALSE
642 utilities.assert_equals( expect=main.TRUE, actual=hostResult,
643 onpass="Found a host id for each host",
644 onfail="Error looking up host ids" )
645
646 intentStart = time.time()
Jon Hall0e240372018-05-02 11:21:57 -0700647 onosIds = main.Cluster.next().getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700648 main.log.info( "Submitted intents: " + str( intentIds ) )
649 main.log.info( "Intents in ONOS: " + str( onosIds ) )
650 for intent in intentIds:
651 if intent in onosIds:
652 pass # intent submitted is in onos
653 else:
654 intentAddResult = False
655 if intentAddResult:
656 intentStop = time.time()
657 else:
658 intentStop = None
659 # Print the intent states
Jon Hall0e240372018-05-02 11:21:57 -0700660 intents = main.Cluster.next().CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700661 intentStates = []
662 installedCheck = True
663 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
664 count = 0
665 try:
666 for intent in json.loads( intents ):
667 state = intent.get( 'state', None )
668 if "INSTALLED" not in state:
669 installedCheck = False
670 intentId = intent.get( 'id', None )
671 intentStates.append( ( intentId, state ) )
672 except ( ValueError, TypeError ):
673 main.log.exception( "Error parsing intents" )
674 # add submitted intents not in the store
675 tmplist = [ i for i, s in intentStates ]
676 missingIntents = False
677 for i in intentIds:
678 if i not in tmplist:
679 intentStates.append( ( i, " - " ) )
680 missingIntents = True
681 intentStates.sort()
682 for i, s in intentStates:
683 count += 1
684 main.log.info( "%-6s%-15s%-15s" %
685 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700686 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700687
688 intentAddResult = bool( intentAddResult and not missingIntents and
689 installedCheck )
690 if not intentAddResult:
691 main.log.error( "Error in pushing host intents to ONOS" )
692
693 main.step( "Intent Anti-Entropy dispersion" )
694 for j in range( 100 ):
695 correct = True
696 main.log.info( "Submitted intents: " + str( sorted( intentIds ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700697 for ctrl in main.Cluster.active():
Devin Lim58046fa2017-07-05 16:55:00 -0700698 onosIds = []
Jon Hallca319892017-06-15 15:25:22 -0700699 ids = ctrl.getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700700 onosIds.append( ids )
Jon Hallca319892017-06-15 15:25:22 -0700701 main.log.debug( "Intents in " + ctrl.name + ": " +
Devin Lim58046fa2017-07-05 16:55:00 -0700702 str( sorted( onosIds ) ) )
703 if sorted( ids ) != sorted( intentIds ):
704 main.log.warn( "Set of intent IDs doesn't match" )
705 correct = False
706 break
707 else:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700708 intents = json.loads( ctrl.CLI.intents() )
Devin Lim58046fa2017-07-05 16:55:00 -0700709 for intent in intents:
710 if intent[ 'state' ] != "INSTALLED":
711 main.log.warn( "Intent " + intent[ 'id' ] +
712 " is " + intent[ 'state' ] )
713 correct = False
714 break
715 if correct:
716 break
717 else:
718 time.sleep( 1 )
719 if not intentStop:
720 intentStop = time.time()
721 global gossipTime
722 gossipTime = intentStop - intentStart
723 main.log.info( "It took about " + str( gossipTime ) +
724 " seconds for all intents to appear in each node" )
725 append = False
726 title = "Gossip Intents"
727 count = 1
728 while append is False:
729 curTitle = title + str( count )
730 if curTitle not in main.HAlabels:
731 main.HAlabels.append( curTitle )
732 main.HAdata.append( str( gossipTime ) )
733 append = True
734 else:
735 count += 1
736 gossipPeriod = int( main.params[ 'timers' ][ 'gossip' ] )
Devin Lim142b5342017-07-20 15:22:39 -0700737 maxGossipTime = gossipPeriod * len( main.Cluster.runningNodes )
Devin Lim58046fa2017-07-05 16:55:00 -0700738 utilities.assert_greater_equals(
739 expect=maxGossipTime, actual=gossipTime,
740 onpass="ECM anti-entropy for intents worked within " +
741 "expected time",
742 onfail="Intent ECM anti-entropy took too long. " +
743 "Expected time:{}, Actual time:{}".format( maxGossipTime,
744 gossipTime ) )
745 if gossipTime <= maxGossipTime:
746 intentAddResult = True
747
Jon Hallca319892017-06-15 15:25:22 -0700748 pendingMap = main.Cluster.next().pendingMap()
Devin Lim58046fa2017-07-05 16:55:00 -0700749 if not intentAddResult or "key" in pendingMap:
Devin Lim58046fa2017-07-05 16:55:00 -0700750 installedCheck = True
751 main.log.info( "Sleeping 60 seconds to see if intents are found" )
752 time.sleep( 60 )
Jon Hall0e240372018-05-02 11:21:57 -0700753 onosIds = main.Cluster.next().getAllIntentsId()
Devin Lim58046fa2017-07-05 16:55:00 -0700754 main.log.info( "Submitted intents: " + str( intentIds ) )
755 main.log.info( "Intents in ONOS: " + str( onosIds ) )
756 # Print the intent states
Jon Hall0e240372018-05-02 11:21:57 -0700757 intents = main.Cluster.next().CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700758 intentStates = []
759 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
760 count = 0
761 try:
762 for intent in json.loads( intents ):
763 # Iter through intents of a node
764 state = intent.get( 'state', None )
765 if "INSTALLED" not in state:
766 installedCheck = False
767 intentId = intent.get( 'id', None )
768 intentStates.append( ( intentId, state ) )
769 except ( ValueError, TypeError ):
770 main.log.exception( "Error parsing intents" )
771 # add submitted intents not in the store
772 tmplist = [ i for i, s in intentStates ]
773 for i in intentIds:
774 if i not in tmplist:
775 intentStates.append( ( i, " - " ) )
776 intentStates.sort()
777 for i, s in intentStates:
778 count += 1
779 main.log.info( "%-6s%-15s%-15s" %
780 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700781 self.topicsCheck( [ "org.onosproject.election" ] )
782 self.partitionsCheck()
783 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700784
Jon Hallca319892017-06-15 15:25:22 -0700785 def pingAcrossHostIntent( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -0700786 """
787 Ping across added host intents
788 """
789 import json
790 import time
Devin Lim58046fa2017-07-05 16:55:00 -0700791 assert main, "main not defined"
792 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -0700793 main.case( "Verify connectivity by sending traffic across Intents" )
794 main.caseExplanation = "Ping across added host intents to check " +\
795 "functionality and check the state of " +\
796 "the intent"
797
Jon Hallca319892017-06-15 15:25:22 -0700798 onosCli = main.Cluster.next()
Devin Lim58046fa2017-07-05 16:55:00 -0700799 main.step( "Check Intent state" )
800 installedCheck = False
801 loopCount = 0
Jon Hall5d5876e2017-11-30 09:33:16 -0800802 while not installedCheck and loopCount < 90:
Devin Lim58046fa2017-07-05 16:55:00 -0700803 installedCheck = True
804 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700805 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700806 intentStates = []
807 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
808 count = 0
809 # Iter through intents of a node
810 try:
811 for intent in json.loads( intents ):
812 state = intent.get( 'state', None )
813 if "INSTALLED" not in state:
814 installedCheck = False
Jon Hall8bafdc02017-09-05 11:36:26 -0700815 main.log.debug( "Failed intent: " + str( intent ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700816 intentId = intent.get( 'id', None )
817 intentStates.append( ( intentId, state ) )
818 except ( ValueError, TypeError ):
819 main.log.exception( "Error parsing intents." )
820 # Print states
821 intentStates.sort()
822 for i, s in intentStates:
823 count += 1
824 main.log.info( "%-6s%-15s%-15s" %
825 ( str( count ), str( i ), str( s ) ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700826 if not installedCheck:
827 time.sleep( 1 )
828 loopCount += 1
829 utilities.assert_equals( expect=True, actual=installedCheck,
830 onpass="Intents are all INSTALLED",
831 onfail="Intents are not all in " +
832 "INSTALLED state" )
833
834 main.step( "Ping across added host intents" )
835 PingResult = main.TRUE
836 for i in range( 8, 18 ):
837 ping = main.Mininet1.pingHost( src="h" + str( i ),
838 target="h" + str( i + 10 ) )
839 PingResult = PingResult and ping
840 if ping == main.FALSE:
841 main.log.warn( "Ping failed between h" + str( i ) +
842 " and h" + str( i + 10 ) )
843 elif ping == main.TRUE:
844 main.log.info( "Ping test passed!" )
845 # Don't set PingResult or you'd override failures
846 if PingResult == main.FALSE:
847 main.log.error(
848 "Intents have not been installed correctly, pings failed." )
849 # TODO: pretty print
Devin Lim58046fa2017-07-05 16:55:00 -0700850 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700851 tmpIntents = onosCli.CLI.intents()
Jon Hallca319892017-06-15 15:25:22 -0700852 output = json.dumps( json.loads( tmpIntents ),
853 sort_keys=True,
854 indent=4,
855 separators=( ',', ': ' ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700856 except ( ValueError, TypeError ):
Jon Hall4173b242017-09-12 17:04:38 -0700857 output = repr( tmpIntents )
Jon Hallca319892017-06-15 15:25:22 -0700858 main.log.debug( "ONOS1 intents: " + output )
Devin Lim58046fa2017-07-05 16:55:00 -0700859 utilities.assert_equals(
860 expect=main.TRUE,
861 actual=PingResult,
862 onpass="Intents have been installed correctly and pings work",
863 onfail="Intents have not been installed correctly, pings failed." )
864
865 main.step( "Check leadership of topics" )
Jon Hallca319892017-06-15 15:25:22 -0700866 topicsCheck = self.topicsCheck()
867 utilities.assert_equals( expect=False, actual=topicsCheck,
Devin Lim58046fa2017-07-05 16:55:00 -0700868 onpass="intent Partitions is in leaders",
Jon Hallca319892017-06-15 15:25:22 -0700869 onfail="Some topics were lost" )
870 self.partitionsCheck()
871 self.pendingMapCheck()
Devin Lim58046fa2017-07-05 16:55:00 -0700872
873 if not installedCheck:
874 main.log.info( "Waiting 60 seconds to see if the state of " +
875 "intents change" )
876 time.sleep( 60 )
877 # Print the intent states
Devin Lime9f0ccf2017-08-11 17:25:12 -0700878 intents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700879 intentStates = []
880 main.log.info( "%-6s%-15s%-15s" % ( 'Count', 'ID', 'State' ) )
881 count = 0
882 # Iter through intents of a node
883 try:
884 for intent in json.loads( intents ):
885 state = intent.get( 'state', None )
886 if "INSTALLED" not in state:
887 installedCheck = False
888 intentId = intent.get( 'id', None )
889 intentStates.append( ( intentId, state ) )
890 except ( ValueError, TypeError ):
891 main.log.exception( "Error parsing intents." )
892 intentStates.sort()
893 for i, s in intentStates:
894 count += 1
895 main.log.info( "%-6s%-15s%-15s" %
896 ( str( count ), str( i ), str( s ) ) )
Jon Hallca319892017-06-15 15:25:22 -0700897 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -0700898
Devin Lim58046fa2017-07-05 16:55:00 -0700899 main.step( "Wait a minute then ping again" )
900 # the wait is above
901 PingResult = main.TRUE
902 for i in range( 8, 18 ):
903 ping = main.Mininet1.pingHost( src="h" + str( i ),
904 target="h" + str( i + 10 ) )
905 PingResult = PingResult and ping
906 if ping == main.FALSE:
907 main.log.warn( "Ping failed between h" + str( i ) +
908 " and h" + str( i + 10 ) )
909 elif ping == main.TRUE:
910 main.log.info( "Ping test passed!" )
911 # Don't set PingResult or you'd override failures
912 if PingResult == main.FALSE:
913 main.log.error(
914 "Intents have not been installed correctly, pings failed." )
915 # TODO: pretty print
Jon Hallca319892017-06-15 15:25:22 -0700916 main.log.warn( str( onosCli.name ) + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -0700917 try:
Devin Lime9f0ccf2017-08-11 17:25:12 -0700918 tmpIntents = onosCli.CLI.intents()
Devin Lim58046fa2017-07-05 16:55:00 -0700919 main.log.warn( json.dumps( json.loads( tmpIntents ),
920 sort_keys=True,
921 indent=4,
922 separators=( ',', ': ' ) ) )
923 except ( ValueError, TypeError ):
924 main.log.warn( repr( tmpIntents ) )
925 utilities.assert_equals(
926 expect=main.TRUE,
927 actual=PingResult,
928 onpass="Intents have been installed correctly and pings work",
929 onfail="Intents have not been installed correctly, pings failed." )
930
Devin Lim142b5342017-07-20 15:22:39 -0700931 def checkRoleNotNull( self ):
Devin Lim58046fa2017-07-05 16:55:00 -0700932 main.step( "Check that each switch has a master" )
Devin Lim58046fa2017-07-05 16:55:00 -0700933 # Assert that each device has a master
Devin Lim142b5342017-07-20 15:22:39 -0700934 rolesNotNull = main.Cluster.command( "rolesNotNull", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -0700935 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -0700936 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -0700937 actual=rolesNotNull,
938 onpass="Each device has a master",
939 onfail="Some devices don't have a master assigned" )
940
Devin Lim142b5342017-07-20 15:22:39 -0700941 def checkTheRole( self ):
942 main.step( "Read device roles from ONOS" )
Jon Hallca319892017-06-15 15:25:22 -0700943 ONOSMastership = main.Cluster.command( "roles" )
Devin Lim58046fa2017-07-05 16:55:00 -0700944 consistentMastership = True
945 rolesResults = True
Devin Lim58046fa2017-07-05 16:55:00 -0700946 for i in range( len( ONOSMastership ) ):
Devin Lim142b5342017-07-20 15:22:39 -0700947 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -0700948 if not ONOSMastership[ i ] or "Error" in ONOSMastership[ i ]:
Jon Hallca319892017-06-15 15:25:22 -0700949 main.log.error( "Error in getting " + node + " roles" )
950 main.log.warn( node + " mastership response: " +
Devin Lim58046fa2017-07-05 16:55:00 -0700951 repr( ONOSMastership[ i ] ) )
952 rolesResults = False
953 utilities.assert_equals(
954 expect=True,
955 actual=rolesResults,
956 onpass="No error in reading roles output",
957 onfail="Error in reading roles from ONOS" )
958
959 main.step( "Check for consistency in roles from each controller" )
960 if all( [ i == ONOSMastership[ 0 ] for i in ONOSMastership ] ):
961 main.log.info(
962 "Switch roles are consistent across all ONOS nodes" )
963 else:
964 consistentMastership = False
965 utilities.assert_equals(
966 expect=True,
967 actual=consistentMastership,
968 onpass="Switch roles are consistent across all ONOS nodes",
969 onfail="ONOS nodes have different views of switch roles" )
Devin Lim142b5342017-07-20 15:22:39 -0700970 return ONOSMastership, rolesResults, consistentMastership
971
972 def checkingIntents( self ):
973 main.step( "Get the intents from each controller" )
974 ONOSIntents = main.Cluster.command( "intents", specificDriver=2 )
975 intentsResults = True
976 for i in range( len( ONOSIntents ) ):
977 node = str( main.Cluster.active( i ) )
978 if not ONOSIntents[ i ] or "Error" in ONOSIntents[ i ]:
979 main.log.error( "Error in getting " + node + " intents" )
980 main.log.warn( node + " intents response: " +
981 repr( ONOSIntents[ i ] ) )
982 intentsResults = False
983 utilities.assert_equals(
984 expect=True,
985 actual=intentsResults,
986 onpass="No error in reading intents output",
987 onfail="Error in reading intents from ONOS" )
988 return ONOSIntents, intentsResults
989
990 def readingState( self, main ):
991 """
992 Reading state of ONOS
993 """
994 import json
Devin Lim142b5342017-07-20 15:22:39 -0700995 assert main, "main not defined"
996 assert utilities.assert_equals, "utilities.assert_equals not defined"
997 try:
998 from tests.dependencies.topology import Topology
999 except ImportError:
1000 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07001001 main.cleanAndExit()
Devin Lim142b5342017-07-20 15:22:39 -07001002 try:
1003 main.topoRelated
1004 except ( NameError, AttributeError ):
1005 main.topoRelated = Topology()
1006 main.case( "Setting up and gathering data for current state" )
1007 # The general idea for this test case is to pull the state of
1008 # ( intents,flows, topology,... ) from each ONOS node
1009 # We can then compare them with each other and also with past states
1010
1011 global mastershipState
1012 mastershipState = '[]'
1013
1014 self.checkRoleNotNull()
1015
1016 main.step( "Get the Mastership of each switch from each controller" )
1017 mastershipCheck = main.FALSE
1018
1019 ONOSMastership, consistentMastership, rolesResults = self.checkTheRole()
Devin Lim58046fa2017-07-05 16:55:00 -07001020
1021 if rolesResults and not consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001022 for i in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001023 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001024 try:
1025 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001026 node + " roles: ",
Devin Lim58046fa2017-07-05 16:55:00 -07001027 json.dumps(
1028 json.loads( ONOSMastership[ i ] ),
1029 sort_keys=True,
1030 indent=4,
1031 separators=( ',', ': ' ) ) )
1032 except ( ValueError, TypeError ):
1033 main.log.warn( repr( ONOSMastership[ i ] ) )
1034 elif rolesResults and consistentMastership:
Jon Hallca319892017-06-15 15:25:22 -07001035 mastershipCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001036 mastershipState = ONOSMastership[ 0 ]
1037
Devin Lim58046fa2017-07-05 16:55:00 -07001038 global intentState
1039 intentState = []
Devin Lim142b5342017-07-20 15:22:39 -07001040 ONOSIntents, intentsResults = self.checkingIntents()
Jon Hallca319892017-06-15 15:25:22 -07001041 intentCheck = main.FALSE
1042 consistentIntents = True
Devin Lim142b5342017-07-20 15:22:39 -07001043
Devin Lim58046fa2017-07-05 16:55:00 -07001044 main.step( "Check for consistency in Intents from each controller" )
1045 if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
1046 main.log.info( "Intents are consistent across all ONOS " +
1047 "nodes" )
1048 else:
1049 consistentIntents = False
1050 main.log.error( "Intents not consistent" )
1051 utilities.assert_equals(
1052 expect=True,
1053 actual=consistentIntents,
1054 onpass="Intents are consistent across all ONOS nodes",
1055 onfail="ONOS nodes have different views of intents" )
1056
1057 if intentsResults:
1058 # Try to make it easy to figure out what is happening
1059 #
1060 # Intent ONOS1 ONOS2 ...
1061 # 0x01 INSTALLED INSTALLING
1062 # ... ... ...
1063 # ... ... ...
1064 title = " Id"
Jon Hallca319892017-06-15 15:25:22 -07001065 for ctrl in main.Cluster.active():
1066 title += " " * 10 + ctrl.name
Devin Lim58046fa2017-07-05 16:55:00 -07001067 main.log.warn( title )
1068 # get all intent keys in the cluster
1069 keys = []
1070 try:
1071 # Get the set of all intent keys
1072 for nodeStr in ONOSIntents:
1073 node = json.loads( nodeStr )
1074 for intent in node:
1075 keys.append( intent.get( 'id' ) )
1076 keys = set( keys )
1077 # For each intent key, print the state on each node
1078 for key in keys:
1079 row = "%-13s" % key
1080 for nodeStr in ONOSIntents:
1081 node = json.loads( nodeStr )
1082 for intent in node:
1083 if intent.get( 'id', "Error" ) == key:
1084 row += "%-15s" % intent.get( 'state' )
1085 main.log.warn( row )
1086 # End of intent state table
1087 except ValueError as e:
1088 main.log.exception( e )
1089 main.log.debug( "nodeStr was: " + repr( nodeStr ) )
1090
1091 if intentsResults and not consistentIntents:
1092 # print the json objects
Jon Hallca319892017-06-15 15:25:22 -07001093 main.log.debug( ctrl.name + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001094 main.log.debug( json.dumps( json.loads( ONOSIntents[ -1 ] ),
1095 sort_keys=True,
1096 indent=4,
1097 separators=( ',', ': ' ) ) )
1098 for i in range( len( ONOSIntents ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001099 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001100 if ONOSIntents[ i ] != ONOSIntents[ -1 ]:
Jon Hallca319892017-06-15 15:25:22 -07001101 main.log.debug( node + " intents: " )
Devin Lim58046fa2017-07-05 16:55:00 -07001102 main.log.debug( json.dumps( json.loads( ONOSIntents[ i ] ),
1103 sort_keys=True,
1104 indent=4,
1105 separators=( ',', ': ' ) ) )
1106 else:
Jon Hallca319892017-06-15 15:25:22 -07001107 main.log.debug( node + " intents match " + ctrl.name + " intents" )
Devin Lim58046fa2017-07-05 16:55:00 -07001108 elif intentsResults and consistentIntents:
Jon Hallca319892017-06-15 15:25:22 -07001109 intentCheck = main.TRUE
Devin Lim58046fa2017-07-05 16:55:00 -07001110 intentState = ONOSIntents[ 0 ]
1111
1112 main.step( "Get the flows from each controller" )
1113 global flowState
1114 flowState = []
Jon Hall4173b242017-09-12 17:04:38 -07001115 ONOSFlows = main.Cluster.command( "flows", specificDriver=2 ) # TODO: Possible arg: sleep = 30
Devin Lim58046fa2017-07-05 16:55:00 -07001116 ONOSFlowsJson = []
1117 flowCheck = main.FALSE
1118 consistentFlows = True
1119 flowsResults = True
Devin Lim58046fa2017-07-05 16:55:00 -07001120 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001121 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001122 if not ONOSFlows[ i ] or "Error" in ONOSFlows[ i ]:
Jon Hallca319892017-06-15 15:25:22 -07001123 main.log.error( "Error in getting " + node + " flows" )
1124 main.log.warn( node + " flows response: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001125 repr( ONOSFlows[ i ] ) )
1126 flowsResults = False
1127 ONOSFlowsJson.append( None )
1128 else:
1129 try:
1130 ONOSFlowsJson.append( json.loads( ONOSFlows[ i ] ) )
1131 except ( ValueError, TypeError ):
1132 # FIXME: change this to log.error?
Jon Hallca319892017-06-15 15:25:22 -07001133 main.log.exception( "Error in parsing " + node +
Devin Lim58046fa2017-07-05 16:55:00 -07001134 " response as json." )
1135 main.log.error( repr( ONOSFlows[ i ] ) )
1136 ONOSFlowsJson.append( None )
1137 flowsResults = False
1138 utilities.assert_equals(
1139 expect=True,
1140 actual=flowsResults,
1141 onpass="No error in reading flows output",
1142 onfail="Error in reading flows from ONOS" )
1143
1144 main.step( "Check for consistency in Flows from each controller" )
1145 tmp = [ len( i ) == len( ONOSFlowsJson[ 0 ] ) for i in ONOSFlowsJson ]
1146 if all( tmp ):
1147 main.log.info( "Flow count is consistent across all ONOS nodes" )
1148 else:
1149 consistentFlows = False
1150 utilities.assert_equals(
1151 expect=True,
1152 actual=consistentFlows,
1153 onpass="The flow count is consistent across all ONOS nodes",
1154 onfail="ONOS nodes have different flow counts" )
1155
1156 if flowsResults and not consistentFlows:
1157 for i in range( len( ONOSFlows ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001158 node = str( main.Cluster.active( i ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001159 try:
1160 main.log.warn(
Jon Hallca319892017-06-15 15:25:22 -07001161 node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001162 json.dumps( json.loads( ONOSFlows[ i ] ), sort_keys=True,
1163 indent=4, separators=( ',', ': ' ) ) )
1164 except ( ValueError, TypeError ):
Jon Hallca319892017-06-15 15:25:22 -07001165 main.log.warn( node + " flows: " +
Devin Lim58046fa2017-07-05 16:55:00 -07001166 repr( ONOSFlows[ i ] ) )
1167 elif flowsResults and consistentFlows:
1168 flowCheck = main.TRUE
1169 flowState = ONOSFlows[ 0 ]
1170
1171 main.step( "Get the OF Table entries" )
1172 global flows
Jon Hallab611372018-02-21 15:26:05 -08001173 flows = {}
1174 for swName, swDetails in main.Mininet1.getSwitches().items():
1175 main.log.debug( repr( swName ) + repr( swDetails ) )
1176 flows[ swName ] = main.Mininet1.getFlowTable( swName, version="1.3", debug=False )
Devin Lim58046fa2017-07-05 16:55:00 -07001177 if flowCheck == main.FALSE:
1178 for table in flows:
1179 main.log.warn( table )
1180 # TODO: Compare switch flow tables with ONOS flow tables
1181
1182 main.step( "Start continuous pings" )
Jon Hallab611372018-02-21 15:26:05 -08001183 if main.params.get( 'PING', False ):
1184 # TODO: Make this more dynamic and less hardcoded, ie, # or ping pairs
1185 main.Mininet2.pingLong(
1186 src=main.params[ 'PING' ][ 'source1' ],
1187 target=main.params[ 'PING' ][ 'target1' ],
1188 pingTime=500 )
1189 main.Mininet2.pingLong(
1190 src=main.params[ 'PING' ][ 'source2' ],
1191 target=main.params[ 'PING' ][ 'target2' ],
1192 pingTime=500 )
1193 main.Mininet2.pingLong(
1194 src=main.params[ 'PING' ][ 'source3' ],
1195 target=main.params[ 'PING' ][ 'target3' ],
1196 pingTime=500 )
1197 main.Mininet2.pingLong(
1198 src=main.params[ 'PING' ][ 'source4' ],
1199 target=main.params[ 'PING' ][ 'target4' ],
1200 pingTime=500 )
1201 main.Mininet2.pingLong(
1202 src=main.params[ 'PING' ][ 'source5' ],
1203 target=main.params[ 'PING' ][ 'target5' ],
1204 pingTime=500 )
1205 main.Mininet2.pingLong(
1206 src=main.params[ 'PING' ][ 'source6' ],
1207 target=main.params[ 'PING' ][ 'target6' ],
1208 pingTime=500 )
1209 main.Mininet2.pingLong(
1210 src=main.params[ 'PING' ][ 'source7' ],
1211 target=main.params[ 'PING' ][ 'target7' ],
1212 pingTime=500 )
1213 main.Mininet2.pingLong(
1214 src=main.params[ 'PING' ][ 'source8' ],
1215 target=main.params[ 'PING' ][ 'target8' ],
1216 pingTime=500 )
1217 main.Mininet2.pingLong(
1218 src=main.params[ 'PING' ][ 'source9' ],
1219 target=main.params[ 'PING' ][ 'target9' ],
1220 pingTime=500 )
1221 main.Mininet2.pingLong(
1222 src=main.params[ 'PING' ][ 'source10' ],
1223 target=main.params[ 'PING' ][ 'target10' ],
1224 pingTime=500 )
Devin Lim58046fa2017-07-05 16:55:00 -07001225
1226 main.step( "Collecting topology information from ONOS" )
Devin Lim142b5342017-07-20 15:22:39 -07001227 devices = main.topoRelated.getAll( "devices" )
1228 hosts = main.topoRelated.getAll( "hosts", inJson=True )
1229 ports = main.topoRelated.getAll( "ports" )
1230 links = main.topoRelated.getAll( "links" )
1231 clusters = main.topoRelated.getAll( "clusters" )
Devin Lim58046fa2017-07-05 16:55:00 -07001232 # Compare json objects for hosts and dataplane clusters
1233
1234 # hosts
1235 main.step( "Host view is consistent across ONOS nodes" )
1236 consistentHostsResult = main.TRUE
1237 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001238 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001239 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1240 if hosts[ controller ] == hosts[ 0 ]:
1241 continue
1242 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07001243 main.log.error( "hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001244 controllerStr +
1245 " is inconsistent with ONOS1" )
1246 main.log.warn( repr( hosts[ controller ] ) )
1247 consistentHostsResult = main.FALSE
1248
1249 else:
Jon Hallca319892017-06-15 15:25:22 -07001250 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07001251 controllerStr )
1252 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001253 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001254 " hosts response: " +
1255 repr( hosts[ controller ] ) )
1256 utilities.assert_equals(
1257 expect=main.TRUE,
1258 actual=consistentHostsResult,
1259 onpass="Hosts view is consistent across all ONOS nodes",
1260 onfail="ONOS nodes have different views of hosts" )
1261
1262 main.step( "Each host has an IP address" )
1263 ipResult = main.TRUE
1264 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001265 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001266 if hosts[ controller ]:
1267 for host in hosts[ controller ]:
1268 if not host.get( 'ipAddresses', [] ):
Jon Hallca319892017-06-15 15:25:22 -07001269 main.log.error( "Error with host ips on " +
Devin Lim58046fa2017-07-05 16:55:00 -07001270 controllerStr + ": " + str( host ) )
1271 ipResult = main.FALSE
1272 utilities.assert_equals(
1273 expect=main.TRUE,
1274 actual=ipResult,
1275 onpass="The ips of the hosts aren't empty",
1276 onfail="The ip of at least one host is missing" )
1277
1278 # Strongly connected clusters of devices
1279 main.step( "Cluster view is consistent across ONOS nodes" )
1280 consistentClustersResult = main.TRUE
1281 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001282 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001283 if "Error" not in clusters[ controller ]:
1284 if clusters[ controller ] == clusters[ 0 ]:
1285 continue
1286 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07001287 main.log.error( "clusters from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001288 " is inconsistent with ONOS1" )
1289 consistentClustersResult = main.FALSE
1290
1291 else:
1292 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07001293 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07001294 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001295 main.log.warn( controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001296 " clusters response: " +
1297 repr( clusters[ controller ] ) )
1298 utilities.assert_equals(
1299 expect=main.TRUE,
1300 actual=consistentClustersResult,
1301 onpass="Clusters view is consistent across all ONOS nodes",
1302 onfail="ONOS nodes have different views of clusters" )
1303 if not consistentClustersResult:
1304 main.log.debug( clusters )
1305
1306 # there should always only be one cluster
1307 main.step( "Cluster view correct across ONOS nodes" )
1308 try:
1309 numClusters = len( json.loads( clusters[ 0 ] ) )
1310 except ( ValueError, TypeError ):
1311 main.log.exception( "Error parsing clusters[0]: " +
1312 repr( clusters[ 0 ] ) )
1313 numClusters = "ERROR"
1314 utilities.assert_equals(
1315 expect=1,
1316 actual=numClusters,
1317 onpass="ONOS shows 1 SCC",
1318 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
1319
1320 main.step( "Comparing ONOS topology to MN" )
1321 devicesResults = main.TRUE
1322 linksResults = main.TRUE
1323 hostsResults = main.TRUE
1324 mnSwitches = main.Mininet1.getSwitches()
1325 mnLinks = main.Mininet1.getLinks()
1326 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07001327 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07001328 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07001329 currentDevicesResult = main.topoRelated.compareDevicePort(
1330 main.Mininet1, controller,
1331 mnSwitches, devices, ports )
1332 utilities.assert_equals( expect=main.TRUE,
1333 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07001334 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001335 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001336 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001337 " Switches view is incorrect" )
1338
1339 currentLinksResult = main.topoRelated.compareBase( links, controller,
1340 main.Mininet1.compareLinks,
1341 [ mnSwitches, mnLinks ] )
1342 utilities.assert_equals( expect=main.TRUE,
1343 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07001344 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001345 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07001346 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001347 " links view is incorrect" )
1348
1349 if hosts[ controller ] and "Error" not in hosts[ controller ]:
1350 currentHostsResult = main.Mininet1.compareHosts(
1351 mnHosts,
1352 hosts[ controller ] )
1353 else:
1354 currentHostsResult = main.FALSE
1355 utilities.assert_equals( expect=main.TRUE,
1356 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07001357 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001358 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07001359 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07001360 " hosts don't match Mininet" )
1361
1362 devicesResults = devicesResults and currentDevicesResult
1363 linksResults = linksResults and currentLinksResult
1364 hostsResults = hostsResults and currentHostsResult
1365
1366 main.step( "Device information is correct" )
1367 utilities.assert_equals(
1368 expect=main.TRUE,
1369 actual=devicesResults,
1370 onpass="Device information is correct",
1371 onfail="Device information is incorrect" )
1372
1373 main.step( "Links are correct" )
1374 utilities.assert_equals(
1375 expect=main.TRUE,
1376 actual=linksResults,
1377 onpass="Link are correct",
1378 onfail="Links are incorrect" )
1379
1380 main.step( "Hosts are correct" )
1381 utilities.assert_equals(
1382 expect=main.TRUE,
1383 actual=hostsResults,
1384 onpass="Hosts are correct",
1385 onfail="Hosts are incorrect" )
1386
1387 def checkDistPrimitivesFunc( self, main ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001388 """
1389 Check for basic functionality with distributed primitives
1390 """
Jon Halle0f0b342017-04-18 11:43:47 -07001391 # TODO: Clean this up so it's not just a cut/paste from the test
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001392 try:
1393 # Make sure variables are defined/set
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001394 assert utilities.assert_equals, "utilities.assert_equals not defined"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001395 assert main.pCounterName, "main.pCounterName not defined"
1396 assert main.onosSetName, "main.onosSetName not defined"
1397 # NOTE: assert fails if value is 0/None/Empty/False
1398 try:
1399 main.pCounterValue
Devin Lim58046fa2017-07-05 16:55:00 -07001400 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001401 main.log.error( "main.pCounterValue not defined, setting to 0" )
1402 main.pCounterValue = 0
1403 try:
1404 main.onosSet
Devin Lim58046fa2017-07-05 16:55:00 -07001405 except ( NameError, AttributeError ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001406 main.log.error( "main.onosSet not defined, setting to empty Set" )
Jon Hallf37d44d2017-05-24 10:37:30 -07001407 main.onosSet = set( [] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001408 # Variables for the distributed primitives tests. These are local only
1409 addValue = "a"
1410 addAllValue = "a b c d e f"
1411 retainValue = "c d e f"
Jon Halle0f0b342017-04-18 11:43:47 -07001412 valueName = "TestON-Value"
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001413 valueValue = None
Jon Halle0f0b342017-04-18 11:43:47 -07001414 workQueueName = "TestON-Queue"
1415 workQueueCompleted = 0
1416 workQueueInProgress = 0
1417 workQueuePending = 0
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001418
1419 description = "Check for basic functionality with distributed " +\
1420 "primitives"
1421 main.case( description )
1422 main.caseExplanation = "Test the methods of the distributed " +\
1423 "primitives (counters and sets) throught the cli"
1424 # DISTRIBUTED ATOMIC COUNTERS
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001425 main.step( "Increment then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001426 pCounters = main.Cluster.command( "counterTestAddAndGet",
1427 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001428 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001429 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001430 main.pCounterValue += 1
1431 addedPValues.append( main.pCounterValue )
Jon Hallca319892017-06-15 15:25:22 -07001432 # Check that counter incremented once per controller
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001433 pCounterResults = True
1434 for i in addedPValues:
1435 tmpResult = i in pCounters
1436 pCounterResults = pCounterResults and tmpResult
1437 if not tmpResult:
1438 main.log.error( str( i ) + " is not in partitioned "
1439 "counter incremented results" )
1440 utilities.assert_equals( expect=True,
1441 actual=pCounterResults,
1442 onpass="Default counter incremented",
1443 onfail="Error incrementing default" +
1444 " counter" )
1445
1446 main.step( "Get then Increment a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001447 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1448 args=[ main.pCounterName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001449 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001450 for i in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001451 addedPValues.append( main.pCounterValue )
1452 main.pCounterValue += 1
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001453 # Check that counter incremented numController times
1454 pCounterResults = True
1455 for i in addedPValues:
1456 tmpResult = i in pCounters
1457 pCounterResults = pCounterResults and tmpResult
1458 if not tmpResult:
1459 main.log.error( str( i ) + " is not in partitioned "
1460 "counter incremented results" )
1461 utilities.assert_equals( expect=True,
1462 actual=pCounterResults,
1463 onpass="Default counter incremented",
1464 onfail="Error incrementing default" +
1465 " counter" )
1466
1467 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001468 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001469 utilities.assert_equals( expect=main.TRUE,
1470 actual=incrementCheck,
1471 onpass="Added counters are correct",
1472 onfail="Added counters are incorrect" )
1473
1474 main.step( "Add -8 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001475 pCounters = main.Cluster.command( "counterTestAddAndGet",
1476 args=[ main.pCounterName ],
1477 kwargs={ "delta": -8 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001478 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001479 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001480 main.pCounterValue += -8
1481 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001482 # Check that counter incremented numController times
1483 pCounterResults = True
1484 for i in addedPValues:
1485 tmpResult = i in pCounters
1486 pCounterResults = pCounterResults and tmpResult
1487 if not tmpResult:
1488 main.log.error( str( i ) + " is not in partitioned "
1489 "counter incremented results" )
1490 utilities.assert_equals( expect=True,
1491 actual=pCounterResults,
1492 onpass="Default counter incremented",
1493 onfail="Error incrementing default" +
1494 " counter" )
1495
1496 main.step( "Add 5 to then get a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001497 pCounters = main.Cluster.command( "counterTestAddAndGet",
1498 args=[ main.pCounterName ],
1499 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001500 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001501 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001502 main.pCounterValue += 5
1503 addedPValues.append( main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001504
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001505 # Check that counter incremented numController times
1506 pCounterResults = True
1507 for i in addedPValues:
1508 tmpResult = i in pCounters
1509 pCounterResults = pCounterResults and tmpResult
1510 if not tmpResult:
1511 main.log.error( str( i ) + " is not in partitioned "
1512 "counter incremented results" )
1513 utilities.assert_equals( expect=True,
1514 actual=pCounterResults,
1515 onpass="Default counter incremented",
1516 onfail="Error incrementing default" +
1517 " counter" )
1518
1519 main.step( "Get then add 5 to a default counter on each node" )
Jon Hallca319892017-06-15 15:25:22 -07001520 pCounters = main.Cluster.command( "counterTestGetAndAdd",
1521 args=[ main.pCounterName ],
1522 kwargs={ "delta": 5 } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001523 addedPValues = []
Jon Hallca319892017-06-15 15:25:22 -07001524 for ctrl in main.Cluster.active():
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001525 addedPValues.append( main.pCounterValue )
1526 main.pCounterValue += 5
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001527 # Check that counter incremented numController times
1528 pCounterResults = True
1529 for i in addedPValues:
1530 tmpResult = i in pCounters
1531 pCounterResults = pCounterResults and tmpResult
1532 if not tmpResult:
1533 main.log.error( str( i ) + " is not in partitioned "
1534 "counter incremented results" )
1535 utilities.assert_equals( expect=True,
1536 actual=pCounterResults,
1537 onpass="Default counter incremented",
1538 onfail="Error incrementing default" +
1539 " counter" )
1540
1541 main.step( "Counters we added have the correct values" )
Devin Lim58046fa2017-07-05 16:55:00 -07001542 incrementCheck = self.counterCheck( main.pCounterName, main.pCounterValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001543 utilities.assert_equals( expect=main.TRUE,
1544 actual=incrementCheck,
1545 onpass="Added counters are correct",
1546 onfail="Added counters are incorrect" )
1547
1548 # DISTRIBUTED SETS
1549 main.step( "Distributed Set get" )
1550 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001551 getResponses = main.Cluster.command( "setTestGet",
1552 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001553 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001554 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001555 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001556 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001557 current = set( getResponses[ i ] )
1558 if len( current ) == len( getResponses[ i ] ):
1559 # no repeats
1560 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001561 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001562 " has incorrect view" +
1563 " of set " + main.onosSetName + ":\n" +
1564 str( getResponses[ i ] ) )
1565 main.log.debug( "Expected: " + str( main.onosSet ) )
1566 main.log.debug( "Actual: " + str( current ) )
1567 getResults = main.FALSE
1568 else:
1569 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001570 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001571 " has repeat elements in" +
1572 " set " + main.onosSetName + ":\n" +
1573 str( getResponses[ i ] ) )
1574 getResults = main.FALSE
1575 elif getResponses[ i ] == main.ERROR:
1576 getResults = main.FALSE
1577 utilities.assert_equals( expect=main.TRUE,
1578 actual=getResults,
1579 onpass="Set elements are correct",
1580 onfail="Set elements are incorrect" )
1581
1582 main.step( "Distributed Set size" )
Jon Hallca319892017-06-15 15:25:22 -07001583 sizeResponses = main.Cluster.command( "setTestSize",
1584 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001585 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001586 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001587 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001588 if size != sizeResponses[ i ]:
1589 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001590 main.log.error( node +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001591 " expected a size of " + str( size ) +
1592 " for set " + main.onosSetName +
1593 " but got " + str( sizeResponses[ i ] ) )
1594 utilities.assert_equals( expect=main.TRUE,
1595 actual=sizeResults,
1596 onpass="Set sizes are correct",
1597 onfail="Set sizes are incorrect" )
1598
1599 main.step( "Distributed Set add()" )
1600 main.onosSet.add( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001601 addResponses = main.Cluster.command( "setTestAdd",
1602 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001603 # main.TRUE = successfully changed the set
1604 # main.FALSE = action resulted in no change in set
1605 # main.ERROR - Some error in executing the function
1606 addResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001607 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001608 if addResponses[ i ] == main.TRUE:
1609 # All is well
1610 pass
1611 elif addResponses[ i ] == main.FALSE:
1612 # Already in set, probably fine
1613 pass
1614 elif addResponses[ i ] == main.ERROR:
1615 # Error in execution
1616 addResults = main.FALSE
1617 else:
1618 # unexpected result
1619 addResults = main.FALSE
1620 if addResults != main.TRUE:
1621 main.log.error( "Error executing set add" )
1622
1623 # Check if set is still correct
1624 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001625 getResponses = main.Cluster.command( "setTestGet",
1626 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001627 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001628 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001629 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001630 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001631 current = set( getResponses[ i ] )
1632 if len( current ) == len( getResponses[ i ] ):
1633 # no repeats
1634 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001635 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001636 " of set " + main.onosSetName + ":\n" +
1637 str( getResponses[ i ] ) )
1638 main.log.debug( "Expected: " + str( main.onosSet ) )
1639 main.log.debug( "Actual: " + str( current ) )
1640 getResults = main.FALSE
1641 else:
1642 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001643 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001644 " set " + main.onosSetName + ":\n" +
1645 str( getResponses[ i ] ) )
1646 getResults = main.FALSE
1647 elif getResponses[ i ] == main.ERROR:
1648 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001649 sizeResponses = main.Cluster.command( "setTestSize",
1650 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001651 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001652 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001653 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001654 if size != sizeResponses[ i ]:
1655 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001656 main.log.error( node + " expected a size of " +
1657 str( size ) + " for set " + main.onosSetName +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001658 " but got " + str( sizeResponses[ i ] ) )
1659 addResults = addResults and getResults and sizeResults
1660 utilities.assert_equals( expect=main.TRUE,
1661 actual=addResults,
1662 onpass="Set add correct",
1663 onfail="Set add was incorrect" )
1664
1665 main.step( "Distributed Set addAll()" )
1666 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001667 addResponses = main.Cluster.command( "setTestAdd",
1668 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001669 # main.TRUE = successfully changed the set
1670 # main.FALSE = action resulted in no change in set
1671 # main.ERROR - Some error in executing the function
1672 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001673 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001674 if addResponses[ i ] == main.TRUE:
1675 # All is well
1676 pass
1677 elif addResponses[ i ] == main.FALSE:
1678 # Already in set, probably fine
1679 pass
1680 elif addResponses[ i ] == main.ERROR:
1681 # Error in execution
1682 addAllResults = main.FALSE
1683 else:
1684 # unexpected result
1685 addAllResults = main.FALSE
1686 if addAllResults != main.TRUE:
1687 main.log.error( "Error executing set addAll" )
1688
1689 # Check if set is still correct
1690 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001691 getResponses = main.Cluster.command( "setTestGet",
1692 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001693 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001694 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001695 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001696 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001697 current = set( getResponses[ i ] )
1698 if len( current ) == len( getResponses[ i ] ):
1699 # no repeats
1700 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001701 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001702 " of set " + main.onosSetName + ":\n" +
1703 str( getResponses[ i ] ) )
1704 main.log.debug( "Expected: " + str( main.onosSet ) )
1705 main.log.debug( "Actual: " + str( current ) )
1706 getResults = main.FALSE
1707 else:
1708 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001709 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001710 " set " + main.onosSetName + ":\n" +
1711 str( getResponses[ i ] ) )
1712 getResults = main.FALSE
1713 elif getResponses[ i ] == main.ERROR:
1714 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001715 sizeResponses = main.Cluster.command( "setTestSize",
1716 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001717 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001718 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001719 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001720 if size != sizeResponses[ i ]:
1721 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001722 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001723 " for set " + main.onosSetName +
1724 " but got " + str( sizeResponses[ i ] ) )
1725 addAllResults = addAllResults and getResults and sizeResults
1726 utilities.assert_equals( expect=main.TRUE,
1727 actual=addAllResults,
1728 onpass="Set addAll correct",
1729 onfail="Set addAll was incorrect" )
1730
1731 main.step( "Distributed Set contains()" )
Jon Hallca319892017-06-15 15:25:22 -07001732 containsResponses = main.Cluster.command( "setTestGet",
1733 args=[ main.onosSetName ],
1734 kwargs={ "values": addValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001735 containsResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001736 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001737 if containsResponses[ i ] == main.ERROR:
1738 containsResults = main.FALSE
1739 else:
1740 containsResults = containsResults and\
1741 containsResponses[ i ][ 1 ]
1742 utilities.assert_equals( expect=main.TRUE,
1743 actual=containsResults,
1744 onpass="Set contains is functional",
1745 onfail="Set contains failed" )
1746
1747 main.step( "Distributed Set containsAll()" )
Jon Hallca319892017-06-15 15:25:22 -07001748 containsAllResponses = main.Cluster.command( "setTestGet",
1749 args=[ main.onosSetName ],
1750 kwargs={ "values": addAllValue } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001751 containsAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001752 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001753 if containsResponses[ i ] == main.ERROR:
1754 containsResults = main.FALSE
1755 else:
1756 containsResults = containsResults and\
1757 containsResponses[ i ][ 1 ]
1758 utilities.assert_equals( expect=main.TRUE,
1759 actual=containsAllResults,
1760 onpass="Set containsAll is functional",
1761 onfail="Set containsAll failed" )
1762
1763 main.step( "Distributed Set remove()" )
1764 main.onosSet.remove( addValue )
Jon Hallca319892017-06-15 15:25:22 -07001765 removeResponses = main.Cluster.command( "setTestRemove",
1766 args=[ main.onosSetName, addValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001767 # main.TRUE = successfully changed the set
1768 # main.FALSE = action resulted in no change in set
1769 # main.ERROR - Some error in executing the function
1770 removeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001771 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001772 if removeResponses[ i ] == main.TRUE:
1773 # All is well
1774 pass
1775 elif removeResponses[ i ] == main.FALSE:
1776 # not in set, probably fine
1777 pass
1778 elif removeResponses[ i ] == main.ERROR:
1779 # Error in execution
1780 removeResults = main.FALSE
1781 else:
1782 # unexpected result
1783 removeResults = main.FALSE
1784 if removeResults != main.TRUE:
1785 main.log.error( "Error executing set remove" )
1786
1787 # Check if set is still correct
1788 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001789 getResponses = main.Cluster.command( "setTestGet",
1790 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001791 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001792 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001793 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001794 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001795 current = set( getResponses[ i ] )
1796 if len( current ) == len( getResponses[ i ] ):
1797 # no repeats
1798 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001799 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001800 " of set " + main.onosSetName + ":\n" +
1801 str( getResponses[ i ] ) )
1802 main.log.debug( "Expected: " + str( main.onosSet ) )
1803 main.log.debug( "Actual: " + str( current ) )
1804 getResults = main.FALSE
1805 else:
1806 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001807 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001808 " set " + main.onosSetName + ":\n" +
1809 str( getResponses[ i ] ) )
1810 getResults = main.FALSE
1811 elif getResponses[ i ] == main.ERROR:
1812 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001813 sizeResponses = main.Cluster.command( "setTestSize",
1814 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001815 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001816 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001817 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001818 if size != sizeResponses[ i ]:
1819 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001820 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001821 " for set " + main.onosSetName +
1822 " but got " + str( sizeResponses[ i ] ) )
1823 removeResults = removeResults and getResults and sizeResults
1824 utilities.assert_equals( expect=main.TRUE,
1825 actual=removeResults,
1826 onpass="Set remove correct",
1827 onfail="Set remove was incorrect" )
1828
1829 main.step( "Distributed Set removeAll()" )
1830 main.onosSet.difference_update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001831 removeAllResponses = main.Cluster.command( "setTestRemove",
1832 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001833 # main.TRUE = successfully changed the set
1834 # main.FALSE = action resulted in no change in set
1835 # main.ERROR - Some error in executing the function
1836 removeAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001837 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001838 if removeAllResponses[ i ] == main.TRUE:
1839 # All is well
1840 pass
1841 elif removeAllResponses[ i ] == main.FALSE:
1842 # not in set, probably fine
1843 pass
1844 elif removeAllResponses[ i ] == main.ERROR:
1845 # Error in execution
1846 removeAllResults = main.FALSE
1847 else:
1848 # unexpected result
1849 removeAllResults = main.FALSE
1850 if removeAllResults != main.TRUE:
1851 main.log.error( "Error executing set removeAll" )
1852
1853 # Check if set is still correct
1854 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001855 getResponses = main.Cluster.command( "setTestGet",
1856 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001857 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001858 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001859 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001860 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001861 current = set( getResponses[ i ] )
1862 if len( current ) == len( getResponses[ i ] ):
1863 # no repeats
1864 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001865 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001866 " of set " + main.onosSetName + ":\n" +
1867 str( getResponses[ i ] ) )
1868 main.log.debug( "Expected: " + str( main.onosSet ) )
1869 main.log.debug( "Actual: " + str( current ) )
1870 getResults = main.FALSE
1871 else:
1872 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001873 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001874 " set " + main.onosSetName + ":\n" +
1875 str( getResponses[ i ] ) )
1876 getResults = main.FALSE
1877 elif getResponses[ i ] == main.ERROR:
1878 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001879 sizeResponses = main.Cluster.command( "setTestSize",
1880 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001881 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001882 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001883 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001884 if size != sizeResponses[ i ]:
1885 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001886 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001887 " for set " + main.onosSetName +
1888 " but got " + str( sizeResponses[ i ] ) )
1889 removeAllResults = removeAllResults and getResults and sizeResults
1890 utilities.assert_equals( expect=main.TRUE,
1891 actual=removeAllResults,
1892 onpass="Set removeAll correct",
1893 onfail="Set removeAll was incorrect" )
1894
1895 main.step( "Distributed Set addAll()" )
1896 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07001897 addResponses = main.Cluster.command( "setTestAdd",
1898 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001899 # main.TRUE = successfully changed the set
1900 # main.FALSE = action resulted in no change in set
1901 # main.ERROR - Some error in executing the function
1902 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001903 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001904 if addResponses[ i ] == main.TRUE:
1905 # All is well
1906 pass
1907 elif addResponses[ i ] == main.FALSE:
1908 # Already in set, probably fine
1909 pass
1910 elif addResponses[ i ] == main.ERROR:
1911 # Error in execution
1912 addAllResults = main.FALSE
1913 else:
1914 # unexpected result
1915 addAllResults = main.FALSE
1916 if addAllResults != main.TRUE:
1917 main.log.error( "Error executing set addAll" )
1918
1919 # Check if set is still correct
1920 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001921 getResponses = main.Cluster.command( "setTestGet",
1922 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001923 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001924 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001925 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001926 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001927 current = set( getResponses[ i ] )
1928 if len( current ) == len( getResponses[ i ] ):
1929 # no repeats
1930 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001931 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001932 " of set " + main.onosSetName + ":\n" +
1933 str( getResponses[ i ] ) )
1934 main.log.debug( "Expected: " + str( main.onosSet ) )
1935 main.log.debug( "Actual: " + str( current ) )
1936 getResults = main.FALSE
1937 else:
1938 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07001939 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001940 " set " + main.onosSetName + ":\n" +
1941 str( getResponses[ i ] ) )
1942 getResults = main.FALSE
1943 elif getResponses[ i ] == main.ERROR:
1944 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001945 sizeResponses = main.Cluster.command( "setTestSize",
1946 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001947 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001948 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001949 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001950 if size != sizeResponses[ i ]:
1951 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07001952 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001953 " for set " + main.onosSetName +
1954 " but got " + str( sizeResponses[ i ] ) )
1955 addAllResults = addAllResults and getResults and sizeResults
1956 utilities.assert_equals( expect=main.TRUE,
1957 actual=addAllResults,
1958 onpass="Set addAll correct",
1959 onfail="Set addAll was incorrect" )
1960
1961 main.step( "Distributed Set clear()" )
1962 main.onosSet.clear()
Jon Hallca319892017-06-15 15:25:22 -07001963 clearResponses = main.Cluster.command( "setTestRemove",
Jon Hall4173b242017-09-12 17:04:38 -07001964 args=[ main.onosSetName, " " ], # Values doesn't matter
1965 kwargs={ "clear": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001966 # main.TRUE = successfully changed the set
1967 # main.FALSE = action resulted in no change in set
1968 # main.ERROR - Some error in executing the function
1969 clearResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001970 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001971 if clearResponses[ i ] == main.TRUE:
1972 # All is well
1973 pass
1974 elif clearResponses[ i ] == main.FALSE:
1975 # Nothing set, probably fine
1976 pass
1977 elif clearResponses[ i ] == main.ERROR:
1978 # Error in execution
1979 clearResults = main.FALSE
1980 else:
1981 # unexpected result
1982 clearResults = main.FALSE
1983 if clearResults != main.TRUE:
1984 main.log.error( "Error executing set clear" )
1985
1986 # Check if set is still correct
1987 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07001988 getResponses = main.Cluster.command( "setTestGet",
1989 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001990 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07001991 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07001992 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07001993 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001994 current = set( getResponses[ i ] )
1995 if len( current ) == len( getResponses[ i ] ):
1996 # no repeats
1997 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07001998 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07001999 " of set " + main.onosSetName + ":\n" +
2000 str( getResponses[ i ] ) )
2001 main.log.debug( "Expected: " + str( main.onosSet ) )
2002 main.log.debug( "Actual: " + str( current ) )
2003 getResults = main.FALSE
2004 else:
2005 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002006 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002007 " set " + main.onosSetName + ":\n" +
2008 str( getResponses[ i ] ) )
2009 getResults = main.FALSE
2010 elif getResponses[ i ] == main.ERROR:
2011 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002012 sizeResponses = main.Cluster.command( "setTestSize",
2013 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002014 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002015 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002016 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002017 if size != sizeResponses[ i ]:
2018 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002019 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002020 " for set " + main.onosSetName +
2021 " but got " + str( sizeResponses[ i ] ) )
2022 clearResults = clearResults and getResults and sizeResults
2023 utilities.assert_equals( expect=main.TRUE,
2024 actual=clearResults,
2025 onpass="Set clear correct",
2026 onfail="Set clear was incorrect" )
2027
2028 main.step( "Distributed Set addAll()" )
2029 main.onosSet.update( addAllValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002030 addResponses = main.Cluster.command( "setTestAdd",
2031 args=[ main.onosSetName, addAllValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002032 # main.TRUE = successfully changed the set
2033 # main.FALSE = action resulted in no change in set
2034 # main.ERROR - Some error in executing the function
2035 addAllResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002036 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002037 if addResponses[ i ] == main.TRUE:
2038 # All is well
2039 pass
2040 elif addResponses[ i ] == main.FALSE:
2041 # Already in set, probably fine
2042 pass
2043 elif addResponses[ i ] == main.ERROR:
2044 # Error in execution
2045 addAllResults = main.FALSE
2046 else:
2047 # unexpected result
2048 addAllResults = main.FALSE
2049 if addAllResults != main.TRUE:
2050 main.log.error( "Error executing set addAll" )
2051
2052 # Check if set is still correct
2053 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002054 getResponses = main.Cluster.command( "setTestGet",
2055 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002056 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002057 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002058 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002059 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002060 current = set( getResponses[ i ] )
2061 if len( current ) == len( getResponses[ i ] ):
2062 # no repeats
2063 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002064 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002065 " of set " + main.onosSetName + ":\n" +
2066 str( getResponses[ i ] ) )
2067 main.log.debug( "Expected: " + str( main.onosSet ) )
2068 main.log.debug( "Actual: " + str( current ) )
2069 getResults = main.FALSE
2070 else:
2071 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002072 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002073 " set " + main.onosSetName + ":\n" +
2074 str( getResponses[ i ] ) )
2075 getResults = main.FALSE
2076 elif getResponses[ i ] == main.ERROR:
2077 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002078 sizeResponses = main.Cluster.command( "setTestSize",
2079 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002080 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002081 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002082 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002083 if size != sizeResponses[ i ]:
2084 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002085 main.log.error( node + " expected a size of " + str( size ) +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002086 " for set " + main.onosSetName +
2087 " but got " + str( sizeResponses[ i ] ) )
2088 addAllResults = addAllResults and getResults and sizeResults
2089 utilities.assert_equals( expect=main.TRUE,
2090 actual=addAllResults,
2091 onpass="Set addAll correct",
2092 onfail="Set addAll was incorrect" )
2093
2094 main.step( "Distributed Set retain()" )
2095 main.onosSet.intersection_update( retainValue.split() )
Jon Hallca319892017-06-15 15:25:22 -07002096 retainResponses = main.Cluster.command( "setTestRemove",
2097 args=[ main.onosSetName, retainValue ],
2098 kwargs={ "retain": True } )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002099 # main.TRUE = successfully changed the set
2100 # main.FALSE = action resulted in no change in set
2101 # main.ERROR - Some error in executing the function
2102 retainResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002103 for i in range( len( main.Cluster.active() ) ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002104 if retainResponses[ i ] == main.TRUE:
2105 # All is well
2106 pass
2107 elif retainResponses[ i ] == main.FALSE:
2108 # Already in set, probably fine
2109 pass
2110 elif retainResponses[ i ] == main.ERROR:
2111 # Error in execution
2112 retainResults = main.FALSE
2113 else:
2114 # unexpected result
2115 retainResults = main.FALSE
2116 if retainResults != main.TRUE:
2117 main.log.error( "Error executing set retain" )
2118
2119 # Check if set is still correct
2120 size = len( main.onosSet )
Jon Hallca319892017-06-15 15:25:22 -07002121 getResponses = main.Cluster.command( "setTestGet",
2122 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002123 getResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002124 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002125 node = str( main.Cluster.active( i ) )
Jon Hallf37d44d2017-05-24 10:37:30 -07002126 if isinstance( getResponses[ i ], list ):
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002127 current = set( getResponses[ i ] )
2128 if len( current ) == len( getResponses[ i ] ):
2129 # no repeats
2130 if main.onosSet != current:
Jon Hallca319892017-06-15 15:25:22 -07002131 main.log.error( node + " has incorrect view" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002132 " of set " + main.onosSetName + ":\n" +
2133 str( getResponses[ i ] ) )
2134 main.log.debug( "Expected: " + str( main.onosSet ) )
2135 main.log.debug( "Actual: " + str( current ) )
2136 getResults = main.FALSE
2137 else:
2138 # error, set is not a set
Jon Hallca319892017-06-15 15:25:22 -07002139 main.log.error( node + " has repeat elements in" +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002140 " set " + main.onosSetName + ":\n" +
2141 str( getResponses[ i ] ) )
2142 getResults = main.FALSE
2143 elif getResponses[ i ] == main.ERROR:
2144 getResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002145 sizeResponses = main.Cluster.command( "setTestSize",
2146 args=[ main.onosSetName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002147 sizeResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002148 for i in range( len( main.Cluster.active() ) ):
Jon Hall0e240372018-05-02 11:21:57 -07002149 node = str( main.Cluster.active( i ) )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002150 if size != sizeResponses[ i ]:
2151 sizeResults = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07002152 main.log.error( node + " expected a size of " +
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002153 str( size ) + " for set " + main.onosSetName +
2154 " but got " + str( sizeResponses[ i ] ) )
2155 retainResults = retainResults and getResults and sizeResults
2156 utilities.assert_equals( expect=main.TRUE,
2157 actual=retainResults,
2158 onpass="Set retain correct",
2159 onfail="Set retain was incorrect" )
2160
2161 # Transactional maps
2162 main.step( "Partitioned Transactional maps put" )
2163 tMapValue = "Testing"
2164 numKeys = 100
2165 putResult = True
Jon Hallca319892017-06-15 15:25:22 -07002166 ctrl = main.Cluster.next()
2167 putResponses = ctrl.transactionalMapPut( numKeys, tMapValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002168 if putResponses and len( putResponses ) == 100:
2169 for i in putResponses:
2170 if putResponses[ i ][ 'value' ] != tMapValue:
2171 putResult = False
2172 else:
2173 putResult = False
2174 if not putResult:
2175 main.log.debug( "Put response values: " + str( putResponses ) )
2176 utilities.assert_equals( expect=True,
2177 actual=putResult,
2178 onpass="Partitioned Transactional Map put successful",
2179 onfail="Partitioned Transactional Map put values are incorrect" )
2180
2181 main.step( "Partitioned Transactional maps get" )
2182 # FIXME: is this sleep needed?
2183 time.sleep( 5 )
2184
2185 getCheck = True
2186 for n in range( 1, numKeys + 1 ):
Jon Hallca319892017-06-15 15:25:22 -07002187 getResponses = main.Cluster.command( "transactionalMapGet",
2188 args=[ "Key" + str( n ) ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002189 valueCheck = True
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002190 for node in getResponses:
2191 if node != tMapValue:
2192 valueCheck = False
2193 if not valueCheck:
Jon Hall0e240372018-05-02 11:21:57 -07002194 main.log.warn( "Values for key 'Key" + str( n ) + "' do not match:" )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002195 main.log.warn( getResponses )
2196 getCheck = getCheck and valueCheck
2197 utilities.assert_equals( expect=True,
2198 actual=getCheck,
2199 onpass="Partitioned Transactional Map get values were correct",
2200 onfail="Partitioned Transactional Map values incorrect" )
2201
2202 # DISTRIBUTED ATOMIC VALUE
2203 main.step( "Get the value of a new value" )
Jon Hallca319892017-06-15 15:25:22 -07002204 getValues = main.Cluster.command( "valueTestGet",
2205 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002206 main.log.debug( getValues )
2207 # Check the results
2208 atomicValueGetResult = True
2209 expected = valueValue if valueValue is not None else "null"
2210 main.log.debug( "Checking for value of " + expected )
2211 for i in getValues:
2212 if i != expected:
2213 atomicValueGetResult = False
2214 utilities.assert_equals( expect=True,
2215 actual=atomicValueGetResult,
2216 onpass="Atomic Value get successful",
2217 onfail="Error getting atomic Value " +
2218 str( valueValue ) + ", found: " +
2219 str( getValues ) )
2220
2221 main.step( "Atomic Value set()" )
2222 valueValue = "foo"
Jon Hallca319892017-06-15 15:25:22 -07002223 setValues = main.Cluster.command( "valueTestSet",
2224 args=[ valueName, valueValue ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002225 main.log.debug( setValues )
2226 # Check the results
2227 atomicValueSetResults = True
2228 for i in setValues:
2229 if i != main.TRUE:
2230 atomicValueSetResults = False
2231 utilities.assert_equals( expect=True,
2232 actual=atomicValueSetResults,
2233 onpass="Atomic Value set successful",
2234 onfail="Error setting atomic Value" +
2235 str( setValues ) )
2236
2237 main.step( "Get the value after set()" )
Jon Hallca319892017-06-15 15:25:22 -07002238 getValues = main.Cluster.command( "valueTestGet",
2239 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002240 main.log.debug( getValues )
2241 # Check the results
2242 atomicValueGetResult = True
2243 expected = valueValue if valueValue is not None else "null"
2244 main.log.debug( "Checking for value of " + expected )
2245 for i in getValues:
2246 if i != expected:
2247 atomicValueGetResult = False
2248 utilities.assert_equals( expect=True,
2249 actual=atomicValueGetResult,
2250 onpass="Atomic Value get successful",
2251 onfail="Error getting atomic Value " +
2252 str( valueValue ) + ", found: " +
2253 str( getValues ) )
2254
2255 main.step( "Atomic Value compareAndSet()" )
2256 oldValue = valueValue
2257 valueValue = "bar"
Jon Hallca319892017-06-15 15:25:22 -07002258 ctrl = main.Cluster.next()
2259 CASValue = ctrl.valueTestCompareAndSet( valueName, oldValue, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002260 main.log.debug( CASValue )
2261 utilities.assert_equals( expect=main.TRUE,
2262 actual=CASValue,
2263 onpass="Atomic Value comapreAndSet successful",
2264 onfail="Error setting atomic Value:" +
2265 str( CASValue ) )
2266
2267 main.step( "Get the value after compareAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002268 getValues = main.Cluster.command( "valueTestGet",
2269 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002270 main.log.debug( getValues )
2271 # Check the results
2272 atomicValueGetResult = True
2273 expected = valueValue if valueValue is not None else "null"
2274 main.log.debug( "Checking for value of " + expected )
2275 for i in getValues:
2276 if i != expected:
2277 atomicValueGetResult = False
2278 utilities.assert_equals( expect=True,
2279 actual=atomicValueGetResult,
2280 onpass="Atomic Value get successful",
2281 onfail="Error getting atomic Value " +
2282 str( valueValue ) + ", found: " +
2283 str( getValues ) )
2284
2285 main.step( "Atomic Value getAndSet()" )
2286 oldValue = valueValue
2287 valueValue = "baz"
Jon Hallca319892017-06-15 15:25:22 -07002288 ctrl = main.Cluster.next()
2289 GASValue = ctrl.valueTestGetAndSet( valueName, valueValue )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002290 main.log.debug( GASValue )
2291 expected = oldValue if oldValue is not None else "null"
2292 utilities.assert_equals( expect=expected,
2293 actual=GASValue,
2294 onpass="Atomic Value GAS successful",
2295 onfail="Error with GetAndSet atomic Value: expected " +
2296 str( expected ) + ", found: " +
2297 str( GASValue ) )
2298
2299 main.step( "Get the value after getAndSet()" )
Jon Hallca319892017-06-15 15:25:22 -07002300 getValues = main.Cluster.command( "valueTestGet",
2301 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002302 main.log.debug( getValues )
2303 # Check the results
2304 atomicValueGetResult = True
2305 expected = valueValue if valueValue is not None else "null"
2306 main.log.debug( "Checking for value of " + expected )
2307 for i in getValues:
2308 if i != expected:
2309 atomicValueGetResult = False
2310 utilities.assert_equals( expect=True,
2311 actual=atomicValueGetResult,
2312 onpass="Atomic Value get successful",
2313 onfail="Error getting atomic Value: expected " +
2314 str( valueValue ) + ", found: " +
2315 str( getValues ) )
2316
2317 main.step( "Atomic Value destory()" )
2318 valueValue = None
Jon Hallca319892017-06-15 15:25:22 -07002319 ctrl = main.Cluster.next()
2320 destroyResult = ctrl.valueTestDestroy( valueName )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002321 main.log.debug( destroyResult )
2322 # Check the results
2323 utilities.assert_equals( expect=main.TRUE,
2324 actual=destroyResult,
2325 onpass="Atomic Value destroy successful",
2326 onfail="Error destroying atomic Value" )
2327
2328 main.step( "Get the value after destroy()" )
Jon Hallca319892017-06-15 15:25:22 -07002329 getValues = main.Cluster.command( "valueTestGet",
2330 args=[ valueName ] )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002331 main.log.debug( getValues )
2332 # Check the results
2333 atomicValueGetResult = True
2334 expected = valueValue if valueValue is not None else "null"
2335 main.log.debug( "Checking for value of " + expected )
2336 for i in getValues:
2337 if i != expected:
2338 atomicValueGetResult = False
2339 utilities.assert_equals( expect=True,
2340 actual=atomicValueGetResult,
2341 onpass="Atomic Value get successful",
2342 onfail="Error getting atomic Value " +
2343 str( valueValue ) + ", found: " +
2344 str( getValues ) )
Jon Halle0f0b342017-04-18 11:43:47 -07002345
2346 # WORK QUEUES
2347 main.step( "Work Queue add()" )
Jon Hallca319892017-06-15 15:25:22 -07002348 ctrl = main.Cluster.next()
2349 addResult = ctrl.workQueueAdd( workQueueName, 'foo' )
Jon Halle0f0b342017-04-18 11:43:47 -07002350 workQueuePending += 1
2351 main.log.debug( addResult )
2352 # Check the results
2353 utilities.assert_equals( expect=main.TRUE,
2354 actual=addResult,
2355 onpass="Work Queue add successful",
2356 onfail="Error adding to Work Queue" )
2357
2358 main.step( "Check the work queue stats" )
2359 statsResults = self.workQueueStatsCheck( workQueueName,
2360 workQueueCompleted,
2361 workQueueInProgress,
2362 workQueuePending )
2363 utilities.assert_equals( expect=True,
2364 actual=statsResults,
2365 onpass="Work Queue stats correct",
2366 onfail="Work Queue stats incorrect " )
2367
2368 main.step( "Work Queue addMultiple()" )
Jon Hallca319892017-06-15 15:25:22 -07002369 ctrl = main.Cluster.next()
2370 addMultipleResult = ctrl.workQueueAddMultiple( workQueueName, 'bar', 'baz' )
Jon Halle0f0b342017-04-18 11:43:47 -07002371 workQueuePending += 2
2372 main.log.debug( addMultipleResult )
2373 # Check the results
2374 utilities.assert_equals( expect=main.TRUE,
2375 actual=addMultipleResult,
2376 onpass="Work Queue add multiple successful",
2377 onfail="Error adding multiple items to Work Queue" )
2378
2379 main.step( "Check the work queue stats" )
2380 statsResults = self.workQueueStatsCheck( workQueueName,
2381 workQueueCompleted,
2382 workQueueInProgress,
2383 workQueuePending )
2384 utilities.assert_equals( expect=True,
2385 actual=statsResults,
2386 onpass="Work Queue stats correct",
2387 onfail="Work Queue stats incorrect " )
2388
2389 main.step( "Work Queue takeAndComplete() 1" )
Jon Hallca319892017-06-15 15:25:22 -07002390 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002391 number = 1
Jon Hallca319892017-06-15 15:25:22 -07002392 take1Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002393 workQueuePending -= number
2394 workQueueCompleted += number
2395 main.log.debug( take1Result )
2396 # Check the results
2397 utilities.assert_equals( expect=main.TRUE,
2398 actual=take1Result,
2399 onpass="Work Queue takeAndComplete 1 successful",
2400 onfail="Error taking 1 from Work Queue" )
2401
2402 main.step( "Check the work queue stats" )
2403 statsResults = self.workQueueStatsCheck( workQueueName,
2404 workQueueCompleted,
2405 workQueueInProgress,
2406 workQueuePending )
2407 utilities.assert_equals( expect=True,
2408 actual=statsResults,
2409 onpass="Work Queue stats correct",
2410 onfail="Work Queue stats incorrect " )
2411
2412 main.step( "Work Queue takeAndComplete() 2" )
Jon Hallca319892017-06-15 15:25:22 -07002413 ctrl = main.Cluster.next()
Jon Halle0f0b342017-04-18 11:43:47 -07002414 number = 2
Jon Hallca319892017-06-15 15:25:22 -07002415 take2Result = ctrl.workQueueTakeAndComplete( workQueueName, number )
Jon Halle0f0b342017-04-18 11:43:47 -07002416 workQueuePending -= number
2417 workQueueCompleted += number
2418 main.log.debug( take2Result )
2419 # Check the results
2420 utilities.assert_equals( expect=main.TRUE,
2421 actual=take2Result,
2422 onpass="Work Queue takeAndComplete 2 successful",
2423 onfail="Error taking 2 from Work Queue" )
2424
2425 main.step( "Check the work queue stats" )
2426 statsResults = self.workQueueStatsCheck( workQueueName,
2427 workQueueCompleted,
2428 workQueueInProgress,
2429 workQueuePending )
2430 utilities.assert_equals( expect=True,
2431 actual=statsResults,
2432 onpass="Work Queue stats correct",
2433 onfail="Work Queue stats incorrect " )
2434
2435 main.step( "Work Queue destroy()" )
2436 valueValue = None
2437 threads = []
Jon Hallca319892017-06-15 15:25:22 -07002438 ctrl = main.Cluster.next()
2439 destroyResult = ctrl.workQueueDestroy( workQueueName )
Jon Halle0f0b342017-04-18 11:43:47 -07002440 workQueueCompleted = 0
2441 workQueueInProgress = 0
2442 workQueuePending = 0
2443 main.log.debug( destroyResult )
2444 # Check the results
2445 utilities.assert_equals( expect=main.TRUE,
2446 actual=destroyResult,
2447 onpass="Work Queue destroy successful",
2448 onfail="Error destroying Work Queue" )
2449
2450 main.step( "Check the work queue stats" )
2451 statsResults = self.workQueueStatsCheck( workQueueName,
2452 workQueueCompleted,
2453 workQueueInProgress,
2454 workQueuePending )
2455 utilities.assert_equals( expect=True,
2456 actual=statsResults,
2457 onpass="Work Queue stats correct",
2458 onfail="Work Queue stats incorrect " )
Jon Hall7a6ebfd2017-03-13 10:58:58 -07002459 except Exception as e:
2460 main.log.error( "Exception: " + str( e ) )
Devin Lim58046fa2017-07-05 16:55:00 -07002461
2462 def cleanUp( self, main ):
2463 """
2464 Clean up
2465 """
Devin Lim58046fa2017-07-05 16:55:00 -07002466 assert main, "main not defined"
2467 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002468
2469 # printing colors to terminal
2470 colors = { 'cyan': '\033[96m', 'purple': '\033[95m',
2471 'blue': '\033[94m', 'green': '\033[92m',
2472 'yellow': '\033[93m', 'red': '\033[91m', 'end': '\033[0m' }
Jon Hall4173b242017-09-12 17:04:38 -07002473
Devin Lim58046fa2017-07-05 16:55:00 -07002474 main.case( "Test Cleanup" )
Jon Hall4173b242017-09-12 17:04:38 -07002475
2476 main.step( "Checking raft log size" )
2477 # TODO: this is a flaky check, but the intent is to make sure the raft logs
2478 # get compacted periodically
Jon Hall3e6edb32018-08-21 16:20:30 -07002479
2480 # FIXME: We need to look at the raft servers, which might not be on the ONOS machine
Jon Hall4173b242017-09-12 17:04:38 -07002481 logCheck = main.Cluster.checkPartitionSize()
2482 utilities.assert_equals( expect=True, actual=logCheck,
2483 onpass="Raft log size is not too big",
2484 onfail="Raft logs grew too big" )
2485
Devin Lim58046fa2017-07-05 16:55:00 -07002486 main.step( "Killing tcpdumps" )
2487 main.Mininet2.stopTcpdump()
2488
2489 testname = main.TEST
2490 if main.params[ 'BACKUP' ][ 'ENABLED' ] == "True":
2491 main.step( "Copying MN pcap and ONOS log files to test station" )
2492 teststationUser = main.params[ 'BACKUP' ][ 'TESTONUSER' ]
2493 teststationIP = main.params[ 'BACKUP' ][ 'TESTONIP' ]
2494 # NOTE: MN Pcap file is being saved to logdir.
2495 # We scp this file as MN and TestON aren't necessarily the same vm
2496
2497 # FIXME: To be replaced with a Jenkin's post script
2498 # TODO: Load these from params
2499 # NOTE: must end in /
2500 logFolder = "/opt/onos/log/"
2501 logFiles = [ "karaf.log", "karaf.log.1" ]
2502 # NOTE: must end in /
2503 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002504 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002505 dstName = main.logdir + "/" + ctrl.name + "-" + f
2506 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002507 logFolder + f, dstName )
2508 # std*.log's
2509 # NOTE: must end in /
2510 logFolder = "/opt/onos/var/"
2511 logFiles = [ "stderr.log", "stdout.log" ]
2512 # NOTE: must end in /
2513 for f in logFiles:
Devin Lim142b5342017-07-20 15:22:39 -07002514 for ctrl in main.Cluster.runningNodes:
Jon Hallca319892017-06-15 15:25:22 -07002515 dstName = main.logdir + "/" + ctrl.name + "-" + f
2516 main.ONOSbench.secureCopy( ctrl.user_name, ctrl.ipAddress,
Devin Lim58046fa2017-07-05 16:55:00 -07002517 logFolder + f, dstName )
2518 else:
2519 main.log.debug( "skipping saving log files" )
2520
Jon Hall5d5876e2017-11-30 09:33:16 -08002521 main.step( "Checking ONOS Logs for errors" )
2522 for ctrl in main.Cluster.runningNodes:
2523 main.log.debug( "Checking logs for errors on " + ctrl.name + ":" )
2524 main.log.warn( main.ONOSbench.checkLogs( ctrl.ipAddress ) )
2525
Devin Lim58046fa2017-07-05 16:55:00 -07002526 main.step( "Stopping Mininet" )
2527 mnResult = main.Mininet1.stopNet()
2528 utilities.assert_equals( expect=main.TRUE, actual=mnResult,
2529 onpass="Mininet stopped",
2530 onfail="MN cleanup NOT successful" )
2531
Devin Lim58046fa2017-07-05 16:55:00 -07002532 try:
2533 timerLog = open( main.logdir + "/Timers.csv", 'w' )
2534 main.log.debug( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2535 timerLog.write( ", ".join( main.HAlabels ) + "\n" + ", ".join( main.HAdata ) )
2536 timerLog.close()
2537 except NameError as e:
2538 main.log.exception( e )
Jon Hallca319892017-06-15 15:25:22 -07002539
    def assignMastership( self, main ):
        """
        Manually assign mastership of each switch to a designated ONOS
        controller using 'device-role', then verify the assignments.

        The switch-number -> controller mapping below was laid out for a
        7 node cluster; the '% main.Cluster.numCtrls' wrap-around lets it
        run against smaller clusters as well.
        """
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"

        main.case( "Assigning Controller roles for switches" )
        main.caseExplanation = "Check that ONOS is connected to each " +\
                               "device. Then manually assign" +\
                               " mastership to specific ONOS nodes using" +\
                               " 'device-role'"
        main.step( "Assign mastership of switches to specific controllers" )
        # Manually assign mastership to the controller we want
        roleCall = main.TRUE

        # Parallel lists: ipList[ i ] is the intended master of deviceList[ i ]
        ipList = []
        deviceList = []
        try:
            # Assign mastership to specific controllers. This assignment was
            # determined for a 7 node cluster, but will work with any sized
            # cluster
            for i in range( 1, 29 ):  # switches 1 through 28
                # set up correct variables:
                # c is the index of the intended master controller; the device
                # id is looked up from ONOS by the switch's dpid fragment
                if i == 1:
                    c = 0
                    ip = main.Cluster.active( c ).ip_address  # ONOS1
                    deviceId = main.Cluster.next().getDevice( "1000" ).get( 'id' )
                elif i == 2:
                    c = 1 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS2
                    deviceId = main.Cluster.next().getDevice( "2000" ).get( 'id' )
                elif i == 3:
                    c = 1 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS2
                    deviceId = main.Cluster.next().getDevice( "3000" ).get( 'id' )
                elif i == 4:
                    c = 3 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS4
                    deviceId = main.Cluster.next().getDevice( "3004" ).get( 'id' )
                elif i == 5:
                    c = 2 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS3
                    deviceId = main.Cluster.next().getDevice( "5000" ).get( 'id' )
                elif i == 6:
                    c = 2 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS3
                    deviceId = main.Cluster.next().getDevice( "6000" ).get( 'id' )
                elif i == 7:
                    c = 5 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS6
                    deviceId = main.Cluster.next().getDevice( "6007" ).get( 'id' )
                elif i >= 8 and i <= 17:
                    # switches s8-s17 share a master; dpid is '3' + zero-padded i
                    c = 4 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS5
                    dpid = '3' + str( i ).zfill( 3 )
                    deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
                elif i >= 18 and i <= 27:
                    # switches s18-s27 share a master; dpid is '6' + zero-padded i
                    c = 6 % main.Cluster.numCtrls
                    ip = main.Cluster.active( c ).ip_address  # ONOS7
                    dpid = '6' + str( i ).zfill( 3 )
                    deviceId = main.Cluster.next().getDevice( dpid ).get( 'id' )
                elif i == 28:
                    c = 0
                    ip = main.Cluster.active( c ).ip_address  # ONOS1
                    deviceId = main.Cluster.next().getDevice( "2800" ).get( 'id' )
                else:
                    main.log.error( "You didn't write an else statement for " +
                                    "switch s" + str( i ) )
                    roleCall = main.FALSE
                # Assign switch
                assert deviceId, "No device id for s" + str( i ) + " in ONOS"
                # TODO: make this controller dynamic
                roleCall = roleCall and main.Cluster.next().deviceRole( deviceId, ip )
                ipList.append( ip )
                deviceList.append( deviceId )
        except ( AttributeError, AssertionError ):
            # getDevice returned None / missing id: dump the device view for debugging
            main.log.exception( "Something is wrong with ONOS device view" )
            main.log.info( main.Cluster.next().devices() )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCall,
            onpass="Re-assigned switch mastership to designated controller",
            onfail="Something wrong with deviceRole calls" )

        main.step( "Check mastership was correctly assigned" )
        roleCheck = main.TRUE
        # NOTE: This is due to the fact that device mastership change is not
        #       atomic and is actually a multi step process
        time.sleep( 5 )
        for i in range( len( ipList ) ):
            ip = ipList[ i ]
            deviceId = deviceList[ i ]
            # Check assignment: the reported master must match the ip we assigned
            master = main.Cluster.next().getRole( deviceId ).get( 'master' )
            if ip in master:
                roleCheck = roleCheck and main.TRUE
            else:
                roleCheck = roleCheck and main.FALSE
                main.log.error( "Error, controller " + ip + " is not" +
                                " master " + "of device " +
                                str( deviceId ) + ". Master is " +
                                repr( master ) + "." )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=roleCheck,
            onpass="Switches were successfully reassigned to designated " +
                   "controller",
            onfail="Switches were not successfully reassigned" )
Jon Hallca319892017-06-15 15:25:22 -07002650
Jon Hall5d5876e2017-11-30 09:33:16 -08002651 def bringUpStoppedNodes( self, main ):
Devin Lim58046fa2017-07-05 16:55:00 -07002652 """
Jon Hall5d5876e2017-11-30 09:33:16 -08002653 The bring up stopped nodes.
Devin Lim58046fa2017-07-05 16:55:00 -07002654 """
2655 import time
Devin Lim58046fa2017-07-05 16:55:00 -07002656 assert main, "main not defined"
2657 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07002658 assert main.kill, "main.kill not defined"
2659 main.case( "Restart minority of ONOS nodes" )
2660
2661 main.step( "Restarting " + str( len( main.kill ) ) + " ONOS nodes" )
2662 startResults = main.TRUE
2663 restartTime = time.time()
Jon Hallca319892017-06-15 15:25:22 -07002664 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002665 startResults = startResults and\
Jon Hallca319892017-06-15 15:25:22 -07002666 ctrl.onosStart( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002667 utilities.assert_equals( expect=main.TRUE, actual=startResults,
2668 onpass="ONOS nodes started successfully",
2669 onfail="ONOS nodes NOT successfully started" )
2670
2671 main.step( "Checking if ONOS is up yet" )
2672 count = 0
2673 onosIsupResult = main.FALSE
2674 while onosIsupResult == main.FALSE and count < 10:
2675 onosIsupResult = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002676 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002677 onosIsupResult = onosIsupResult and\
Jon Hallca319892017-06-15 15:25:22 -07002678 ctrl.isup( ctrl.ipAddress )
Devin Lim58046fa2017-07-05 16:55:00 -07002679 count = count + 1
2680 utilities.assert_equals( expect=main.TRUE, actual=onosIsupResult,
2681 onpass="ONOS restarted successfully",
2682 onfail="ONOS restart NOT successful" )
2683
Jon Hall5d5876e2017-11-30 09:33:16 -08002684 main.step( "Restarting ONOS CLI" )
Devin Lim58046fa2017-07-05 16:55:00 -07002685 cliResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002686 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002687 cliResults = cliResults and\
Jon Hallca319892017-06-15 15:25:22 -07002688 ctrl.startOnosCli( ctrl.ipAddress )
2689 ctrl.active = True
Devin Lim58046fa2017-07-05 16:55:00 -07002690 utilities.assert_equals( expect=main.TRUE, actual=cliResults,
Jon Hallca319892017-06-15 15:25:22 -07002691 onpass="ONOS node(s) restarted",
2692 onfail="ONOS node(s) did not restart" )
Devin Lim58046fa2017-07-05 16:55:00 -07002693
Jon Hall5d5876e2017-11-30 09:33:16 -08002694 # Grab the time of restart so we can have some idea of average time
Devin Lim58046fa2017-07-05 16:55:00 -07002695 main.restartTime = time.time() - restartTime
2696 main.log.debug( "Restart time: " + str( main.restartTime ) )
2697 # TODO: MAke this configurable. Also, we are breaking the above timer
Jon Hall30668ff2019-02-27 17:43:09 -08002698 main.testSetUp.checkOnosNodes( main.Cluster )
Devin Lim58046fa2017-07-05 16:55:00 -07002699
Jon Hallca319892017-06-15 15:25:22 -07002700 self.commonChecks()
Devin Lim58046fa2017-07-05 16:55:00 -07002701
2702 main.step( "Rerun for election on the node(s) that were killed" )
2703 runResults = main.TRUE
Jon Hallca319892017-06-15 15:25:22 -07002704 for ctrl in main.kill:
Devin Lim58046fa2017-07-05 16:55:00 -07002705 runResults = runResults and\
Jon Hallca319892017-06-15 15:25:22 -07002706 ctrl.electionTestRun()
Devin Lim58046fa2017-07-05 16:55:00 -07002707 utilities.assert_equals( expect=main.TRUE, actual=runResults,
2708 onpass="ONOS nodes reran for election topic",
Jon Hall5d5876e2017-11-30 09:33:16 -08002709 onfail="Error rerunning for election" )
2710
    def upgradeNodes( self, main ):
        """
        Reinstall some nodes with an upgraded version.

        This will reinstall nodes in main.kill with an upgraded version.

        For each controller handle in main.kill: stop ONOS, uninstall it,
        install the (already built) upgraded version, re-secure the ssh
        keys, and wait for the node to come up. Then restart the ONOS CLI
        sessions, verify the cluster nodes, run the common state checks,
        and re-run for leadership of the election test topic.

        Results of each phase are reported through utilities.assert_equals.
        Side effect: sets main.restartTime to the measured restart duration
        in seconds.
        """
        import time
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        assert main.kill, "main.kill not defined"
        nodeNames = [ node.name for node in main.kill ]
        main.step( "Upgrading" + str( nodeNames ) + " ONOS nodes" )

        stopResults = main.TRUE
        uninstallResults = main.TRUE
        startResults = main.TRUE
        sshResults = main.TRUE
        isup = main.TRUE
        restartTime = time.time()
        for ctrl in main.kill:
            # NOTE: 'and' short-circuits; once a phase fails for one node,
            #       that phase's call is skipped for the remaining nodes
            stopResults = stopResults and\
                          ctrl.onosStop( ctrl.ipAddress )
            uninstallResults = uninstallResults and\
                               ctrl.onosUninstall( ctrl.ipAddress )
            # Install the new version of onos
            startResults = startResults and\
                           ctrl.onosInstall( options="-fv", node=ctrl.ipAddress )
            sshResults = sshResults and\
                         ctrl.onosSecureSSH( node=ctrl.ipAddress )
            isup = isup and ctrl.isup( ctrl.ipAddress )
        utilities.assert_equals( expect=main.TRUE, actual=stopResults,
                                 onpass="ONOS nodes stopped successfully",
                                 onfail="ONOS nodes NOT successfully stopped" )
        utilities.assert_equals( expect=main.TRUE, actual=uninstallResults,
                                 onpass="ONOS nodes uninstalled successfully",
                                 onfail="ONOS nodes NOT successfully uninstalled" )
        utilities.assert_equals( expect=main.TRUE, actual=startResults,
                                 onpass="ONOS nodes started successfully",
                                 onfail="ONOS nodes NOT successfully started" )
        utilities.assert_equals( expect=main.TRUE, actual=sshResults,
                                 onpass="Successfully secured onos ssh",
                                 onfail="Failed to secure onos ssh" )
        utilities.assert_equals( expect=main.TRUE, actual=isup,
                                 onpass="ONOS nodes fully started",
                                 onfail="ONOS nodes NOT fully started" )

        main.step( "Restarting ONOS CLI" )
        cliResults = main.TRUE
        for ctrl in main.kill:
            cliResults = cliResults and\
                         ctrl.startOnosCli( ctrl.ipAddress )
            # Mark the node as usable again now that its CLI is back
            ctrl.active = True
        utilities.assert_equals( expect=main.TRUE, actual=cliResults,
                                 onpass="ONOS node(s) restarted",
                                 onfail="ONOS node(s) did not restart" )

        # Grab the time of restart so we can have some idea of average time
        main.restartTime = time.time() - restartTime
        main.log.debug( "Restart time: " + str( main.restartTime ) )
        # TODO: Make this configurable.
        main.testSetUp.checkOnosNodes( main.Cluster )

        self.commonChecks()

        main.step( "Rerun for election on the node(s) that were killed" )
        runResults = main.TRUE
        for ctrl in main.kill:
            runResults = runResults and\
                         ctrl.electionTestRun()
        utilities.assert_equals( expect=main.TRUE, actual=runResults,
                                 onpass="ONOS nodes reran for election topic",
                                 onfail="Error rerunning for election" )
Jon Hall4173b242017-09-12 17:04:38 -07002783
Devin Lim142b5342017-07-20 15:22:39 -07002784 def tempCell( self, cellName, ipList ):
2785 main.step( "Create cell file" )
2786 cellAppString = main.params[ 'ENV' ][ 'appString' ]
Devin Lim58046fa2017-07-05 16:55:00 -07002787
Devin Lim142b5342017-07-20 15:22:39 -07002788 main.ONOSbench.createCellFile( main.ONOSbench.ip_address, cellName,
2789 main.Mininet1.ip_address,
Jon Hall3e6edb32018-08-21 16:20:30 -07002790 cellAppString, ipList, ipList,
2791 main.ONOScli1.karafUser )
Devin Lim142b5342017-07-20 15:22:39 -07002792 main.step( "Applying cell variable to environment" )
2793 cellResult = main.ONOSbench.setCell( cellName )
2794 verifyResult = main.ONOSbench.verifyCell()
2795
    def checkStateAfterEvent( self, main, afterWhich, compareSwitch=False, isRestart=False ):
        """
        Check ONOS state after a failure/scaling event.

        afterWhich :
            0: failure
            1: scaling
        compareSwitch : also compare switch mastership against the state
            saved before the event
        isRestart : skip the intent comparison that cannot survive a full
            system restart ( the intent store has no durability )
        """
        """
        Check state after ONOS failure/scaling
        """
        import json
        assert main, "main not defined"
        assert utilities.assert_equals, "utilities.assert_equals not defined"
        main.case( "Running ONOS Constant State Tests" )

        # Index by the 'afterWhich' parameter to label log/step messages
        OnosAfterWhich = [ "failure", "scaling" ]

        # Assert that each device has a master
        self.checkRoleNotNull()

        ONOSMastership, rolesResults, consistentMastership = self.checkTheRole()
        mastershipCheck = main.FALSE

        # Dump each node's view of mastership when the views disagree
        if rolesResults and not consistentMastership:
            for i in range( len( ONOSMastership ) ):
                node = str( main.Cluster.active( i ) )
                main.log.warn( node + " roles: ",
                               json.dumps( json.loads( ONOSMastership[ i ] ),
                                           sort_keys=True,
                                           indent=4,
                                           separators=( ',', ': ' ) ) )

        if compareSwitch:
            # NOTE(review): mastershipState is expected to be a global saved
            #               by an earlier test case; NameError/TypeError is
            #               caught below and aborts the test run
            description2 = "Compare switch roles from before failure"
            main.step( description2 )
            try:
                currentJson = json.loads( ONOSMastership[ 0 ] )
                oldJson = json.loads( mastershipState )
            except ( ValueError, TypeError ):
                main.log.exception( "Something is wrong with parsing " +
                                    "ONOSMastership[0] or mastershipState" )
                main.log.debug( "ONOSMastership[0]: " + repr( ONOSMastership[ 0 ] ) )
                main.log.debug( "mastershipState" + repr( mastershipState ) )
                main.cleanAndExit()
            mastershipCheck = main.TRUE
            # Compare the master of each Mininet switch before and after
            for swName, swDetails in main.Mininet1.getSwitches().items():
                switchDPID = swDetails[ 'dpid' ]
                current = [ switch[ 'master' ] for switch in currentJson
                            if switchDPID in switch[ 'id' ] ]
                old = [ switch[ 'master' ] for switch in oldJson
                        if switchDPID in switch[ 'id' ] ]
                if current == old:
                    mastershipCheck = mastershipCheck and main.TRUE
                else:
                    main.log.warn( "Mastership of switch %s changed" % switchDPID )
                    mastershipCheck = main.FALSE
            utilities.assert_equals(
                expect=main.TRUE,
                actual=mastershipCheck,
                onpass="Mastership of Switches was not changed",
                onfail="Mastership of some switches changed" )

        # NOTE: we expect mastership to change on controller failure/scaling down
        ONOSIntents, intentsResults = self.checkingIntents()
        intentCheck = main.FALSE
        consistentIntents = True

        main.step( "Check for consistency in Intents from each controller" )
        # Every node's intent list should match node 0's ( order ignored )
        if all( [ sorted( i ) == sorted( ONOSIntents[ 0 ] ) for i in ONOSIntents ] ):
            main.log.info( "Intents are consistent across all ONOS " +
                           "nodes" )
        else:
            consistentIntents = False

        # Try to make it easy to figure out what is happening
        #
        # Intent      ONOS1      ONOS2    ...
        # 0x01     INSTALLED  INSTALLING
        # ...         ...         ...
        # ...         ...         ...
        title = "   ID"
        for ctrl in main.Cluster.active():
            title += " " * 10 + ctrl.name
        main.log.warn( title )
        # get all intent keys in the cluster
        keys = []
        for nodeStr in ONOSIntents:
            node = json.loads( nodeStr )
            for intent in node:
                keys.append( intent.get( 'id' ) )
        keys = set( keys )
        # One row per intent id, one column per node's state for that id
        for key in keys:
            row = "%-13s" % key
            for nodeStr in ONOSIntents:
                node = json.loads( nodeStr )
                for intent in node:
                    if intent.get( 'id' ) == key:
                        row += "%-15s" % intent.get( 'state' )
            main.log.warn( row )
        # End table view

        utilities.assert_equals(
            expect=True,
            actual=consistentIntents,
            onpass="Intents are consistent across all ONOS nodes",
            onfail="ONOS nodes have different views of intents" )
        # Log a per-node histogram of intent states
        intentStates = []
        for node in ONOSIntents:  # Iter through ONOS nodes
            nodeStates = []
            # Iter through intents of a node
            try:
                for intent in json.loads( node ):
                    nodeStates.append( intent[ 'state' ] )
            except ( ValueError, TypeError ):
                main.log.exception( "Error in parsing intents" )
                main.log.error( repr( node ) )
            intentStates.append( nodeStates )
            out = [ ( i, nodeStates.count( i ) ) for i in set( nodeStates ) ]
            main.log.info( dict( out ) )

        if intentsResults and not consistentIntents:
            # Dump each node's full intent list for debugging
            for i in range( len( main.Cluster.active() ) ):
                ctrl = main.Cluster.controllers[ i ]
                main.log.warn( ctrl.name + " intents: " )
                main.log.warn( json.dumps(
                    json.loads( ONOSIntents[ i ] ),
                    sort_keys=True,
                    indent=4,
                    separators=( ',', ': ' ) ) )
        elif intentsResults and consistentIntents:
            intentCheck = main.TRUE

        # NOTE: Store has no durability, so intents are lost across system
        #       restarts
        if not isRestart:
            main.step( "Compare current intents with intents before the " + OnosAfterWhich[ afterWhich ] )
            # NOTE: this requires case 5 to pass for intentState to be set.
            #      maybe we should stop the test if that fails?
            sameIntents = main.FALSE
            # intentState is a global saved by an earlier case; probe for it
            try:
                intentState
            except NameError:
                main.log.warn( "No previous intent state was saved" )
            else:
                if intentState and intentState == ONOSIntents[ 0 ]:
                    sameIntents = main.TRUE
                    main.log.info( "Intents are consistent with before " + OnosAfterWhich[ afterWhich ] )
                # TODO: possibly the states have changed? we may need to figure out
                #       what the acceptable states are
                elif len( intentState ) == len( ONOSIntents[ 0 ] ):
                    # Same count but different text: compare intent by intent
                    sameIntents = main.TRUE
                    try:
                        before = json.loads( intentState )
                        after = json.loads( ONOSIntents[ 0 ] )
                        for intent in before:
                            if intent not in after:
                                sameIntents = main.FALSE
                                main.log.debug( "Intent is not currently in ONOS " +
                                                "(at least in the same form):" )
                                main.log.debug( json.dumps( intent ) )
                    except ( ValueError, TypeError ):
                        main.log.exception( "Exception printing intents" )
                        main.log.debug( repr( ONOSIntents[ 0 ] ) )
                        main.log.debug( repr( intentState ) )
                if sameIntents == main.FALSE:
                    try:
                        main.log.debug( "ONOS intents before: " )
                        main.log.debug( json.dumps( json.loads( intentState ),
                                                    sort_keys=True, indent=4,
                                                    separators=( ',', ': ' ) ) )
                        main.log.debug( "Current ONOS intents: " )
                        main.log.debug( json.dumps( json.loads( ONOSIntents[ 0 ] ),
                                                    sort_keys=True, indent=4,
                                                    separators=( ',', ': ' ) ) )
                    except ( ValueError, TypeError ):
                        main.log.exception( "Exception printing intents" )
                        main.log.debug( repr( ONOSIntents[ 0 ] ) )
                        main.log.debug( repr( intentState ) )
                utilities.assert_equals(
                    expect=main.TRUE,
                    actual=sameIntents,
                    onpass="Intents are consistent with before " + OnosAfterWhich[ afterWhich ],
                    onfail="The Intents changed during " + OnosAfterWhich[ afterWhich ] )
            intentCheck = intentCheck and sameIntents

        main.step( "Get the OF Table entries and compare to before " +
                   "component " + OnosAfterWhich[ afterWhich ] )
        FlowTables = main.TRUE
        # NOTE(review): 'flows' is expected to be a global mapping of switch
        #               name -> saved flow table from before the event
        for switch in main.Mininet1.getSwitches().keys():
            main.log.info( "Checking flow table on " + switch )
            tmpFlows = main.Mininet1.getFlowTable( switch, version="1.3", debug=False )
            curSwitch = main.Mininet1.flowTableComp( flows[ switch ], tmpFlows )
            FlowTables = FlowTables and curSwitch
            if curSwitch == main.FALSE:
                main.log.warn( "Differences in flow table for switch: {}".format( switch ) )
        utilities.assert_equals(
            expect=main.TRUE,
            actual=FlowTables,
            onpass="No changes were found in the flow tables",
            onfail="Changes were found in the flow tables" )

        # Stop the long-running background pings started earlier in the test
        main.Mininet2.pingLongKill()
        """
        main.step( "Check the continuous pings to ensure that no packets " +
                   "were dropped during component failure" )
        main.Mininet2.pingKill( main.params[ 'TESTONUSER' ],
                                main.params[ 'TESTONIP' ] )
        LossInPings = main.FALSE
        # NOTE: checkForLoss returns main.FALSE with 0% packet loss
        for i in range( 8, 18 ):
            main.log.info(
                "Checking for a loss in pings along flow from s" +
                str( i ) )
            LossInPings = main.Mininet2.checkForLoss(
                "/tmp/ping.h" +
                str( i ) ) or LossInPings
        if LossInPings == main.TRUE:
            main.log.info( "Loss in ping detected" )
        elif LossInPings == main.ERROR:
            main.log.info( "There are multiple mininet process running" )
        elif LossInPings == main.FALSE:
            main.log.info( "No Loss in the pings" )
            main.log.info( "No loss of dataplane connectivity" )
        utilities.assert_equals(
            expect=main.FALSE,
            actual=LossInPings,
            onpass="No Loss of connectivity",
            onfail="Loss of dataplane connectivity detected" )
        # NOTE: Since intents are not persisted with IntnentStore,
        # we expect loss in dataplane connectivity
        LossInPings = main.FALSE
        """
Devin Lim58046fa2017-07-05 16:55:00 -07003027 def compareTopo( self, main ):
3028 """
3029 Compare topo
3030 """
3031 import json
3032 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003033 assert main, "main not defined"
3034 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003035 try:
3036 from tests.dependencies.topology import Topology
3037 except ImportError:
3038 main.log.error( "Topology not found exiting the test" )
Devin Lim44075962017-08-11 10:56:37 -07003039 main.cleanAndExit()
Devin Lim58046fa2017-07-05 16:55:00 -07003040 try:
3041 main.topoRelated
3042 except ( NameError, AttributeError ):
3043 main.topoRelated = Topology()
3044 main.case( "Compare ONOS Topology view to Mininet topology" )
3045 main.caseExplanation = "Compare topology objects between Mininet" +\
3046 " and ONOS"
3047 topoResult = main.FALSE
3048 topoFailMsg = "ONOS topology don't match Mininet"
3049 elapsed = 0
3050 count = 0
3051 main.step( "Comparing ONOS topology to MN topology" )
3052 startTime = time.time()
3053 # Give time for Gossip to work
3054 while topoResult == main.FALSE and ( elapsed < 60 or count < 3 ):
3055 devicesResults = main.TRUE
3056 linksResults = main.TRUE
3057 hostsResults = main.TRUE
3058 hostAttachmentResults = True
3059 count += 1
3060 cliStart = time.time()
Devin Lim142b5342017-07-20 15:22:39 -07003061 devices = main.topoRelated.getAll( "devices", True,
Jon Hallca319892017-06-15 15:25:22 -07003062 kwargs={ 'sleep': 5, 'attempts': 5,
3063 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003064 ipResult = main.TRUE
3065
Devin Lim142b5342017-07-20 15:22:39 -07003066 hosts = main.topoRelated.getAll( "hosts", True,
Jon Hallca319892017-06-15 15:25:22 -07003067 kwargs={ 'sleep': 5, 'attempts': 5,
3068 'randomTime': True },
3069 inJson=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003070
3071 for controller in range( 0, len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003072 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003073 if hosts[ controller ]:
3074 for host in hosts[ controller ]:
3075 if host is None or host.get( 'ipAddresses', [] ) == []:
3076 main.log.error(
3077 "Error with host ipAddresses on controller" +
3078 controllerStr + ": " + str( host ) )
3079 ipResult = main.FALSE
Jeremy Ronquillo23fb2162017-09-15 14:59:57 -07003080 ports = main.topoRelated.getAll( "ports", True,
Jon Hallca319892017-06-15 15:25:22 -07003081 kwargs={ 'sleep': 5, 'attempts': 5,
3082 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003083 links = main.topoRelated.getAll( "links", True,
Jon Hallca319892017-06-15 15:25:22 -07003084 kwargs={ 'sleep': 5, 'attempts': 5,
3085 'randomTime': True } )
Devin Lim142b5342017-07-20 15:22:39 -07003086 clusters = main.topoRelated.getAll( "clusters", True,
Jon Hallca319892017-06-15 15:25:22 -07003087 kwargs={ 'sleep': 5, 'attempts': 5,
3088 'randomTime': True } )
Devin Lim58046fa2017-07-05 16:55:00 -07003089
3090 elapsed = time.time() - startTime
3091 cliTime = time.time() - cliStart
Jon Hall5d5876e2017-11-30 09:33:16 -08003092 main.log.debug( "Elapsed time: " + str( elapsed ) )
3093 main.log.debug( "CLI time: " + str( cliTime ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003094
3095 if all( e is None for e in devices ) and\
3096 all( e is None for e in hosts ) and\
3097 all( e is None for e in ports ) and\
3098 all( e is None for e in links ) and\
3099 all( e is None for e in clusters ):
3100 topoFailMsg = "Could not get topology from ONOS"
3101 main.log.error( topoFailMsg )
3102 continue # Try again, No use trying to compare
3103
3104 mnSwitches = main.Mininet1.getSwitches()
3105 mnLinks = main.Mininet1.getLinks()
3106 mnHosts = main.Mininet1.getHosts()
Jon Hallca319892017-06-15 15:25:22 -07003107 for controller in range( len( main.Cluster.active() ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003108 controllerStr = str( main.Cluster.active( controller ) )
Jon Hall4173b242017-09-12 17:04:38 -07003109 currentDevicesResult = main.topoRelated.compareDevicePort( main.Mininet1,
3110 controller,
3111 mnSwitches,
3112 devices,
3113 ports )
Devin Lim58046fa2017-07-05 16:55:00 -07003114 utilities.assert_equals( expect=main.TRUE,
3115 actual=currentDevicesResult,
Jon Hallca319892017-06-15 15:25:22 -07003116 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003117 " Switches view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003118 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003119 " Switches view is incorrect" )
3120
Devin Lim58046fa2017-07-05 16:55:00 -07003121 currentLinksResult = main.topoRelated.compareBase( links, controller,
Jon Hall4173b242017-09-12 17:04:38 -07003122 main.Mininet1.compareLinks,
3123 [ mnSwitches, mnLinks ] )
Devin Lim58046fa2017-07-05 16:55:00 -07003124 utilities.assert_equals( expect=main.TRUE,
3125 actual=currentLinksResult,
Jon Hallca319892017-06-15 15:25:22 -07003126 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003127 " links view is correct",
Jon Hallca319892017-06-15 15:25:22 -07003128 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003129 " links view is incorrect" )
3130 if hosts[ controller ] and "Error" not in hosts[ controller ]:
3131 currentHostsResult = main.Mininet1.compareHosts(
3132 mnHosts,
3133 hosts[ controller ] )
3134 elif hosts[ controller ] == []:
3135 currentHostsResult = main.TRUE
3136 else:
3137 currentHostsResult = main.FALSE
3138 utilities.assert_equals( expect=main.TRUE,
3139 actual=currentHostsResult,
Jon Hallca319892017-06-15 15:25:22 -07003140 onpass=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003141 " hosts exist in Mininet",
Jon Hallca319892017-06-15 15:25:22 -07003142 onfail=controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003143 " hosts don't match Mininet" )
Devin Lim58046fa2017-07-05 16:55:00 -07003144 hostAttachment = True
Jon Hallab611372018-02-21 15:26:05 -08003145 if main.topoMappings:
3146 ctrl = main.Cluster.next()
3147 # CHECKING HOST ATTACHMENT POINTS
3148 zeroHosts = False
3149 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3150 if hosts[ controller ] == []:
3151 main.log.warn( "There are no hosts discovered" )
3152 zeroHosts = True
3153 else:
3154 for host in hosts[ controller ]:
3155 mac = None
3156 locations = []
3157 device = None
3158 port = None
3159 try:
3160 mac = host.get( 'mac' )
3161 assert mac, "mac field could not be found for this host object"
3162 if 'locations' in host:
3163 locations = host.get( 'locations' )
3164 elif 'location' in host:
3165 locations.append( host.get( 'location' ) )
3166 assert locations, "locations field could not be found for this host object"
Devin Lim58046fa2017-07-05 16:55:00 -07003167
Jon Hallab611372018-02-21 15:26:05 -08003168 # Trim the protocol identifier off deviceId
3169 device = str( locations[0].get( 'elementId' ) ).split( ':' )[ 1 ]
3170 assert device, "elementId field could not be found for this host location object"
Devin Lim58046fa2017-07-05 16:55:00 -07003171
Jon Hallab611372018-02-21 15:26:05 -08003172 port = locations[0].get( 'port' )
3173 assert port, "port field could not be found for this host location object"
Devin Lim58046fa2017-07-05 16:55:00 -07003174
Jon Hallab611372018-02-21 15:26:05 -08003175 # Now check if this matches where they should be
3176 if mac and device and port:
3177 if str( port ) != "1":
3178 main.log.error( "The attachment port is incorrect for " +
3179 "host " + str( mac ) +
3180 ". Expected: 1 Actual: " + str( port ) )
3181 hostAttachment = False
3182 if device != main.topoMappings[ str( mac ) ]:
3183 main.log.error( "The attachment device is incorrect for " +
3184 "host " + str( mac ) +
3185 ". Expected: " + main.topoMppings[ str( mac ) ] +
3186 " Actual: " + device )
3187 hostAttachment = False
3188 else:
Devin Lim58046fa2017-07-05 16:55:00 -07003189 hostAttachment = False
Jon Hallab611372018-02-21 15:26:05 -08003190 except ( AssertionError, TypeError ):
3191 main.log.exception( "Json object not as expected" )
3192 main.log.error( repr( host ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003193 hostAttachment = False
Jon Hallab611372018-02-21 15:26:05 -08003194 else:
3195 main.log.error( "No hosts json output or \"Error\"" +
3196 " in output. hosts = " +
3197 repr( hosts[ controller ] ) )
3198 if zeroHosts is False:
3199 # TODO: Find a way to know if there should be hosts in a
3200 # given point of the test
3201 hostAttachment = True
Devin Lim58046fa2017-07-05 16:55:00 -07003202
Jon Hallab611372018-02-21 15:26:05 -08003203 # END CHECKING HOST ATTACHMENT POINTS
Devin Lim58046fa2017-07-05 16:55:00 -07003204 devicesResults = devicesResults and currentDevicesResult
3205 linksResults = linksResults and currentLinksResult
3206 hostsResults = hostsResults and currentHostsResult
3207 hostAttachmentResults = hostAttachmentResults and\
3208 hostAttachment
3209 topoResult = ( devicesResults and linksResults
3210 and hostsResults and ipResult and
3211 hostAttachmentResults )
3212 utilities.assert_equals( expect=True,
3213 actual=topoResult,
3214 onpass="ONOS topology matches Mininet",
3215 onfail=topoFailMsg )
3216 # End of While loop to pull ONOS state
3217
3218 # Compare json objects for hosts and dataplane clusters
3219
3220 # hosts
3221 main.step( "Hosts view is consistent across all ONOS nodes" )
3222 consistentHostsResult = main.TRUE
3223 for controller in range( len( hosts ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003224 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003225 if hosts[ controller ] is not None and "Error" not in hosts[ controller ]:
3226 if hosts[ controller ] == hosts[ 0 ]:
3227 continue
3228 else: # hosts not consistent
Jon Hallca319892017-06-15 15:25:22 -07003229 main.log.error( "hosts from " + controllerStr +
Devin Lim58046fa2017-07-05 16:55:00 -07003230 " is inconsistent with ONOS1" )
Jon Hallca319892017-06-15 15:25:22 -07003231 main.log.debug( repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003232 consistentHostsResult = main.FALSE
3233
3234 else:
Jon Hallca319892017-06-15 15:25:22 -07003235 main.log.error( "Error in getting ONOS hosts from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003236 controllerStr )
3237 consistentHostsResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003238 main.log.debug( controllerStr +
3239 " hosts response: " +
3240 repr( hosts[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003241 utilities.assert_equals(
3242 expect=main.TRUE,
3243 actual=consistentHostsResult,
3244 onpass="Hosts view is consistent across all ONOS nodes",
3245 onfail="ONOS nodes have different views of hosts" )
3246
3247 main.step( "Hosts information is correct" )
3248 hostsResults = hostsResults and ipResult
3249 utilities.assert_equals(
3250 expect=main.TRUE,
3251 actual=hostsResults,
3252 onpass="Host information is correct",
3253 onfail="Host information is incorrect" )
3254
3255 main.step( "Host attachment points to the network" )
3256 utilities.assert_equals(
3257 expect=True,
3258 actual=hostAttachmentResults,
3259 onpass="Hosts are correctly attached to the network",
3260 onfail="ONOS did not correctly attach hosts to the network" )
3261
3262 # Strongly connected clusters of devices
3263 main.step( "Clusters view is consistent across all ONOS nodes" )
3264 consistentClustersResult = main.TRUE
3265 for controller in range( len( clusters ) ):
Devin Lim142b5342017-07-20 15:22:39 -07003266 controllerStr = str( main.Cluster.active( controller ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003267 if "Error" not in clusters[ controller ]:
3268 if clusters[ controller ] == clusters[ 0 ]:
3269 continue
3270 else: # clusters not consistent
Jon Hallca319892017-06-15 15:25:22 -07003271 main.log.error( "clusters from " +
Devin Lim58046fa2017-07-05 16:55:00 -07003272 controllerStr +
3273 " is inconsistent with ONOS1" )
3274 consistentClustersResult = main.FALSE
3275 else:
3276 main.log.error( "Error in getting dataplane clusters " +
Jon Hallca319892017-06-15 15:25:22 -07003277 "from " + controllerStr )
Devin Lim58046fa2017-07-05 16:55:00 -07003278 consistentClustersResult = main.FALSE
Jon Hallca319892017-06-15 15:25:22 -07003279 main.log.debug( controllerStr +
3280 " clusters response: " +
3281 repr( clusters[ controller ] ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003282 utilities.assert_equals(
3283 expect=main.TRUE,
3284 actual=consistentClustersResult,
3285 onpass="Clusters view is consistent across all ONOS nodes",
3286 onfail="ONOS nodes have different views of clusters" )
3287 if not consistentClustersResult:
3288 main.log.debug( clusters )
3289 for x in links:
Jon Hallca319892017-06-15 15:25:22 -07003290 main.log.debug( "{}: {}".format( len( x ), x ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003291
3292 main.step( "There is only one SCC" )
3293 # there should always only be one cluster
3294 try:
3295 numClusters = len( json.loads( clusters[ 0 ] ) )
3296 except ( ValueError, TypeError ):
3297 main.log.exception( "Error parsing clusters[0]: " +
3298 repr( clusters[ 0 ] ) )
3299 numClusters = "ERROR"
3300 clusterResults = main.FALSE
3301 if numClusters == 1:
3302 clusterResults = main.TRUE
3303 utilities.assert_equals(
3304 expect=1,
3305 actual=numClusters,
3306 onpass="ONOS shows 1 SCC",
3307 onfail="ONOS shows " + str( numClusters ) + " SCCs" )
3308
3309 topoResult = ( devicesResults and linksResults
3310 and hostsResults and consistentHostsResult
3311 and consistentClustersResult and clusterResults
3312 and ipResult and hostAttachmentResults )
3313
3314 topoResult = topoResult and int( count <= 2 )
3315 note = "note it takes about " + str( int( cliTime ) ) + \
3316 " seconds for the test to make all the cli calls to fetch " +\
3317 "the topology from each ONOS instance"
3318 main.log.info(
3319 "Very crass estimate for topology discovery/convergence( " +
3320 str( note ) + " ): " + str( elapsed ) + " seconds, " +
3321 str( count ) + " tries" )
3322
3323 main.step( "Device information is correct" )
3324 utilities.assert_equals(
3325 expect=main.TRUE,
3326 actual=devicesResults,
3327 onpass="Device information is correct",
3328 onfail="Device information is incorrect" )
3329
3330 main.step( "Links are correct" )
3331 utilities.assert_equals(
3332 expect=main.TRUE,
3333 actual=linksResults,
3334 onpass="Link are correct",
3335 onfail="Links are incorrect" )
3336
3337 main.step( "Hosts are correct" )
3338 utilities.assert_equals(
3339 expect=main.TRUE,
3340 actual=hostsResults,
3341 onpass="Hosts are correct",
3342 onfail="Hosts are incorrect" )
3343
3344 # FIXME: move this to an ONOS state case
Jon Hall30668ff2019-02-27 17:43:09 -08003345 main.testSetUp.checkOnosNodes( main.Cluster )
Jon Hallca319892017-06-15 15:25:22 -07003346
Jon Hallab611372018-02-21 15:26:05 -08003347 def linkDown( self, main, src="s3", dst="s28" ):
Devin Lim58046fa2017-07-05 16:55:00 -07003348 """
Jon Hallab611372018-02-21 15:26:05 -08003349 Link src-dst down
Devin Lim58046fa2017-07-05 16:55:00 -07003350 """
3351 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003352 assert main, "main not defined"
3353 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003354 # NOTE: You should probably run a topology check after this
3355
3356 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3357
3358 description = "Turn off a link to ensure that Link Discovery " +\
3359 "is working properly"
3360 main.case( description )
3361
Jon Hallab611372018-02-21 15:26:05 -08003362 main.step( "Kill Link between " + src + " and " + dst )
3363 LinkDown = main.Mininet1.link( END1=src, END2=dst, OPTION="down" )
Devin Lim58046fa2017-07-05 16:55:00 -07003364 main.log.info( "Waiting " + str( linkSleep ) +
3365 " seconds for link down to be discovered" )
3366 time.sleep( linkSleep )
3367 utilities.assert_equals( expect=main.TRUE, actual=LinkDown,
3368 onpass="Link down successful",
3369 onfail="Failed to bring link down" )
3370 # TODO do some sort of check here
3371
Jon Hallab611372018-02-21 15:26:05 -08003372 def linkUp( self, main, src="s3", dst="s28" ):
Devin Lim58046fa2017-07-05 16:55:00 -07003373 """
Jon Hallab611372018-02-21 15:26:05 -08003374 Link src-dst up
Devin Lim58046fa2017-07-05 16:55:00 -07003375 """
3376 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003377 assert main, "main not defined"
3378 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003379 # NOTE: You should probably run a topology check after this
3380
3381 linkSleep = float( main.params[ 'timers' ][ 'LinkDiscovery' ] )
3382
3383 description = "Restore a link to ensure that Link Discovery is " + \
3384 "working properly"
3385 main.case( description )
3386
Jon Hallab611372018-02-21 15:26:05 -08003387 main.step( "Bring link between " + src + " and " + dst + " back up" )
3388 LinkUp = main.Mininet1.link( END1=src, END2=dst, OPTION="up" )
Devin Lim58046fa2017-07-05 16:55:00 -07003389 main.log.info( "Waiting " + str( linkSleep ) +
3390 " seconds for link up to be discovered" )
3391 time.sleep( linkSleep )
3392 utilities.assert_equals( expect=main.TRUE, actual=LinkUp,
3393 onpass="Link up successful",
3394 onfail="Failed to bring link up" )
3395
3396 def switchDown( self, main ):
3397 """
3398 Switch Down
3399 """
3400 # NOTE: You should probably run a topology check after this
3401 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003402 assert main, "main not defined"
3403 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003404
3405 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3406
3407 description = "Killing a switch to ensure it is discovered correctly"
Devin Lim58046fa2017-07-05 16:55:00 -07003408 main.case( description )
3409 switch = main.params[ 'kill' ][ 'switch' ]
3410 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3411
3412 # TODO: Make this switch parameterizable
3413 main.step( "Kill " + switch )
3414 main.log.info( "Deleting " + switch )
3415 main.Mininet1.delSwitch( switch )
3416 main.log.info( "Waiting " + str( switchSleep ) +
3417 " seconds for switch down to be discovered" )
3418 time.sleep( switchSleep )
Jon Hall0e240372018-05-02 11:21:57 -07003419 device = main.Cluster.next().getDevice( dpid=switchDPID )
Devin Lim58046fa2017-07-05 16:55:00 -07003420 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003421 main.log.warn( "Bringing down switch " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003422 result = main.FALSE
3423 if device and device[ 'available' ] is False:
3424 result = main.TRUE
3425 utilities.assert_equals( expect=main.TRUE, actual=result,
3426 onpass="Kill switch successful",
3427 onfail="Failed to kill switch?" )
Jon Hallca319892017-06-15 15:25:22 -07003428
Devin Lim58046fa2017-07-05 16:55:00 -07003429 def switchUp( self, main ):
3430 """
3431 Switch Up
3432 """
3433 # NOTE: You should probably run a topology check after this
3434 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003435 assert main, "main not defined"
3436 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003437
3438 switchSleep = float( main.params[ 'timers' ][ 'SwitchDiscovery' ] )
3439 switch = main.params[ 'kill' ][ 'switch' ]
3440 switchDPID = main.params[ 'kill' ][ 'dpid' ]
3441 links = main.params[ 'kill' ][ 'links' ].split()
Devin Lim58046fa2017-07-05 16:55:00 -07003442 description = "Adding a switch to ensure it is discovered correctly"
3443 main.case( description )
3444
3445 main.step( "Add back " + switch )
3446 main.Mininet1.addSwitch( switch, dpid=switchDPID )
3447 for peer in links:
3448 main.Mininet1.addLink( switch, peer )
Jon Hallca319892017-06-15 15:25:22 -07003449 ipList = main.Cluster.getIps()
Devin Lim58046fa2017-07-05 16:55:00 -07003450 main.Mininet1.assignSwController( sw=switch, ip=ipList )
3451 main.log.info( "Waiting " + str( switchSleep ) +
3452 " seconds for switch up to be discovered" )
3453 time.sleep( switchSleep )
Jon Hall0e240372018-05-02 11:21:57 -07003454 device = main.Cluster.next().getDevice( dpid=switchDPID )
Devin Lim58046fa2017-07-05 16:55:00 -07003455 # Peek at the deleted switch
Jon Hallca319892017-06-15 15:25:22 -07003456 main.log.debug( "Added device: " + str( device ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003457 result = main.FALSE
3458 if device and device[ 'available' ]:
3459 result = main.TRUE
3460 utilities.assert_equals( expect=main.TRUE, actual=result,
3461 onpass="add switch successful",
3462 onfail="Failed to add switch?" )
3463
3464 def startElectionApp( self, main ):
3465 """
3466 start election app on all onos nodes
3467 """
Devin Lim58046fa2017-07-05 16:55:00 -07003468 assert main, "main not defined"
3469 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003470
3471 main.case( "Start Leadership Election app" )
3472 main.step( "Install leadership election app" )
Jon Hall0e240372018-05-02 11:21:57 -07003473 appResult = main.Cluster.next().CLI.activateApp( "org.onosproject.election" )
Devin Lim58046fa2017-07-05 16:55:00 -07003474 utilities.assert_equals(
3475 expect=main.TRUE,
3476 actual=appResult,
3477 onpass="Election app installed",
3478 onfail="Something went wrong with installing Leadership election" )
3479
3480 main.step( "Run for election on each node" )
Jon Hall0e240372018-05-02 11:21:57 -07003481 main.Cluster.next().electionTestRun()
Jon Hallca319892017-06-15 15:25:22 -07003482 main.Cluster.command( "electionTestRun" )
Devin Lim58046fa2017-07-05 16:55:00 -07003483 time.sleep( 5 )
Jon Hallca319892017-06-15 15:25:22 -07003484 sameResult, leaders = main.HA.consistentLeaderboards( main.Cluster.active() )
Devin Lim58046fa2017-07-05 16:55:00 -07003485 utilities.assert_equals(
3486 expect=True,
3487 actual=sameResult,
3488 onpass="All nodes see the same leaderboards",
3489 onfail="Inconsistent leaderboards" )
3490
3491 if sameResult:
Jon Hall5d5876e2017-11-30 09:33:16 -08003492 # Check that the leader is one of the active nodes
3493 ips = sorted( main.Cluster.getIps( activeOnly=True ) )
Devin Lim58046fa2017-07-05 16:55:00 -07003494 leader = leaders[ 0 ][ 0 ]
Jon Hall5d5876e2017-11-30 09:33:16 -08003495 if leader in ips:
3496 legitimate = True
Devin Lim58046fa2017-07-05 16:55:00 -07003497 else:
Jon Hall5d5876e2017-11-30 09:33:16 -08003498 legitimate = False
3499 main.log.debug( leaders )
3500 main.step( "Active node was elected leader?" )
Devin Lim58046fa2017-07-05 16:55:00 -07003501 utilities.assert_equals(
3502 expect=True,
Jon Hall5d5876e2017-11-30 09:33:16 -08003503 actual=legitimate,
Devin Lim58046fa2017-07-05 16:55:00 -07003504 onpass="Correct leader was elected",
3505 onfail="Incorrect leader" )
Jon Hallca319892017-06-15 15:25:22 -07003506 main.Cluster.testLeader = leader
3507
Devin Lim58046fa2017-07-05 16:55:00 -07003508 def isElectionFunctional( self, main ):
3509 """
3510 Check that Leadership Election is still functional
3511 15.1 Run election on each node
3512 15.2 Check that each node has the same leaders and candidates
3513 15.3 Find current leader and withdraw
3514 15.4 Check that a new node was elected leader
3515 15.5 Check that that new leader was the candidate of old leader
3516 15.6 Run for election on old leader
3517 15.7 Check that oldLeader is a candidate, and leader if only 1 node
3518 15.8 Make sure that the old leader was added to the candidate list
3519
3520 old and new variable prefixes refer to data from before vs after
3521 withdrawl and later before withdrawl vs after re-election
3522 """
3523 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003524 assert main, "main not defined"
3525 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003526
3527 description = "Check that Leadership Election is still functional"
3528 main.case( description )
Jon Hall627b1572020-12-01 12:01:15 -08003529 # NOTE: Need to re-run after restarts since being a candidate is not persistent
Devin Lim58046fa2017-07-05 16:55:00 -07003530
3531 oldLeaders = [] # list of lists of each nodes' candidates before
3532 newLeaders = [] # list of lists of each nodes' candidates after
3533 oldLeader = '' # the old leader from oldLeaders, None if not same
3534 newLeader = '' # the new leaders fron newLoeaders, None if not same
3535 oldLeaderCLI = None # the CLI of the old leader used for re-electing
3536 expectNoLeader = False # True when there is only one leader
Devin Lim142b5342017-07-20 15:22:39 -07003537 if len( main.Cluster.runningNodes ) == 1:
Devin Lim58046fa2017-07-05 16:55:00 -07003538 expectNoLeader = True
3539
3540 main.step( "Run for election on each node" )
Devin Lim142b5342017-07-20 15:22:39 -07003541 electionResult = main.Cluster.command( "electionTestRun", returnBool=True )
Devin Lim58046fa2017-07-05 16:55:00 -07003542 utilities.assert_equals(
Jon Hallca319892017-06-15 15:25:22 -07003543 expect=True,
Devin Lim58046fa2017-07-05 16:55:00 -07003544 actual=electionResult,
3545 onpass="All nodes successfully ran for leadership",
3546 onfail="At least one node failed to run for leadership" )
3547
3548 if electionResult == main.FALSE:
3549 main.log.error(
3550 "Skipping Test Case because Election Test App isn't loaded" )
3551 main.skipCase()
3552
3553 main.step( "Check that each node shows the same leader and candidates" )
3554 failMessage = "Nodes have different leaderboards"
Jon Hallca319892017-06-15 15:25:22 -07003555 activeCLIs = main.Cluster.active()
3556 sameResult, oldLeaders = main.HA.consistentLeaderboards( activeCLIs )
Devin Lim58046fa2017-07-05 16:55:00 -07003557 if sameResult:
3558 oldLeader = oldLeaders[ 0 ][ 0 ]
Jon Hallca319892017-06-15 15:25:22 -07003559 main.log.info( "Old leader: " + oldLeader )
Devin Lim58046fa2017-07-05 16:55:00 -07003560 else:
3561 oldLeader = None
3562 utilities.assert_equals(
3563 expect=True,
3564 actual=sameResult,
3565 onpass="Leaderboards are consistent for the election topic",
3566 onfail=failMessage )
3567
3568 main.step( "Find current leader and withdraw" )
3569 withdrawResult = main.TRUE
3570 # do some sanity checking on leader before using it
3571 if oldLeader is None:
3572 main.log.error( "Leadership isn't consistent." )
3573 withdrawResult = main.FALSE
3574 # Get the CLI of the oldLeader
Jon Hallca319892017-06-15 15:25:22 -07003575 for ctrl in main.Cluster.active():
3576 if oldLeader == ctrl.ipAddress:
3577 oldLeaderCLI = ctrl
Devin Lim58046fa2017-07-05 16:55:00 -07003578 break
3579 else: # FOR/ELSE statement
Jon Hall701fea12018-10-08 11:09:22 -07003580 main.log.error( "Leader election, could not find current leader amongst active nodes" )
3581 for ctrl in main.Cluster.controllers:
3582 if oldLeader == ctrl.ipAddress:
3583 oldLeaderCLI = ctrl
3584 main.log.warn( "Old leader was found as node " + str( ctrl.ipAddress ) )
3585 # Should we skip the next if statement then? There should be a new leader elected?
Devin Lim58046fa2017-07-05 16:55:00 -07003586 if oldLeader:
3587 withdrawResult = oldLeaderCLI.electionTestWithdraw()
3588 utilities.assert_equals(
3589 expect=main.TRUE,
3590 actual=withdrawResult,
3591 onpass="Node was withdrawn from election",
3592 onfail="Node was not withdrawn from election" )
3593
3594 main.step( "Check that a new node was elected leader" )
3595 failMessage = "Nodes have different leaders"
3596 # Get new leaders and candidates
3597 newLeaderResult, newLeaders = self.consistentLeaderboards( activeCLIs )
3598 newLeader = None
3599 if newLeaderResult:
3600 if newLeaders[ 0 ][ 0 ] == 'none':
3601 main.log.error( "No leader was elected on at least 1 node" )
3602 if not expectNoLeader:
3603 newLeaderResult = False
3604 newLeader = newLeaders[ 0 ][ 0 ]
3605
3606 # Check that the new leader is not the older leader, which was withdrawn
3607 if newLeader == oldLeader:
3608 newLeaderResult = False
3609 main.log.error( "All nodes still see old leader: " + str( oldLeader ) +
3610 " as the current leader" )
3611 utilities.assert_equals(
3612 expect=True,
3613 actual=newLeaderResult,
3614 onpass="Leadership election passed",
3615 onfail="Something went wrong with Leadership election" )
3616
3617 main.step( "Check that that new leader was the candidate of old leader" )
3618 # candidates[ 2 ] should become the top candidate after withdrawl
3619 correctCandidateResult = main.TRUE
3620 if expectNoLeader:
3621 if newLeader == 'none':
3622 main.log.info( "No leader expected. None found. Pass" )
3623 correctCandidateResult = main.TRUE
3624 else:
3625 main.log.info( "Expected no leader, got: " + str( newLeader ) )
3626 correctCandidateResult = main.FALSE
Devin Lim58046fa2017-07-05 16:55:00 -07003627 utilities.assert_equals(
3628 expect=main.TRUE,
3629 actual=correctCandidateResult,
3630 onpass="Correct Candidate Elected",
3631 onfail="Incorrect Candidate Elected" )
3632
3633 main.step( "Run for election on old leader( just so everyone " +
3634 "is in the hat )" )
3635 if oldLeaderCLI is not None:
3636 runResult = oldLeaderCLI.electionTestRun()
3637 else:
3638 main.log.error( "No old leader to re-elect" )
3639 runResult = main.FALSE
3640 utilities.assert_equals(
3641 expect=main.TRUE,
3642 actual=runResult,
3643 onpass="App re-ran for election",
3644 onfail="App failed to run for election" )
3645
3646 main.step(
3647 "Check that oldLeader is a candidate, and leader if only 1 node" )
3648 # verify leader didn't just change
3649 # Get new leaders and candidates
3650 reRunLeaders = []
3651 time.sleep( 5 ) # Paremterize
3652 positionResult, reRunLeaders = self.consistentLeaderboards( activeCLIs )
3653
Devin Lim58046fa2017-07-05 16:55:00 -07003654 def installDistributedPrimitiveApp( self, main ):
Jon Hall5d5876e2017-11-30 09:33:16 -08003655 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003656 Install Distributed Primitives app
Jon Hall5d5876e2017-11-30 09:33:16 -08003657 '''
Devin Lim58046fa2017-07-05 16:55:00 -07003658 import time
Devin Lim58046fa2017-07-05 16:55:00 -07003659 assert main, "main not defined"
3660 assert utilities.assert_equals, "utilities.assert_equals not defined"
Devin Lim58046fa2017-07-05 16:55:00 -07003661
3662 # Variables for the distributed primitives tests
3663 main.pCounterName = "TestON-Partitions"
3664 main.pCounterValue = 0
3665 main.onosSet = set( [] )
3666 main.onosSetName = "TestON-set"
3667
3668 description = "Install Primitives app"
3669 main.case( description )
3670 main.step( "Install Primitives app" )
3671 appName = "org.onosproject.distributedprimitives"
Devin Lime9f0ccf2017-08-11 17:25:12 -07003672 appResults = main.Cluster.next().CLI.activateApp( appName )
Devin Lim58046fa2017-07-05 16:55:00 -07003673 utilities.assert_equals( expect=main.TRUE,
3674 actual=appResults,
3675 onpass="Primitives app activated",
3676 onfail="Primitives app not activated" )
3677 # TODO check on all nodes instead of sleeping
Jeremy Ronquillob27ce4c2017-07-17 12:41:28 -07003678 time.sleep( 5 ) # To allow all nodes to activate
Jon Halla478b852017-12-04 15:00:15 -08003679
3680 def upgradeInit( self, main ):
3681 '''
3682 Initiates an update
3683 '''
3684 main.step( "Send the command to initialize the upgrade" )
3685 ctrl = main.Cluster.next().CLI
3686 initialized = ctrl.issuInit()
3687 utilities.assert_equals( expect=main.TRUE, actual=initialized,
3688 onpass="ISSU initialized",
3689 onfail="Error initializing the upgrade" )
3690
3691 main.step( "Check the status of the upgrade" )
3692 ctrl = main.Cluster.next().CLI
3693 status = ctrl.issu()
3694 main.log.debug( status )
3695 # TODO: check things here?
3696
Jon Hall30668ff2019-02-27 17:43:09 -08003697 main.testSetUp.checkOnosNodes( main.Cluster )
Jon Hall7ce46ea2018-02-05 12:20:59 -08003698
3699 def backupData( self, main, location ):
3700 """
3701 Backs up ONOS data and logs to a given location on each active node in a cluster
3702 """
3703 result = True
3704 for ctrl in main.Cluster.active():
3705 try:
3706 ctrl.server.handle.sendline( "rm " + location )
3707 ctrl.server.handle.expect( ctrl.server.prompt )
3708 main.log.debug( ctrl.server.handle.before + ctrl.server.handle.after )
3709 except pexpect.ExceptionPexpect as e:
3710 main.log.error( e )
3711 main.cleanAndExit()
3712 ctrl.CLI.log( "'Starting backup of onos data'", level="INFO" )
3713 result = result and ( ctrl.server.backupData( location ) is main.TRUE )
3714 ctrl.CLI.log( "'End of backup of onos data'", level="INFO" )
3715 return result
3716
3717 def restoreData( self, main, location ):
3718 """
3719 Restores ONOS data and logs from a given location on each node in a cluster
3720 """
3721 result = True
3722 for ctrl in main.Cluster.controllers:
3723 result = result and ( ctrl.server.restoreData( location ) is main.TRUE )
3724 return result
Jon Hallab611372018-02-21 15:26:05 -08003725
    def startTopology( self, main ):
        """
        Starts Mininet using a topology file after pushing a network config file to ONOS.

        Steps:
            1. Push the JSON network config file to one ONOS node.
            2. Re-read the same file locally and compare its 'devices',
               'ports', and (if present) 'hosts' sections against what ONOS
               reports over REST.
            3. Copy the topology files to the Mininet machine and start
               Mininet pointed at every cluster controller.
        """
        import json
        import time
        main.case( "Starting Mininet Topology" )

        main.step( "Pushing Network config" )
        ctrl = main.Cluster.next()
        cfgPath = main.testsRoot + main.params[ 'topology' ][ 'configPath' ]
        cfgResult = ctrl.onosNetCfg( ctrl.ipAddress,
                                     path=cfgPath,
                                     fileName=main.params[ 'topology' ][ 'configName' ] )
        utilities.assert_equals( expect=main.TRUE, actual=cfgResult,
                                 onpass="Pushed Network Configuration to ONOS",
                                 onfail="Failed to push Network Configuration to ONOS" )

        main.step( "Check Network config" )
        try:
            cfgFile = cfgPath + main.params[ 'topology' ][ 'configName' ]
            with open( cfgFile, 'r' ) as contents:
                pushedNetCfg = json.load( contents )
            # Lower-case the whole document so comparisons are case-insensitive
            pushedNetCfg = json.loads( json.dumps( pushedNetCfg ).lower() )
        except IOError:
            main.log.exception( "Net Cfg file not found." )
            main.cleanAndExit()
        netCfgSleep = int( main.params[ 'timers' ][ 'NetCfg' ] )
        time.sleep( netCfgSleep )
        # Retry the REST read since the pushed config may take a while to settle
        rawONOSNetCfg = utilities.retry( f=main.Cluster.next().REST.getNetCfg,
                                         retValue=False,
                                         attempts=5,
                                         sleep=netCfgSleep )
        # Fix differences between ONOS printing and Pushed Cfg
        onosNetCfg = json.loads( rawONOSNetCfg.lower() )

        # Compare pushed device config
        cfgResult = True
        for did, pushedDevice in pushedNetCfg[ 'devices' ].items():
            onosDevice = onosNetCfg[ 'devices' ].get( did )
            if pushedDevice != onosDevice:
                cfgResult = False
                main.log.error( "Pushed Network configuration does not match what is in " +
                                "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedDevice ),
                                                                      ctrl.pprint( onosDevice ) ) )

        # Compare pushed port config
        # Ports are compared key-by-key (rather than whole-dict) because ONOS
        # adds fields (e.g. macs) that the pushed config does not contain.
        # NOTE(review): assumes ONOS reports every port/interface present in
        # the pushed config; a missing port would raise here — confirm.
        for portURI, pushedInterface in pushedNetCfg[ 'ports' ].items():
            onosInterface = onosNetCfg[ 'ports' ].get( portURI )
            # NOTE: pushed Cfg doesn't have macs
            for i in xrange( 0, len( pushedInterface[ 'interfaces' ] ) ):
                keys = pushedInterface[ 'interfaces' ][ i ].keys()
                portCompare = True
                for key in keys:
                    if pushedInterface[ 'interfaces' ][ i ].get( key ) != onosInterface[ 'interfaces' ][ i ].get( key ) :
                        main.log.debug( "{} mismatch for port {}".format( key, portURI ) )
                        portCompare = False
                if not portCompare:
                    cfgResult = False
                    main.log.error( "Pushed Network configuration does not match what is in " +
                                    "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedInterface ),
                                                                          ctrl.pprint( onosInterface ) ) )

        if pushedNetCfg.get( 'hosts' ) is not None:
            # Compare pushed host config
            for hid, pushedHost in pushedNetCfg[ 'hosts' ].items():
                onosHost = onosNetCfg[ 'hosts' ].get( hid.lower() )
                if pushedHost != onosHost:
                    cfgResult = False
                    main.log.error( "Pushed Network configuration does not match what is in " +
                                    "ONOS:\nPushed: {}\nONOS: {}".format( ctrl.pprint( pushedHost ),
                                                                          ctrl.pprint( onosHost ) ) )
        utilities.assert_equals( expect=True,
                                 actual=cfgResult,
                                 onpass="Net Cfg set",
                                 onfail="Net Cfg not correctly set" )
        if not cfgResult:
            # Dump both full documents to help debug the mismatch
            main.log.debug( "Pushed Network Config:" + ctrl.pprint( pushedNetCfg ) )
            main.log.debug( "ONOS Network Config:" + ctrl.pprint( onosNetCfg ) )

        main.step( "Start Mininet topology" )
        # Copy the topology files to the Mininet machine before starting
        for f in main.params[ 'topology' ][ 'files' ].values():
            main.ONOSbench.scp( main.Mininet1,
                                f,
                                main.Mininet1.home,
                                direction="to" )
        topoName = main.params[ 'topology' ][ 'topoFile' ]
        topo = main.Mininet1.home + topoName
        # Comma-separated list of all controller IPs for the topology script
        ctrlList = ''
        for ctrl in main.Cluster.controllers:
            ctrlList += str( ctrl.ipAddress ) + ","
        args = main.params[ 'topology' ][ 'args' ]
        startResult = main.Mininet1.startNet( topoFile=topo,
                                              args=" --onos-ip=" + ctrlList + " " + args )
        utilities.assert_equals( expect=main.TRUE, actual=startResult,
                                 onpass="Mininet Started",
                                 onfail="Failed to start Mininet" )
        # Give SR app time to configure the network
        time.sleep( int( main.params[ 'timers' ][ 'SRSetup' ] ) )